repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
google-research/google-research
seq2act/data_generation/create_android_synthetic_dataset.py
1
22915
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Creates screen dataset with tfExample proto in TFRecord format. For all the valid xml or json files in the input directory, it parses their view hierarchy attributes, extracts the feature data into tf.train.Example proto and saves the results with TFRecord format in the output directory as multiple sharded files. A file containing the dimension data for padding purpose is also created. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import operator import os import threading import concurrent.futures import numpy as np import tensorflow.compat.v1 as tf # tf from seq2act.data_generation import common from seq2act.data_generation import config from seq2act.data_generation import proto_utils from seq2act.data_generation import synthetic_action_generator from seq2act.data_generation import view_hierarchy gfile = tf.gfile flags = tf.flags FLAGS = flags.FLAGS _INPUT_DIR = '/tmp/input' _OUTPUT_DIR = '/tmp/' _FILTER_FILE = '/tmp/' _NUM_THREADS_DEFAULT = 10 _PADDING_DIMENSIONS_FILE_NAME = 'padding_dimensions.txt' _TOKEN_TYPE = 'subtoken' _NUM_SHARDS_DEFAULT = config.SHARD_NUM _MAX_WORD_NUM_UPPER_DEFAULT = config.MAX_WORD_NUM_UPPER_BOUND _MAX_WORD_LENGTH_UPPER_DEFAULT = config.MAX_WORD_LENGTH_UPPER_BOUND _DATASET_TYPE_DEFAULT = 'rico' _STATS_DIMENSIONS = False _MAX_WORD_NUM = 30 _MAX_WORD_LENGTH = 
23 _FREQ_OBJ_TYPE = [ view_hierarchy.UIObjectType.UNKNOWN, view_hierarchy.UIObjectType.BUTTON, view_hierarchy.UIObjectType.IMAGEVIEW, view_hierarchy.UIObjectType.TEXTVIEW, ] _INFREQUENT_OBJ_TYPE_MAX_RATIO = 0.9 _FILTER_ACTIONS_BY_NAME = True _FILTER_ACTION_BY_TYPE = True flags.DEFINE_string( 'input_dir', _INPUT_DIR, 'Full path to the directory containing the data files for a set of tasks.') flags.DEFINE_string( 'output_dir', _OUTPUT_DIR, 'Full path to the directory for saving the tf record file.') flags.DEFINE_string( 'filter_file', _FILTER_FILE, 'Full path to the directory for saving filter file or RICO.') flags.DEFINE_integer( 'num_threads', _NUM_THREADS_DEFAULT, 'The number of threads to process the data files concurrently.') flags.DEFINE_integer( 'num_shards', _NUM_SHARDS_DEFAULT, 'The number of sharded files to save the created dataset.') flags.DEFINE_integer('max_word_num_upper', _MAX_WORD_NUM_UPPER_DEFAULT, 'The max number of words for building model features.') flags.DEFINE_integer('max_word_length_upper', _MAX_WORD_LENGTH_UPPER_DEFAULT, 'The max length of words for building model features.') flags.DEFINE_enum('dataset', _DATASET_TYPE_DEFAULT, ['android_settings', 'rico'], 'The type of supported dataset.') flags.DEFINE_enum('file_to_generate', 'tf_example', ['tf_example', 'corpus'], 'Whether generate feature tfexample or corpus txt.') debug_info_lock = threading.Lock() longest_stats = collections.Counter() distributions = collections.defaultdict(collections.Counter) def _stat_distribution(name, value_list): with debug_info_lock: # longest_stats[name] = max(longest_stats[name], num) for value in value_list: distributions[name][value] += 1 sums = collections.defaultdict(int) def _stat_sum(name, num): with debug_info_lock: sums[name] += num def _get_data_dimensions(input_dir): """Processes the dimension data. The dimension data includes maximum word numbers and maximum word lengths from all the ui objects across all the .xml/.json files in input_dir. 
It will be used for padding purpose. The results are written in <output_dir>/padding_dimensions.txt. Args: input_dir: The directory that contains the input xml/json files. Returns: A tuple (max_word_num_all_files, max_word_length_all_files) max_word_num_all_files: The max number of words from all the ui objects across all the .xml/.json files. max_word_length_all_files: The max length of words from all the ui objects across all the .xml/.json files. """ max_word_num_all_files = 0 max_word_length_all_files = 0 # We can use ThreadPool since these are IO-bound operations. with concurrent.futures.ThreadPoolExecutor(FLAGS.num_threads) as executor: futures = [] for file_path in gfile.Glob(os.path.join(input_dir, '*.xml')) + gfile.Glob( os.path.join(input_dir, '*.json')): futures.append(executor.submit(common.get_word_statistics, file_path)) for future in concurrent.futures.as_completed(futures): _, max_word_num_one_file, max_word_length_one_file = future.result() max_word_num_all_files = max(max_word_num_all_files, max_word_num_one_file) max_word_length_all_files = max(max_word_length_all_files, max_word_length_one_file) tf.logging.info('max_word_num_all_files=%d, max_word_length_all_files=%d', max_word_num_all_files, max_word_length_all_files) return max_word_num_all_files, max_word_length_all_files def _process_dimensions(input_dir, output_dir): """Processes the dimension data. The dimension data includes maximum word numbers and maximum word lengths from all the ui objects across all the .xml/.json files in input_dir. It will be used for padding purpose. The results are written in <output_dir>/padding_dimensions.txt. Args: input_dir: The directory that contains the input xml/json files. output_dir: The directory that saves output dimension data. Returns: A tuple (max_word_num, max_word_length) max_word_num: The max number of words for building model features. max_word_length: The max length of words for building model features. 
""" tf.logging.info('Processing data dimensions...') max_word_num, max_word_length = _get_data_dimensions(input_dir) # Apply pre-configured upper bound to clip possibly rare outlier values. max_word_num = min(max_word_num, FLAGS.max_word_num_upper) max_word_length = min(max_word_length, FLAGS.max_word_length_upper) with gfile.GFile( os.path.join(output_dir, _PADDING_DIMENSIONS_FILE_NAME), 'w+') as f: f.write('max_word_num: %d\nmax_word_length: %d\n' % (max_word_num, max_word_length)) return max_word_num, max_word_length def _filter_synthetic_by_name_overlap(action_list, screen_feature, first_k_tokens=5, overlap_threshold=0.5): """Filter synthetic action by object name overlap. For each action, if any non-target object's name contains more than first_k_tokens*overlap_threshold same tokens from target object name, this action will be treated as invalid and will be filtered out. For example: If: target object = ['data_', 'usage_'] non_target_object = ['no_', 'data_', 'limitation_', 'usage_', 'set_'] first_k_tokens = 3 overlap_threshold = 0.5 non_target_object[0:first_k_tokens] = ['no_', 'data_', 'limitation_'] Overlapped tokens are ['data_'], covered ratio is len(['data_']) / len(['data_', 'usage_']) = 0.5 >= overlap_threshold So this action is invalid and will be removed. Args: action_list: list of actions screen_feature: screen feature dictionary first_k_tokens: number of heading tokens selected to calculate token overlap overlap_threshold: Threshold of object name overlap ratio. 
Returns: valid action list """ obj_tokens_id = screen_feature['ui_obj_word_id_seq'] target_obj_idx = [action.target_obj_idx for action in action_list] target_objs_first_k_token = [ screen_feature['ui_obj_word_id_seq'][idx][0:first_k_tokens] for idx in target_obj_idx ] filter_flag = [] for obj_first_k_token in target_objs_first_k_token: trim_obj_first_k_token = np.trim_zeros(obj_first_k_token) target_token_appear_times = np.sum( np.array([ np.isin(trim_obj_first_k_token, one_obj_tokens) for one_obj_tokens in obj_tokens_id ]), axis=1) filter_flag.append( np.sum(target_token_appear_times > overlap_threshold * trim_obj_first_k_token.shape[0]) <= 1) valid_actions = [ action for action, valid in zip(action_list, filter_flag) if valid ] invalid_actions = [ action for action, valid in zip(action_list, filter_flag) if not valid ] # Enable when debugging to see invalid synthetic # _stat_distribution('obj_name_invalid_synthetic', # [action.instruction_str for action in invalid_actions]) _stat_sum('obj_name_invalid_synthetic', len(invalid_actions)) return valid_actions def _filter_synthetic_by_obj_type(ui_object_list, action_list, max_num_syn_per_screen=20): """Filters synthetic data by object type. For all synthetic actions, split them into frequent/non-frequent actions. First select non-frequent actions, non-frequent actions number should be no more than max_num_syn_per_screen*_INFREQUENT_OBJ_TYPE_MAX_RATIO. Then select frequent actions, total selected actions number should be no more than max_num_syn_per_screen. 
Args: ui_object_list: list of ui objects action_list: list of actions max_num_syn_per_screen: max number of synthetic sentence for each screen Returns: valid action list """ max_infreq_num = int(max_num_syn_per_screen*_INFREQUENT_OBJ_TYPE_MAX_RATIO) freq_obj_actions = [] infreq_obj_actions = [] for action in action_list: if (ui_object_list[action.target_obj_idx].obj_type not in _FREQ_OBJ_TYPE or action.action_type == common.ActionTypes.INPUT): infreq_obj_actions.append(action) else: freq_obj_actions.append(action) if len(infreq_obj_actions) > max_infreq_num: valid_actions = np.random.choice( infreq_obj_actions, size=max_infreq_num, replace=False).tolist() else: valid_actions = infreq_obj_actions left_space = max_num_syn_per_screen - len(valid_actions) if len(freq_obj_actions) < left_space: valid_actions.extend(freq_obj_actions) else: valid_actions.extend( np.random.choice(freq_obj_actions, size=left_space, replace=False).tolist()) return valid_actions def _get_full_feature_dict(dataset_type, file_path, max_word_num, max_word_length): """Gets full padded feature dictionary from xml/json file_path. Args: dataset_type: The supported dataset type. file_path: The full path of xml/json file. max_word_num: The max number of words in each ui object. max_word_length: The max length of words in each ui object. Returns: padded_feature_dict: A dictionary that contains ui_object features, view hierarchy leaf node adjacency matrix features and synthetic action features. Each value of the feature dicionary is padded. 
The padding shape is as follow: feature_dict = { 'instruction_str': synthetic action strings, np array of string, shape = (phrase_count,) 'instruction_word_id_seq': encoded action words, np int array, shape = (phrase_count, max_word_num) 'instruction_rule_id': representing action rule(single_object_rule/ grid_context_rule/neighbor_context_rule), np int array, shape = (phrase_count,) 'ui_obj_str_seq': string of ui object name/content_description/resourceid, np string array, shape = (ui_object_num) 'ui_obj_word_id_seq': encoded word sequence, np int array, shape = (ui_object_num, max_word_num) 'ui_obj_type_id_seq': object type ids, np int array, shape = (ui_object_num,) 'ui_obj_clickable_seq': clickable sequence, np int array, shape = (ui_object_num,) 'ui_obj_cord_x_seq': x coordinate sequence, np float array, shape = (ui_object_num*2,) 'ui_obj_cord_y_seq': y coordinate sequence, np float array, shape = (ui_object_num*2,) 'ui_obj_v_distance': vertical relation matrix, np float array, shape = (ui_object_num, ui_object_num) 'ui_obj_h_distance': horizontal relation matrix, np float array, shape = (ui_object_num, ui_object_num) 'ui_obj_dom_distance': dom relation matrix, np int array, shape = (ui_object_num, ui_object_num) 'ui_obj_dom_location_seq': index of pre-order/in-order/post-order in view hierarchy tree. 
np int array, shape = (ui_object_num*3,) 'verb_id_seq': representing action verb id(click/input/swipe), np int array, shape = (phrase_count,) 'verb_str_position_seq': index of verb string, np int array, shape = (phrase_count*2,) 'ui_target_id_seq': index of ui object target, np int array, shape = (phrase_count,) 'input_str_position_seq': input words' start end position in instruction, np int array, shape = (phrase_count*2,) 'obj_desc_position_seq': target object words' start end position, np int array, shape = (phrase_count*2,) } """ view_hierarchy_leaf_nodes = common.get_view_hierarchy_list(file_path) ui_obj_list = [ele.uiobject for ele in view_hierarchy_leaf_nodes] ui_object_num = len(view_hierarchy_leaf_nodes) padded_obj_feature_dict = proto_utils.get_ui_objects_feature_dict( view_hierarchy_leaf_nodes, padding_shape=(ui_object_num, max_word_num, max_word_length), lower_case=True) actions = synthetic_action_generator.generate_all_actions( view_hierarchy_leaf_nodes, action_rules=('single', 'screen_loc', 'neighbor_loc', 'swipe')) if actions and _FILTER_ACTIONS_BY_NAME: actions = _filter_synthetic_by_name_overlap( actions, padded_obj_feature_dict, config.MAX_OBJ_NAME_WORD_NUM, overlap_threshold=0.5) if actions and _FILTER_ACTION_BY_TYPE: actions = _filter_synthetic_by_obj_type( ui_obj_list, actions, max_num_syn_per_screen=20) padded_syn_feature_dict = synthetic_action_generator.get_synthetic_feature_dict( actions, max_word_num, max_word_length) full_feature = {} full_feature.update(padded_obj_feature_dict) full_feature.update(padded_syn_feature_dict) # Normalize ui object's coordinates to be within [0,1]. 
if dataset_type == 'android_settings': screen_width, screen_height = (config.SCREEN_WIDTH, config.SCREEN_HEIGHT) else: screen_width, screen_height = (config.RICO_SCREEN_WIDTH, config.RICO_SCREEN_HEIGHT) full_feature['ui_obj_cord_x_seq'] = full_feature['ui_obj_cord_x_seq'] / float( screen_width) full_feature['ui_obj_cord_y_seq'] = full_feature['ui_obj_cord_y_seq'] / float( screen_height) return full_feature def _assert_feature_value(feature): """Asserts feature value doesn't have -1, except for anchor features.""" anchor_features = [ 'instruction_word_id_seq', 'ui_obj_type_id_seq', 'verb_id_seq' ] for key in feature: if key in anchor_features: continue if -1 in feature[key]: tf.logging.info('[FATAL]: Feature %d contains -1', key) return False return True def _assert_feature_shape(feature, expected_shape): """Asserts feature shape is legal, same as expected_shape.""" assert set(feature.keys()) == set(expected_shape.keys( )), '[FATAL] feature keys %s different from expected %s' % ( sorted(feature.keys()), sorted(expected_shape.keys())) for key in feature: if feature[key].shape != expected_shape[key]: tf.logging.info('[FATAL] feature %s shape is different from expected', key) return False return True def _produce_corpus(corpus_writer, writer_lock, file_path): """Writes all UI names.""" view_hierarchy_leaf_nodes = common.get_view_hierarchy_list(file_path) ui_obj_list = [ele.uiobject for ele in view_hierarchy_leaf_nodes] ui_names = [ui.obj_name.lower().strip() for ui in ui_obj_list if ui.obj_name] corpus = '\n'.join(ui_names) with writer_lock: corpus_writer.write(corpus + '\n') def _process_features(tf_record_writer, writer_lock, dataset_type, file_path, max_word_num, max_word_length): """Processes features from one xml/json file. Args: tf_record_writer: TFRecordWriter object writer_lock: The lock to protect the writer's write from multi threads. dataset_type: The supported dataset type. file_path: The full path of xml/json file. 
max_word_num: The max number of words in each ui object. max_word_length: The max length of words in each ui object. synthetic input actions. """ feature_dict = _get_full_feature_dict( dataset_type, file_path, max_word_num, max_word_length, ) phrase_count = feature_dict['instruction_str'].shape[0] ui_object_num = feature_dict['ui_obj_str_seq'].shape[0] expected_feature_shape = { 'instruction_str': (phrase_count,), 'instruction_word_id_seq': (phrase_count, max_word_num), 'instruction_rule_id': (phrase_count,), 'ui_obj_str_seq': (ui_object_num,), 'ui_obj_word_id_seq': (ui_object_num, max_word_num), 'ui_obj_type_id_seq': (ui_object_num,), 'ui_obj_clickable_seq': (ui_object_num,), 'ui_obj_cord_x_seq': (ui_object_num*2,), 'ui_obj_cord_y_seq': (ui_object_num*2,), 'ui_obj_v_distance': (ui_object_num, ui_object_num), 'ui_obj_h_distance': (ui_object_num, ui_object_num), 'ui_obj_dom_distance': (ui_object_num, ui_object_num), 'ui_obj_dom_location_seq': (ui_object_num*3,), 'verb_id_seq': (phrase_count,), 'ui_target_id_seq': (phrase_count,), 'verb_str_position_seq': (phrase_count*2,), 'input_str_position_seq': (phrase_count*2,), 'obj_desc_position_seq': (phrase_count*2,), } _stat_distribution('ui_obj_type_id_seq', feature_dict['ui_obj_type_id_seq']) _stat_distribution('verb_id_seq', feature_dict['verb_id_seq']) _stat_distribution('instruction_rule_id', feature_dict['instruction_rule_id']) target_objs = feature_dict['ui_target_id_seq'] _stat_distribution('target_obj_type', feature_dict['ui_obj_type_id_seq'][target_objs]) # When feature_dict['verb_id_seq'] is not always padded value, generate # tfexample if (_assert_feature_shape(feature_dict, expected_feature_shape) and _assert_feature_value(feature_dict) and not np.array(feature_dict['verb_id_seq'] == config.LABEL_DEFAULT_INVALID_INT).all()): tf_proto = proto_utils.features_to_tf_example(feature_dict) with writer_lock: tf_record_writer.write(tf_proto.SerializeToString()) def _write_dataset(dataset_type, input_dir, output_dir, 
max_word_num, max_word_length): """Processes features from xml/json files and writes results to dataset files. Args: dataset_type: The supported dataset type. input_dir: The directory that contains the input xml/json files. output_dir: The directory that saves output dimension data. max_word_num: The max number of words in each ui object. max_word_length: The max length of words in each ui object. synthetic input actions. """ tf.logging.info('Processing data features...') tf_record_writers = [] writer_locks = [] for shard in range(FLAGS.num_shards): tf_record_writers.append( tf.python_io.TFRecordWriter( os.path.join( output_dir, 'one_shot_%s_%s_%d.tfrecord' % (dataset_type, _TOKEN_TYPE, shard)))) writer_locks.append(threading.Lock()) num_processed_files = 0 # We can use ThreadPool since these are IO-bound operations. with concurrent.futures.ThreadPoolExecutor(FLAGS.num_threads) as executor: futures = [] all_file_path = gfile.Glob(os.path.join(input_dir, '*.xml')) + gfile.Glob( os.path.join(input_dir, '*.json')) all_file_path = filter_file_by_name(all_file_path) assert len(all_file_path) == 24598 for file_path in sorted(all_file_path): shard = num_processed_files % FLAGS.num_shards futures.append( executor.submit(_process_features, tf_record_writers[shard], writer_locks[shard], dataset_type, file_path, max_word_num, max_word_length)) num_processed_files += 1 concurrent.futures.wait(futures) for shard in range(FLAGS.num_shards): tf_record_writers[shard].close() def filter_file_by_name(file_path): """Filters input file by name.""" filter_filepath = FLAGS.filter_file valid_data_set = set() with gfile.Open(filter_filepath, 'r') as f: for line in f.read().split('\n'): valid_data_set.add(line) return [fp for fp in file_path if fp.split('/')[-1] in valid_data_set] def create_dataset(dataset_type, input_dir, output_dir): """Converts xml/json files in input_dir to tfExample files in output_dir. 
tfExample file contains multiple tf.Example proto and the feature dictionary of tf.Example proto defined by _get_full_feature_dict() Args: dataset_type: The supported dataset type. input_dir: The directory that contains input xml/json file. output_dir: The directory that saves output files. """ if _STATS_DIMENSIONS: max_word_num, max_word_length = _process_dimensions(input_dir, output_dir) else: max_word_num, max_word_length = _MAX_WORD_NUM, _MAX_WORD_LENGTH _write_dataset(dataset_type, input_dir, output_dir, max_word_num, max_word_length) def main(_): create_dataset(FLAGS.dataset, FLAGS.input_dir, FLAGS.output_dir) tf.logging.info('\n\n%s\n\n', longest_stats) if FLAGS.file_to_generate == 'tf_example': with open(os.path.join(FLAGS.output_dir, 'stats.txt'), 'w+') as writer: for key, distribution in distributions.items(): writer.write( '%s: %s\n' % (key, sorted(distribution.items(), key=operator.itemgetter(0)))) for key, distribution in sums.items(): writer.write('%s: %s\n' % (key, sorted(sums.items(), key=operator.itemgetter(0)))) if __name__ == '__main__': FLAGS.set_default('logtostderr', True) tf.app.run(main)
apache-2.0
sumedhasingla/VTK
ThirdParty/Twisted/twisted/python/win32.py
36
5436
# -*- test-case-name: twisted.python.test.test_win32 -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Win32 utilities. See also twisted.python.shortcut. @var O_BINARY: the 'binary' mode flag on Windows, or 0 on other platforms, so it may safely be OR'ed into a mask for os.open. """ from __future__ import division, absolute_import import re import os try: import win32api import win32con except ImportError: pass from twisted.python.runtime import platform # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/debug/base/system_error_codes.asp ERROR_FILE_NOT_FOUND = 2 ERROR_PATH_NOT_FOUND = 3 ERROR_INVALID_NAME = 123 ERROR_DIRECTORY = 267 O_BINARY = getattr(os, "O_BINARY", 0) class FakeWindowsError(OSError): """ Stand-in for sometimes-builtin exception on platforms for which it is missing. """ try: WindowsError = WindowsError except NameError: WindowsError = FakeWindowsError # XXX fix this to use python's builtin _winreg? def getProgramsMenuPath(): """ Get the path to the Programs menu. Probably will break on non-US Windows. @return: the filesystem location of the common Start Menu->Programs. @rtype: L{str} """ if not platform.isWindows(): return "C:\\Windows\\Start Menu\\Programs" keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders' hShellFolders = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, keyname, 0, win32con.KEY_READ) return win32api.RegQueryValueEx(hShellFolders, 'Common Programs')[0] def getProgramFilesPath(): """Get the path to the Program Files folder.""" keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion' currentV = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, keyname, 0, win32con.KEY_READ) return win32api.RegQueryValueEx(currentV, 'ProgramFilesDir')[0] _cmdLineQuoteRe = re.compile(r'(\\*)"') _cmdLineQuoteRe2 = re.compile(r'(\\+)\Z') def cmdLineQuote(s): """ Internal method for quoting a single command-line argument. 
@param s: an unquoted string that you want to quote so that something that does cmd.exe-style unquoting will interpret it as a single argument, even if it contains spaces. @type s: C{str} @return: a quoted string. @rtype: C{str} """ quote = ((" " in s) or ("\t" in s) or ('"' in s) or s == '') and '"' or '' return quote + _cmdLineQuoteRe2.sub(r"\1\1", _cmdLineQuoteRe.sub(r'\1\1\\"', s)) + quote def quoteArguments(arguments): """ Quote an iterable of command-line arguments for passing to CreateProcess or a similar API. This allows the list passed to C{reactor.spawnProcess} to match the child process's C{sys.argv} properly. @param arglist: an iterable of C{str}, each unquoted. @return: a single string, with the given sequence quoted as necessary. """ return ' '.join([cmdLineQuote(a) for a in arguments]) class _ErrorFormatter(object): """ Formatter for Windows error messages. @ivar winError: A callable which takes one integer error number argument and returns an L{exceptions.WindowsError} instance for that error (like L{ctypes.WinError}). @ivar formatMessage: A callable which takes one integer error number argument and returns a C{str} giving the message for that error (like L{win32api.FormatMessage}). @ivar errorTab: A mapping from integer error numbers to C{str} messages which correspond to those erorrs (like L{socket.errorTab}). """ def __init__(self, WinError, FormatMessage, errorTab): self.winError = WinError self.formatMessage = FormatMessage self.errorTab = errorTab def fromEnvironment(cls): """ Get as many of the platform-specific error translation objects as possible and return an instance of C{cls} created with them. 
""" try: from ctypes import WinError except ImportError: WinError = None try: from win32api import FormatMessage except ImportError: FormatMessage = None try: from socket import errorTab except ImportError: errorTab = None return cls(WinError, FormatMessage, errorTab) fromEnvironment = classmethod(fromEnvironment) def formatError(self, errorcode): """ Returns the string associated with a Windows error message, such as the ones found in socket.error. Attempts direct lookup against the win32 API via ctypes and then pywin32 if available), then in the error table in the socket module, then finally defaulting to C{os.strerror}. @param errorcode: the Windows error code @type errorcode: C{int} @return: The error message string @rtype: C{str} """ if self.winError is not None: return self.winError(errorcode).strerror if self.formatMessage is not None: return self.formatMessage(errorcode) if self.errorTab is not None: result = self.errorTab.get(errorcode) if result is not None: return result return os.strerror(errorcode) formatError = _ErrorFormatter.fromEnvironment().formatError
bsd-3-clause
mlperf/training_results_v0.5
v0.5.0/google/cloud_v2.8/resnet-tpuv2-8/code/resnet/model/models/official/utils/misc/model_helpers_test.py
4
4502
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Tests for Model Helper functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf # pylint: disable=g-bad-import-order from official.utils.misc import model_helpers class PastStopThresholdTest(tf.test.TestCase): """Tests for past_stop_threshold.""" def test_past_stop_threshold(self): """Tests for normal operating conditions.""" self.assertTrue(model_helpers.past_stop_threshold(0.54, 1)) self.assertTrue(model_helpers.past_stop_threshold(54, 100)) self.assertFalse(model_helpers.past_stop_threshold(0.54, 0.1)) self.assertFalse(model_helpers.past_stop_threshold(-0.54, -1.5)) self.assertTrue(model_helpers.past_stop_threshold(-0.54, 0)) self.assertTrue(model_helpers.past_stop_threshold(0, 0)) self.assertTrue(model_helpers.past_stop_threshold(0.54, 0.54)) def test_past_stop_threshold_none_false(self): """Tests that check None returns false.""" self.assertFalse(model_helpers.past_stop_threshold(None, -1.5)) self.assertFalse(model_helpers.past_stop_threshold(None, None)) self.assertFalse(model_helpers.past_stop_threshold(None, 1.5)) # Zero should be okay, though. 
self.assertTrue(model_helpers.past_stop_threshold(0, 1.5)) def test_past_stop_threshold_not_number(self): """Tests for error conditions.""" with self.assertRaises(ValueError): model_helpers.past_stop_threshold("str", 1) with self.assertRaises(ValueError): model_helpers.past_stop_threshold("str", tf.constant(5)) with self.assertRaises(ValueError): model_helpers.past_stop_threshold("str", "another") with self.assertRaises(ValueError): model_helpers.past_stop_threshold(0, None) with self.assertRaises(ValueError): model_helpers.past_stop_threshold(0.7, "str") with self.assertRaises(ValueError): model_helpers.past_stop_threshold(tf.constant(4), None) class SyntheticDataTest(tf.test.TestCase): """Tests for generate_synthetic_data.""" def test_generate_synethetic_data(self): input_element, label_element = model_helpers.generate_synthetic_data( input_shape=tf.TensorShape([5]), input_value=123, input_dtype=tf.float32, label_shape=tf.TensorShape([]), label_value=456, label_dtype=tf.int32).make_one_shot_iterator().get_next() with self.test_session() as sess: for n in range(5): inp, lab = sess.run((input_element, label_element)) self.assertAllClose(inp, [123., 123., 123., 123., 123.]) self.assertEquals(lab, 456) def test_generate_only_input_data(self): d = model_helpers.generate_synthetic_data( input_shape=tf.TensorShape([4]), input_value=43.5, input_dtype=tf.float32) element = d.make_one_shot_iterator().get_next() self.assertFalse(isinstance(element, tuple)) with self.test_session() as sess: inp = sess.run(element) self.assertAllClose(inp, [43.5, 43.5, 43.5, 43.5]) def test_generate_nested_data(self): d = model_helpers.generate_synthetic_data( input_shape={'a': tf.TensorShape([2]), 'b': {'c': tf.TensorShape([3]), 'd': tf.TensorShape([])}}, input_value=1.1) element = d.make_one_shot_iterator().get_next() self.assertIn('a', element) self.assertIn('b', element) self.assertEquals(len(element['b']), 2) self.assertIn('c', element['b']) self.assertIn('d', element['b']) 
self.assertNotIn('c', element) with self.test_session() as sess: inp = sess.run(element) self.assertAllClose(inp['a'], [1.1, 1.1]) self.assertAllClose(inp['b']['c'], [1.1, 1.1, 1.1]) self.assertAllClose(inp['b']['d'], 1.1) if __name__ == "__main__": tf.test.main()
apache-2.0
joaquimrocha/Rancho
rancho/lib/templatetags/displaystatistic.py
1
1691
######################################################################## # Rancho - Open Source Group/Project Management Tool # Copyright (C) 2008 The Rancho Team # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ######################################################################## from django import template from django.utils.translation import ugettext as _ from message.models import Message from milestone.models import Milestone from lib import utils register = template.Library() def displaystatistic(context, name, trans_name, number): icons_folder = '/media/basepage/images/icons/' icon = '' if name == 'message': icon = 'comment.png' elif name == 'milestone': icon = 'clock.png' elif name == 'wikiboard': icon = 'page.png' elif name == 'file': icon = 'page_white_put.png' elif name == 'todo': icon = 'note.png' icon = icons_folder + icon return {'icon': icon, 'name': trans_name, 'number': number} register.inclusion_tag("lib/displaystatistic.html", takes_context=True)(displaystatistic)
agpl-3.0
qnib/QNIBCollect
src/diamond/collectors/disktemp/disktemp.py
49
3036
# coding=utf-8

"""
Collect disk temperature with S.M.A.R.T

This collector use hddtemp to collect only the disk temperature from
the disk S.M.A.R.T information. This can be faster than smartctl since
it only extracts a single value.

#### Dependencies

 * [hddtemp](http://www.guzu.net/linux/hddtemp.php)

"""

import os
import re
import subprocess

import diamond.collector
from diamond.collector import str_to_bool


class DiskTemperatureCollector(diamond.collector.Collector):

    def process_config(self):
        """Compile the configured device regex once, up front."""
        super(DiskTemperatureCollector, self).process_config()
        self.devices = re.compile(self.config['devices'])

    def get_default_config_help(self):
        """Document the collector-specific configuration keys."""
        config_help = super(DiskTemperatureCollector,
                            self).get_default_config_help()
        config_help.update({
            'devices': "device regex to collect stats on",
            'bin': 'The path to the hddtemp binary',
            'use_sudo': 'Use sudo?',
            'sudo_cmd': 'Path to sudo',
        })
        return config_help

    def get_default_config(self):
        """
        Returns default configuration options.
        """
        config = super(DiskTemperatureCollector, self).get_default_config()
        config.update({
            'path': 'disktemp',
            'bin': 'hddtemp',
            'use_sudo': False,
            'sudo_cmd': '/usr/bin/sudo',
            'devices': '^disk[0-9]$|^sd[a-z]$|^hd[a-z]$'
        })
        return config

    def get_temp(self, device):
        """
        Start an asynchronous hddtemp probe for *device*.

        Returns the Popen handle; the caller reads the temperature later
        via communicate(), so all probes run concurrently.
        """
        command = [self.config['bin'], '-n', device]

        if str_to_bool(self.config['use_sudo']):
            command.insert(0, self.config['sudo_cmd'])

        return subprocess.Popen(command, stdout=subprocess.PIPE)

    def match_device(self, device, path):
        """
        If *device* (a name found under *path*) matches the configured
        regex, start a probe and return {metric_key: Popen}; else {}.
        """
        m = self.devices.match(device)
        if m:
            key = device
            # If the regex has a capture group for pretty printing, pick
            # the last matched capture group
            if self.devices.groups > 0:
                key = '.'.join(filter(None, [g for g in m.groups()]))
            # Bug fix: resolve the node under the directory it was listed
            # from; previously this hardcoded /dev, which produced wrong
            # paths for /dev/disk/by-id entries.
            return {key: self.get_temp(os.path.join(path, device))}

        return {}

    def collect(self):
        """
        Collect and publish disk temperatures
        """
        instances = {}

        # Support disks such as /dev/(sd.*)
        for device in os.listdir('/dev/'):
            instances.update(self.match_device(device, '/dev/'))

        # Support disk by id such as /dev/disk/by-id/wwn-(.*)
        # Bug fix: the loop previously passed `device` (the stale variable
        # from the loop above) instead of `device_id`, so by-id links were
        # never actually probed. Also guard against the directory being
        # absent on systems without udev.
        by_id_path = '/dev/disk/by-id/'
        if os.path.isdir(by_id_path):
            for device_id in os.listdir(by_id_path):
                instances.update(self.match_device(device_id, by_id_path))

        metrics = {}
        for device, p in instances.items():
            output = p.communicate()[0].strip()
            try:
                metrics[device + ".Temperature"] = float(output)
            # Narrowed from a bare except: only a non-numeric hddtemp
            # answer (e.g. an error string) is expected here.
            except (TypeError, ValueError):
                self.log.warn('Disk temperature retrieval failed on ' +
                              device)

        for metric in metrics.keys():
            self.publish(metric, metrics[metric])
apache-2.0
chushao/Gradesource-Uploader---GUI
Gradesource-Uploader-master/gradesourceuploader.py
2
2394
# gradesourceuploader.py
# Chu Shao
# Dec 22, 2012
# cshao@eng.ucsd.edu

from gradesourcesession import GradesourceSession
from getpass import getpass

# Thin convenience wrappers around GradesourceSession.
#
# The CLI variants prompt for the password interactively via getpass; the
# *GUI variants receive it as an argument instead. DO NOT call the GUI
# variants from a terminal -- the password would travel as plaintext.
#
# The login name and course ID could also be hardcoded as module-level
# globals, e.g.:
#   staticLogin = 'moocow'
#   staticCourseID = '12345'


def _prompted_session(login, courseID):
    # Ask for the password on stdin, then open the Gradesource session.
    return GradesourceSession(login, getpass('Password: '), courseID)


# To use: gradesourceuploader.downloadEmail(loginname, courseID)
def downloadEmail(login, courseID):
    _prompted_session(login, courseID).downloadEmail()


def downloadiClicker(login, courseID):
    _prompted_session(login, courseID).downloadiClicker()


# To use: gradesourceuploader.updateScoresByEmail(loginname, courseID,
#         assignment name, CSVFile, overwrite)
def updateScoresByEmail(login, courseID, assignmentID, CSVFile, overwrite):
    _prompted_session(login, courseID).updateEmailScore(
        assignmentID, CSVFile, overwrite)


def updateScoresByPID(login, courseID, assignmentID, CSVFile, overwrite):
    _prompted_session(login, courseID).updatePIDScore(
        assignmentID, CSVFile, overwrite)


# ---- GUI entry points: the password arrives from the caller. ----

def downloadEmailGUI(login, courseID, password):
    GradesourceSession(login, password, courseID).downloadEmail()


def downloadiClickerGUI(login, courseID, password):
    GradesourceSession(login, password, courseID).downloadiClicker()


def updateScoresByEmailGUI(login, courseID, assignmentID, CSVFile, password,
                           overwrite):
    GradesourceSession(login, password, courseID).updateEmailScore(
        assignmentID, CSVFile, overwrite)


def updateScoresByPIDGUI(login, courseID, assignmentID, CSVFile, password,
                         overwrite):
    GradesourceSession(login, password, courseID).updatePIDScore(
        assignmentID, CSVFile, overwrite)
mit
tuxfux-hlp-notes/python-batches
batch-67/19-files/myenv/lib/python2.7/site-packages/pip/_vendor/ordereddict.py
1047
4094
# Copyright (c) 2009 Raymond Hettinger # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
from UserDict import DictMixin


class OrderedDict(dict, DictMixin):
    # Python-2 backport of collections.OrderedDict (Raymond Hettinger's
    # recipe). Insertion order is tracked with a circular doubly linked
    # list of [key, prev, next] cells; self.__end is the sentinel node and
    # self.__map gives O(1) access to any key's cell.

    def __init__(self, *args, **kwds):
        # At most one positional argument (an iterable/mapping), like dict.
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Only initialize the linked list on first construction;
            # a repeated __init__ must not discard existing order.
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        self.__end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.__map = {}                 # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        # New keys are appended at the tail (just before the sentinel);
        # existing keys keep their position, only the value changes.
        if key not in self:
            end = self.__end
            curr = end[1]
            curr[2] = end[1] = self.__map[key] = [key, curr, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the list in O(1).
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        # Walk the linked list forward, yielding keys in insertion order.
        end = self.__end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk the linked list backward.
        end = self.__end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO if last is true."""
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Pickle support: serialize as (class, items) and temporarily strip
        # the unpicklable linked-list attributes from the instance dict.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    # The remaining mapping methods are derived from __iter__/__getitem__
    # and friends by DictMixin.
    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Alternate constructor: every key maps to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Equality with another OrderedDict is order-sensitive; equality
        # with a plain dict falls back to order-insensitive comparison.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other
gpl-3.0
MicroTrustRepos/microkernel
src/l4/pkg/python/contrib/Lib/test/test_nis.py
58
1317
from test import test_support
import unittest
import nis


class NisTests(unittest.TestCase):
    # Smoke test for the Python 2 `nis` module: enumerate the NIS maps and
    # verify that nis.match() agrees with nis.cat() for one key per map.

    def test_maps(self):
        try:
            maps = nis.maps()
        except nis.error, msg:
            # NIS is probably not active, so this test isn't useful
            if test_support.verbose:
                print "Test Skipped:", msg
            # Can't raise TestSkipped as regrtest only recognizes the exception
            # import time.
            return
        try:
            # On some systems, this map is only accessible to the
            # super user
            maps.remove("passwd.adjunct.byname")
        except ValueError:
            # Map wasn't present; nothing to exclude.
            pass

        done = 0
        for nismap in maps:
            mapping = nis.cat(nismap)
            for k, v in mapping.items():
                if not k:
                    continue
                if nis.match(k, nismap) != v:
                    self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
                else:
                    # just test the one key, otherwise this test could take a
                    # very long time
                    done = 1
                    break
            if done:
                break


def test_main():
    test_support.run_unittest(NisTests)


if __name__ == '__main__':
    test_main()
gpl-2.0
sylarcp/anita
venv/lib/python2.7/site-packages/wheel/test/test_install.py
455
1866
# Test wheel. # The file has the following contents: # hello.pyd # hello/hello.py # hello/__init__.py # test-1.0.data/data/hello.dat # test-1.0.data/headers/hello.dat # test-1.0.data/scripts/hello.sh # test-1.0.dist-info/WHEEL # test-1.0.dist-info/METADATA # test-1.0.dist-info/RECORD # The root is PLATLIB # So, some in PLATLIB, and one in each of DATA, HEADERS and SCRIPTS. import wheel.tool import wheel.pep425tags from wheel.install import WheelFile from tempfile import mkdtemp import shutil import os THISDIR = os.path.dirname(__file__) TESTWHEEL = os.path.join(THISDIR, 'test-1.0-py2.py3-none-win32.whl') def check(*path): return os.path.exists(os.path.join(*path)) def test_install(): tempdir = mkdtemp() def get_supported(): return list(wheel.pep425tags.get_supported()) + [('py3', 'none', 'win32')] whl = WheelFile(TESTWHEEL, context=get_supported) assert whl.supports_current_python(get_supported) try: locs = {} for key in ('purelib', 'platlib', 'scripts', 'headers', 'data'): locs[key] = os.path.join(tempdir, key) os.mkdir(locs[key]) whl.install(overrides=locs) assert len(os.listdir(locs['purelib'])) == 0 assert check(locs['platlib'], 'hello.pyd') assert check(locs['platlib'], 'hello', 'hello.py') assert check(locs['platlib'], 'hello', '__init__.py') assert check(locs['data'], 'hello.dat') assert check(locs['headers'], 'hello.dat') assert check(locs['scripts'], 'hello.sh') assert check(locs['platlib'], 'test-1.0.dist-info', 'RECORD') finally: shutil.rmtree(tempdir) def test_install_tool(): """Slightly improve coverage of wheel.install""" wheel.tool.install([TESTWHEEL], force=True, dry_run=True)
mit
okwow123/djangol2
example/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py
216
13871
'''SSL with SNI_-support for Python 2. This needs the following packages installed: * pyOpenSSL (tested with 0.13) * ndg-httpsclient (tested with 0.3.2) * pyasn1 (tested with 0.1.6) To activate it call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3`. This can be done in a ``sitecustomize`` module, or at any other time before your application begins using ``urllib3``, like this:: try: import urllib3.contrib.pyopenssl urllib3.contrib.pyopenssl.inject_into_urllib3() except ImportError: pass Now you can use :mod:`urllib3` as you normally would, and it will support SNI when the required modules are installed. Activating this module also has the positive side effect of disabling SSL/TLS encryption in Python 2 (see `CRIME attack`_). If you want to configure the default list of supported cipher suites, you can set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable. Module Variables ---------------- :var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites. Default: ``EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA256 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EDH+aRSA EECDH RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS'`` .. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication .. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit) ''' from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName import OpenSSL.SSL from pyasn1.codec.der import decoder as der_decoder from pyasn1.type import univ, constraint from socket import _fileobject import ssl import select from cStringIO import StringIO from .. import connection from .. import util __all__ = ['inject_into_urllib3', 'extract_from_urllib3'] # SNI only *really* works if we can read the subjectAltName of certificates. HAS_SNI = SUBJ_ALT_NAME_SUPPORT # Map from urllib3 to PyOpenSSL compatible parameter-values. 
_openssl_versions = {
    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}

# Map from ssl-module certificate requirement constants to the
# corresponding PyOpenSSL verify flags.
_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
                       + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}

# Default SSL/TLS cipher list.
# Recommendation by https://community.qualys.com/blogs/securitylabs/2013/08/05/
# configuring-apache-nginx-and-openssl-for-forward-secrecy
DEFAULT_SSL_CIPHER_LIST = 'EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM ' + \
        'EECDH+ECDSA+SHA256 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EDH+aRSA ' + \
        'EECDH RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS'


# Originals are saved at import time so extract_from_urllib3() can undo
# the monkey-patching performed by inject_into_urllib3().
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket


def inject_into_urllib3():
    'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'

    connection.ssl_wrap_socket = ssl_wrap_socket
    util.HAS_SNI = HAS_SNI


def extract_from_urllib3():
    'Undo monkey-patching by :func:`inject_into_urllib3`.'

    connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
    util.HAS_SNI = orig_util_HAS_SNI


### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
    '''ASN.1 implementation for subjectAltNames support'''

    # There is no limit to how many SAN certificates a certificate may have,
    # however this needs to have some limit so we'll set an arbitrarily high
    # limit.
    sizeSpec = univ.SequenceOf.sizeSpec + \
        constraint.ValueSizeConstraint(1, 1024)


### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
    """Return the list of dNSName entries from *peer_cert*'s SAN extension."""
    # Search through extensions
    dns_name = []
    if not SUBJ_ALT_NAME_SUPPORT:
        return dns_name

    general_names = SubjectAltName()
    for i in range(peer_cert.get_extension_count()):
        ext = peer_cert.get_extension(i)
        ext_name = ext.get_short_name()
        if ext_name != 'subjectAltName':
            continue

        # PyOpenSSL returns extension data in ASN.1 encoded form
        ext_dat = ext.get_data()
        decoded_dat = der_decoder.decode(ext_dat, asn1Spec=general_names)

        for name in decoded_dat:
            if not isinstance(name, SubjectAltName):
                continue
            for entry in range(len(name)):
                component = name.getComponentByPosition(entry)
                # Only DNS names are of interest for hostname matching.
                if component.getName() != 'dNSName':
                    continue
                dns_name.append(str(component.getComponent()))

    return dns_name


class fileobject(_fileobject):
    # socket._fileobject subclass whose read/readline retry on PyOpenSSL's
    # WantReadError, so a wrapped connection behaves like a plain socket
    # file object even when the TLS layer needs more handshake/record data.

    def read(self, size=-1):
        # Use max, disallow tiny reads in a loop as they are very inefficient.
        # We never leave read() with any leftover data from a new recv() call
        # in our internal buffer.
        rbufsize = max(self._rbufsize, self.default_bufsize)
        # Our use of StringIO rather than lists of string objects returned by
        # recv() minimizes memory usage and fragmentation that occurs when
        # rbufsize is large compared to the typical return value of recv().
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if size < 0:
            # Read until EOF
            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(rbufsize)
                except OpenSSL.SSL.WantReadError:
                    # TLS layer needs another record; retry the recv.
                    continue
                if not data:
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or EOF seen, whichever comes first
            buf_len = buf.tell()
            if buf_len >= size:
                # Already have size bytes in our buffer?  Extract and return.
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv

            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                left = size - buf_len
                # recv() will malloc the amount of memory given as its
                # parameter even though it often returns much less data
                # than that.  The returned data string is short lived
                # as we copy it into a StringIO and free it.  This avoids
                # fragmentation issues on many platforms.
                try:
                    data = self._sock.recv(left)
                except OpenSSL.SSL.WantReadError:
                    continue
                if not data:
                    break
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut.  Avoid buffer data copies when:
                    # - We have no data in our buffer.
                    # AND
                    # - Our call to recv returned exactly the
                    #   number of bytes we were asked to read.
                    return data
                if n == left:
                    buf.write(data)
                    del data  # explicit free
                    break
                assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                buf.write(data)
                buf_len += n
                del data  # explicit free
                #assert buf_len == buf.tell()
            return buf.getvalue()

    def readline(self, size=-1):
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if buf.tell() > 0:
            # check if we already have it in our buffer
            buf.seek(0)
            bline = buf.readline(size)
            if bline.endswith('\n') or len(bline) == size:
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return bline
            del bline
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                buf.seek(0)
                buffers = [buf.read()]
                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
                data = None
                recv = self._sock.recv
                while True:
                    try:
                        while data != "\n":
                            data = recv(1)
                            if not data:
                                break
                            buffers.append(data)
                    except OpenSSL.SSL.WantReadError:
                        continue
                    break
                return "".join(buffers)

            buf.seek(0, 2)  # seek end
            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except OpenSSL.SSL.WantReadError:
                    continue
                if not data:
                    break
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    # Keep through the newline; stash the rest for later.
                    buf.write(data[:nl])
                    self._rbuf.write(data[nl:])
                    del data
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            buf.seek(0, 2)  # seek end
            buf_len = buf.tell()
            if buf_len >= size:
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except OpenSSL.SSL.WantReadError:
                    continue
                if not data:
                    break
                left = size - buf_len
                # did we just receive a newline?
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    # save the excess data to _rbuf
                    self._rbuf.write(data[nl:])
                    if buf_len:
                        buf.write(data[:nl])
                        break
                    else:
                        # Shortcut.  Avoid data copy through buf when
                        # returning a substring of our first recv().
                        return data[:nl]
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut.  Avoid data copy through buf when
                    # returning exactly all of our first recv().
                    return data
                if n >= left:
                    buf.write(data[:left])
                    self._rbuf.write(data[left:])
                    break
                buf.write(data)
                buf_len += n
                #assert buf_len == buf.tell()
            return buf.getvalue()


class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.'''

    def __init__(self, connection, socket):
        self.connection = connection
        self.socket = socket

    def fileno(self):
        return self.socket.fileno()

    def makefile(self, mode, bufsize=-1):
        # Hand back the retry-aware fileobject defined above.
        return fileobject(self.connection, mode, bufsize)

    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)

    def sendall(self, data):
        return self.connection.sendall(data)

    def close(self):
        # shutdown() sends the TLS close_notify alert.
        return self.connection.shutdown()

    def getpeercert(self, binary_form=False):
        """Return the peer certificate in the ssl-module's dict/DER shape."""
        x509 = self.connection.get_peer_certificate()
        if not x509:
            return x509

        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)

        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': [
                ('DNS', value)
                for value in get_subj_alt_name(x509)
            ]
        }


def _verify_callback(cnx, x509, err_no, err_depth, return_code):
    # Accept the certificate iff OpenSSL reported no verification error.
    return err_no == 0


def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None):
    """Drop-in replacement for connection.ssl_wrap_socket using PyOpenSSL."""
    ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
    if certfile:
        ctx.use_certificate_file(certfile)
    if keyfile:
        ctx.use_privatekey_file(keyfile)
    if cert_reqs != ssl.CERT_NONE:
        ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
    if ca_certs:
        try:
            ctx.load_verify_locations(ca_certs, None)
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)

    # Disable TLS compression to migitate CRIME attack (issue #309)
    OP_NO_COMPRESSION = 0x20000
    ctx.set_options(OP_NO_COMPRESSION)

    # Set list of supported ciphersuites.
    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)

    cnx = OpenSSL.SSL.Connection(ctx, sock)
    # SNI: send the target hostname during the handshake.
    cnx.set_tlsext_host_name(server_hostname)
    cnx.set_connect_state()
    while True:
        try:
            cnx.do_handshake()
        except OpenSSL.SSL.WantReadError:
            # Wait until the socket is readable, then retry the handshake.
            select.select([sock], [], [])
            continue
        except OpenSSL.SSL.Error as e:
            raise ssl.SSLError('bad handshake', e)
        break

    return WrappedSocket(cnx, sock)
mit
kariefury/sailor-atlas
sailor-atlas/main.py
1
1590
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import webapp2 import os #from tweepy import * from google.appengine.ext.webapp import template class MainHandler(webapp2.RequestHandler): def get(self): path = os.path.join(os.path.dirname(__file__), 'index.html') self.response.out.write(template.render(path,{})) class DataHandler(webapp2.RequestHandler): def get(self): path = os.path.join(os.path.dirname(__file__), 'data.json') self.response.out.write( template.render(path, {} ) ) # def twitterData(): # consumer_key = '' # consumer_secret = '' # access_token = '' # access_token_secret = '' # auth = tweepy.OAuthHandler(consumer_key, consumer_secret) # auth.set_access_token(access_token, access_token_secret) # api = tweepy.API(auth) # public_tweets = api.home_timeline() # #for tweet in public_tweets: # # print tweet.text # #return {} app = webapp2.WSGIApplication([ ('/', MainHandler), ('/data/', DataHandler) ], debug=True)
gpl-2.0
rhertzog/django
django/apps/registry.py
51
16980
import sys import threading import warnings from collections import Counter, OrderedDict, defaultdict from functools import partial from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured from django.utils import lru_cache from .config import AppConfig class Apps(object): """ A registry that stores the configuration of installed applications. It also keeps track of models eg. to provide reverse-relations. """ def __init__(self, installed_apps=()): # installed_apps is set to None when creating the master registry # because it cannot be populated at that point. Other registries must # provide a list of installed apps and are populated immediately. if installed_apps is None and hasattr(sys.modules[__name__], 'apps'): raise RuntimeError("You must supply an installed_apps argument.") # Mapping of app labels => model names => model classes. Every time a # model is imported, ModelBase.__new__ calls apps.register_model which # creates an entry in all_models. All imported models are registered, # regardless of whether they're defined in an installed application # and whether the registry has been populated. Since it isn't possible # to reimport a module safely (it could reexecute initialization code) # all_models is never overridden or reset. self.all_models = defaultdict(OrderedDict) # Mapping of labels to AppConfig instances for installed apps. self.app_configs = OrderedDict() # Stack of app_configs. Used to store the current state in # set_available_apps and set_installed_apps. self.stored_app_configs = [] # Whether the registry is populated. self.apps_ready = self.models_ready = self.ready = False # Lock for thread-safe population. self._lock = threading.Lock() # Maps ("app_label", "modelname") tuples to lists of functions to be # called when the corresponding model is ready. Used by this class's # `lazy_model_operation()` and `do_pending_operations()` methods. 
self._pending_operations = defaultdict(list) # Populate apps and models, unless it's the master registry. if installed_apps is not None: self.populate(installed_apps) def populate(self, installed_apps=None): """ Loads application configurations and models. This method imports each application module and then each model module. It is thread safe and idempotent, but not reentrant. """ if self.ready: return # populate() might be called by two threads in parallel on servers # that create threads before initializing the WSGI callable. with self._lock: if self.ready: return # app_config should be pristine, otherwise the code below won't # guarantee that the order matches the order in INSTALLED_APPS. if self.app_configs: raise RuntimeError("populate() isn't reentrant") # Load app configs and app modules. for entry in installed_apps: if isinstance(entry, AppConfig): app_config = entry else: app_config = AppConfig.create(entry) if app_config.label in self.app_configs: raise ImproperlyConfigured( "Application labels aren't unique, " "duplicates: %s" % app_config.label) self.app_configs[app_config.label] = app_config # Check for duplicate app names. counts = Counter( app_config.name for app_config in self.app_configs.values()) duplicates = [ name for name, count in counts.most_common() if count > 1] if duplicates: raise ImproperlyConfigured( "Application names aren't unique, " "duplicates: %s" % ", ".join(duplicates)) self.apps_ready = True # Load models. for app_config in self.app_configs.values(): all_models = self.all_models[app_config.label] app_config.import_models(all_models) self.clear_cache() self.models_ready = True for app_config in self.get_app_configs(): app_config.ready() self.ready = True def check_apps_ready(self): """ Raises an exception if all apps haven't been imported yet. """ if not self.apps_ready: raise AppRegistryNotReady("Apps aren't loaded yet.") def check_models_ready(self): """ Raises an exception if all models haven't been imported yet. 
""" if not self.models_ready: raise AppRegistryNotReady("Models aren't loaded yet.") def get_app_configs(self): """ Imports applications and returns an iterable of app configs. """ self.check_apps_ready() return self.app_configs.values() def get_app_config(self, app_label): """ Imports applications and returns an app config for the given label. Raises LookupError if no application exists with this label. """ self.check_apps_ready() try: return self.app_configs[app_label] except KeyError: message = "No installed app with label '%s'." % app_label for app_config in self.get_app_configs(): if app_config.name == app_label: message += " Did you mean '%s'?" % app_config.label break raise LookupError(message) # This method is performance-critical at least for Django's test suite. @lru_cache.lru_cache(maxsize=None) def get_models(self, include_auto_created=False, include_swapped=False): """ Returns a list of all installed models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models created to satisfy deferred attribute queries, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. """ self.check_models_ready() result = [] for app_config in self.app_configs.values(): result.extend(list(app_config.get_models(include_auto_created, include_swapped))) return result def get_model(self, app_label, model_name=None): """ Returns the model matching the given app_label and model_name. As a shortcut, this function also accepts a single argument in the form <app_label>.<model_name>. model_name is case-insensitive. Raises LookupError if no application exists with this label, or no model exists with this name in the application. Raises ValueError if called with a single argument that doesn't contain exactly one dot. 
""" self.check_models_ready() if model_name is None: app_label, model_name = app_label.split('.') return self.get_app_config(app_label).get_model(model_name.lower()) def register_model(self, app_label, model): # Since this method is called when models are imported, it cannot # perform imports because of the risk of import loops. It mustn't # call get_app_config(). model_name = model._meta.model_name app_models = self.all_models[app_label] if model_name in app_models: if (model.__name__ == app_models[model_name].__name__ and model.__module__ == app_models[model_name].__module__): warnings.warn( "Model '%s.%s' was already registered. " "Reloading models is not advised as it can lead to inconsistencies, " "most notably with related models." % (app_label, model_name), RuntimeWarning, stacklevel=2) else: raise RuntimeError( "Conflicting '%s' models in application '%s': %s and %s." % (model_name, app_label, app_models[model_name], model)) app_models[model_name] = model self.do_pending_operations(model) self.clear_cache() def is_installed(self, app_name): """ Checks whether an application with this name exists in the registry. app_name is the full name of the app eg. 'django.contrib.admin'. """ self.check_apps_ready() return any(ac.name == app_name for ac in self.app_configs.values()) def get_containing_app_config(self, object_name): """ Look for an app config containing a given object. object_name is the dotted Python path to the object. Returns the app config for the inner application in case of nesting. Returns None if the object isn't in any registered app config. 
""" self.check_apps_ready() candidates = [] for app_config in self.app_configs.values(): if object_name.startswith(app_config.name): subpath = object_name[len(app_config.name):] if subpath == '' or subpath[0] == '.': candidates.append(app_config) if candidates: return sorted(candidates, key=lambda ac: -len(ac.name))[0] def get_registered_model(self, app_label, model_name): """ Similar to get_model(), but doesn't require that an app exists with the given app_label. It's safe to call this method at import time, even while the registry is being populated. """ model = self.all_models[app_label].get(model_name.lower()) if model is None: raise LookupError( "Model '%s.%s' not registered." % (app_label, model_name)) return model @lru_cache.lru_cache(maxsize=None) def get_swappable_settings_name(self, to_string): """ For a given model string (e.g. "auth.User"), return the name of the corresponding settings name if it refers to a swappable model. If the referred model is not swappable, return None. This method is decorated with lru_cache because it's performance critical when it comes to migrations. Since the swappable settings don't change after Django has loaded the settings, there is no reason to get the respective settings attribute over and over again. """ for model in self.get_models(include_swapped=True): swapped = model._meta.swapped # Is this model swapped out for the model given by to_string? if swapped and swapped == to_string: return model._meta.swappable # Is this model swappable and the one given by to_string? if model._meta.swappable and model._meta.label == to_string: return model._meta.swappable return None def set_available_apps(self, available): """ Restricts the set of installed apps used by get_app_config[s]. available must be an iterable of application names. set_available_apps() must be balanced with unset_available_apps(). Primarily used for performance optimization in TransactionTestCase. 
This method is safe is the sense that it doesn't trigger any imports. """ available = set(available) installed = set(app_config.name for app_config in self.get_app_configs()) if not available.issubset(installed): raise ValueError( "Available apps isn't a subset of installed apps, extra apps: %s" % ", ".join(available - installed) ) self.stored_app_configs.append(self.app_configs) self.app_configs = OrderedDict( (label, app_config) for label, app_config in self.app_configs.items() if app_config.name in available) self.clear_cache() def unset_available_apps(self): """ Cancels a previous call to set_available_apps(). """ self.app_configs = self.stored_app_configs.pop() self.clear_cache() def set_installed_apps(self, installed): """ Enables a different set of installed apps for get_app_config[s]. installed must be an iterable in the same format as INSTALLED_APPS. set_installed_apps() must be balanced with unset_installed_apps(), even if it exits with an exception. Primarily used as a receiver of the setting_changed signal in tests. This method may trigger new imports, which may add new models to the registry of all imported models. They will stay in the registry even after unset_installed_apps(). Since it isn't possible to replay imports safely (eg. that could lead to registering listeners twice), models are registered when they're imported and never removed. """ if not self.ready: raise AppRegistryNotReady("App registry isn't ready yet.") self.stored_app_configs.append(self.app_configs) self.app_configs = OrderedDict() self.apps_ready = self.models_ready = self.ready = False self.clear_cache() self.populate(installed) def unset_installed_apps(self): """ Cancels a previous call to set_installed_apps(). """ self.app_configs = self.stored_app_configs.pop() self.apps_ready = self.models_ready = self.ready = True self.clear_cache() def clear_cache(self): """ Clears all internal caches, for methods that alter the app registry. This is mostly used in tests. 
""" # Call expire cache on each model. This will purge # the relation tree and the fields cache. self.get_models.cache_clear() if self.ready: # Circumvent self.get_models() to prevent that the cache is refilled. # This particularly prevents that an empty value is cached while cloning. for app_config in self.app_configs.values(): for model in app_config.get_models(include_auto_created=True): model._meta._expire_cache() def lazy_model_operation(self, function, *model_keys): """ Take a function and a number of ("app_label", "modelname") tuples, and when all the corresponding models have been imported and registered, call the function with the model classes as its arguments. The function passed to this method must accept exactly n models as arguments, where n=len(model_keys). """ # Base case: no arguments, just execute the function. if not model_keys: function() # Recursive case: take the head of model_keys, wait for the # corresponding model class to be imported and registered, then apply # that argument to the supplied function. Pass the resulting partial # to lazy_model_operation() along with the remaining model args and # repeat until all models are loaded and all arguments are applied. else: next_model, more_models = model_keys[0], model_keys[1:] # This will be executed after the class corresponding to next_model # has been imported and registered. The `func` attribute provides # duck-type compatibility with partials. def apply_next_model(model): next_function = partial(apply_next_model.func, model) self.lazy_model_operation(next_function, *more_models) apply_next_model.func = function # If the model has already been imported and registered, partially # apply it to the function now. If not, add it to the list of # pending operations for the model, where it will be executed with # the model class as its sole argument once the model is ready. 
try: model_class = self.get_registered_model(*next_model) except LookupError: self._pending_operations[next_model].append(apply_next_model) else: apply_next_model(model_class) def do_pending_operations(self, model): """ Take a newly-prepared model and pass it to each function waiting for it. This is called at the very end of `Apps.register_model()`. """ key = model._meta.app_label, model._meta.model_name for function in self._pending_operations.pop(key, []): function(model) apps = Apps(installed_apps=None)
bsd-3-clause
ThomasFeher/audacity
lib-src/lv2/lv2/plugins/eg-amp.lv2/waflib/Tools/gcc.py
196
2738
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file from waflib.Tools import ccroot,ar from waflib.Configure import conf @conf def find_gcc(conf): cc=conf.find_program(['gcc','cc'],var='CC') cc=conf.cmd_to_list(cc) conf.get_cc_version(cc,gcc=True) conf.env.CC_NAME='gcc' conf.env.CC=cc @conf def gcc_common_flags(conf): v=conf.env v['CC_SRC_F']=[] v['CC_TGT_F']=['-c','-o'] if not v['LINK_CC']:v['LINK_CC']=v['CC'] v['CCLNK_SRC_F']=[] v['CCLNK_TGT_F']=['-o'] v['CPPPATH_ST']='-I%s' v['DEFINES_ST']='-D%s' v['LIB_ST']='-l%s' v['LIBPATH_ST']='-L%s' v['STLIB_ST']='-l%s' v['STLIBPATH_ST']='-L%s' v['RPATH_ST']='-Wl,-rpath,%s' v['SONAME_ST']='-Wl,-h,%s' v['SHLIB_MARKER']='-Wl,-Bdynamic' v['STLIB_MARKER']='-Wl,-Bstatic' v['cprogram_PATTERN']='%s' v['CFLAGS_cshlib']=['-fPIC'] v['LINKFLAGS_cshlib']=['-shared'] v['cshlib_PATTERN']='lib%s.so' v['LINKFLAGS_cstlib']=['-Wl,-Bstatic'] v['cstlib_PATTERN']='lib%s.a' v['LINKFLAGS_MACBUNDLE']=['-bundle','-undefined','dynamic_lookup'] v['CFLAGS_MACBUNDLE']=['-fPIC'] v['macbundle_PATTERN']='%s.bundle' @conf def gcc_modifier_win32(conf): v=conf.env v['cprogram_PATTERN']='%s.exe' v['cshlib_PATTERN']='%s.dll' v['implib_PATTERN']='lib%s.dll.a' v['IMPLIB_ST']='-Wl,--out-implib,%s' v['CFLAGS_cshlib']=[] v.append_value('LINKFLAGS',['-Wl,--enable-auto-import']) @conf def gcc_modifier_cygwin(conf): gcc_modifier_win32(conf) v=conf.env v['cshlib_PATTERN']='cyg%s.dll' v.append_value('LINKFLAGS_cshlib',['-Wl,--enable-auto-image-base']) v['CFLAGS_cshlib']=[] @conf def gcc_modifier_darwin(conf): v=conf.env v['CFLAGS_cshlib']=['-fPIC'] v['LINKFLAGS_cshlib']=['-dynamiclib','-Wl,-compatibility_version,1','-Wl,-current_version,1'] v['cshlib_PATTERN']='lib%s.dylib' v['FRAMEWORKPATH_ST']='-F%s' v['FRAMEWORK_ST']=['-framework'] v['ARCH_ST']=['-arch'] v['LINKFLAGS_cstlib']=[] v['SHLIB_MARKER']=[] v['STLIB_MARKER']=[] v['SONAME_ST']=[] @conf def gcc_modifier_aix(conf): 
v=conf.env v['LINKFLAGS_cprogram']=['-Wl,-brtl'] v['LINKFLAGS_cshlib']=['-shared','-Wl,-brtl,-bexpfull'] v['SHLIB_MARKER']=[] @conf def gcc_modifier_hpux(conf): v=conf.env v['SHLIB_MARKER']=[] v['STLIB_MARKER']='-Bstatic' v['CFLAGS_cshlib']=['-fPIC','-DPIC'] v['cshlib_PATTERN']='lib%s.sl' @conf def gcc_modifier_openbsd(conf): conf.env.SONAME_ST=[] @conf def gcc_modifier_platform(conf): gcc_modifier_func=getattr(conf,'gcc_modifier_'+conf.env.DEST_OS,None) if gcc_modifier_func: gcc_modifier_func() def configure(conf): conf.find_gcc() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
gpl-2.0
Xeralux/tensorflow
tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2.py
4
15143
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=invalid-name # pylint: disable=unused-import """Inception-ResNet V2 model for Keras. # Reference - [Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning](https://arxiv.org/abs/1602.07261) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras.applications import imagenet_utils from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions from tensorflow.python.keras._impl.keras.engine.network import get_source_inputs from tensorflow.python.keras._impl.keras.layers import Activation from tensorflow.python.keras._impl.keras.layers import AveragePooling2D from tensorflow.python.keras._impl.keras.layers import BatchNormalization from tensorflow.python.keras._impl.keras.layers import Concatenate from tensorflow.python.keras._impl.keras.layers import Conv2D from tensorflow.python.keras._impl.keras.layers import Dense from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D from tensorflow.python.keras._impl.keras.layers import 
GlobalMaxPooling2D from tensorflow.python.keras._impl.keras.layers import Input from tensorflow.python.keras._impl.keras.layers import Lambda from tensorflow.python.keras._impl.keras.layers import MaxPooling2D from tensorflow.python.keras._impl.keras.models import Model from tensorflow.python.keras._impl.keras.utils.data_utils import get_file from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export BASE_WEIGHT_URL = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.7/' @tf_export('keras.applications.inception_resnet_v2.preprocess_input') def preprocess_input(x): """Preprocesses a numpy array encoding a batch of images. Arguments: x: a 4D numpy array consists of RGB values within [0, 255]. Returns: Preprocessed array. """ return imagenet_utils.preprocess_input(x, mode='tf') def conv2d_bn(x, filters, kernel_size, strides=1, padding='same', activation='relu', use_bias=False, name=None): """Utility function to apply conv + BN. Arguments: x: input tensor. filters: filters in `Conv2D`. kernel_size: kernel size as in `Conv2D`. strides: strides in `Conv2D`. padding: padding mode in `Conv2D`. activation: activation in `Conv2D`. use_bias: whether to use a bias in `Conv2D`. name: name of the ops; will become `name + '_ac'` for the activation and `name + '_bn'` for the batch norm layer. Returns: Output tensor after applying `Conv2D` and `BatchNormalization`. """ x = Conv2D( filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias, name=name)( x) if not use_bias: bn_axis = 1 if K.image_data_format() == 'channels_first' else 3 bn_name = None if name is None else name + '_bn' x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x) if activation is not None: ac_name = None if name is None else name + '_ac' x = Activation(activation, name=ac_name)(x) return x def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'): """Adds a Inception-ResNet block. 
This function builds 3 types of Inception-ResNet blocks mentioned in the paper, controlled by the `block_type` argument (which is the block name used in the official TF-slim implementation): - Inception-ResNet-A: `block_type='block35'` - Inception-ResNet-B: `block_type='block17'` - Inception-ResNet-C: `block_type='block8'` Arguments: x: input tensor. scale: scaling factor to scale the residuals (i.e., the output of passing `x` through an inception module) before adding them to the shortcut branch. Let `r` be the output from the residual branch, the output of this block will be `x + scale * r`. block_type: `'block35'`, `'block17'` or `'block8'`, determines the network structure in the residual branch. block_idx: an `int` used for generating layer names. The Inception-ResNet blocks are repeated many times in this network. We use `block_idx` to identify each of the repetitions. For example, the first Inception-ResNet-A block will have `block_type='block35', block_idx=0`, ane the layer names will have a common prefix `'block35_0'`. activation: activation function to use at the end of the block. When `activation=None`, no activation is applied (i.e., "linear" activation: `a(x) = x`). Returns: Output tensor for the block. Raises: ValueError: if `block_type` is not one of `'block35'`, `'block17'` or `'block8'`. 
""" if block_type == 'block35': branch_0 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(x, 32, 1) branch_1 = conv2d_bn(branch_1, 32, 3) branch_2 = conv2d_bn(x, 32, 1) branch_2 = conv2d_bn(branch_2, 48, 3) branch_2 = conv2d_bn(branch_2, 64, 3) branches = [branch_0, branch_1, branch_2] elif block_type == 'block17': branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 128, 1) branch_1 = conv2d_bn(branch_1, 160, [1, 7]) branch_1 = conv2d_bn(branch_1, 192, [7, 1]) branches = [branch_0, branch_1] elif block_type == 'block8': branch_0 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(x, 192, 1) branch_1 = conv2d_bn(branch_1, 224, [1, 3]) branch_1 = conv2d_bn(branch_1, 256, [3, 1]) branches = [branch_0, branch_1] else: raise ValueError('Unknown Inception-ResNet block type. ' 'Expects "block35", "block17" or "block8", ' 'but got: ' + str(block_type)) block_name = block_type + '_' + str(block_idx) channel_axis = 1 if K.image_data_format() == 'channels_first' else 3 mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches) up = conv2d_bn( mixed, K.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=block_name + '_conv') x = Lambda( lambda inputs, scale: inputs[0] + inputs[1] * scale, output_shape=K.int_shape(x)[1:], arguments={'scale': scale}, name=block_name)([x, up]) if activation is not None: x = Activation(activation, name=block_name + '_ac')(x) return x @tf_export('keras.applications.InceptionResNetV2', 'keras.applications.inception_resnet_v2.InceptionResNetV2') def InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000): """Instantiates the Inception-ResNet v2 architecture. Optionally loads weights pre-trained on ImageNet. Note that when using TensorFlow, for best performance you should set `"image_data_format": "channels_last"` in your Keras config at `~/.keras/keras.json`. The model and the weights are compatible with TensorFlow, Theano and CNTK backends. 
The data format convention used by the model is the one specified in your Keras config file. Note that the default input image size for this model is 299x299, instead of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing function is different (i.e., do not use `imagenet_utils.preprocess_input()` with this model. Use `preprocess_input()` defined in this module instead). Arguments: include_top: whether to include the fully-connected layer at the top of the network. weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded. input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. input_shape: optional shape tuple, only to be specified if `include_top` is `False` (otherwise the input shape has to be `(299, 299, 3)` (with `'channels_last'` data format) or `(3, 299, 299)` (with `'channels_first'` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 139. E.g. `(150, 150, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `'avg'` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `'max'` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is `True`, and if no `weights` argument is specified. Returns: A Keras `Model` instance. Raises: ValueError: in case of invalid argument for `weights`, or invalid input shape. 
""" if not (weights in {'imagenet', None} or os.path.exists(weights)): raise ValueError('The `weights` argument should be either ' '`None` (random initialization), `imagenet` ' '(pre-training on ImageNet), ' 'or the path to the weights file to be loaded.') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as imagenet with `include_top`' ' as true, `classes` should be 1000') # Determine proper input shape input_shape = _obtain_input_shape( input_shape, default_size=299, min_size=139, data_format=K.image_data_format(), require_flatten=False, weights=weights) if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor # Stem block: 35 x 35 x 192 x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid') x = conv2d_bn(x, 32, 3, padding='valid') x = conv2d_bn(x, 64, 3) x = MaxPooling2D(3, strides=2)(x) x = conv2d_bn(x, 80, 1, padding='valid') x = conv2d_bn(x, 192, 3, padding='valid') x = MaxPooling2D(3, strides=2)(x) # Mixed 5b (Inception-A block): 35 x 35 x 320 branch_0 = conv2d_bn(x, 96, 1) branch_1 = conv2d_bn(x, 48, 1) branch_1 = conv2d_bn(branch_1, 64, 5) branch_2 = conv2d_bn(x, 64, 1) branch_2 = conv2d_bn(branch_2, 96, 3) branch_2 = conv2d_bn(branch_2, 96, 3) branch_pool = AveragePooling2D(3, strides=1, padding='same')(x) branch_pool = conv2d_bn(branch_pool, 64, 1) branches = [branch_0, branch_1, branch_2, branch_pool] channel_axis = 1 if K.image_data_format() == 'channels_first' else 3 x = Concatenate(axis=channel_axis, name='mixed_5b')(branches) # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320 for block_idx in range(1, 11): x = inception_resnet_block( x, scale=0.17, block_type='block35', block_idx=block_idx) # Mixed 6a (Reduction-A block): 17 x 17 x 1088 branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid') branch_1 = conv2d_bn(x, 256, 1) branch_1 = 
conv2d_bn(branch_1, 256, 3) branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid') branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x) branches = [branch_0, branch_1, branch_pool] x = Concatenate(axis=channel_axis, name='mixed_6a')(branches) # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088 for block_idx in range(1, 21): x = inception_resnet_block( x, scale=0.1, block_type='block17', block_idx=block_idx) # Mixed 7a (Reduction-B block): 8 x 8 x 2080 branch_0 = conv2d_bn(x, 256, 1) branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid') branch_1 = conv2d_bn(x, 256, 1) branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid') branch_2 = conv2d_bn(x, 256, 1) branch_2 = conv2d_bn(branch_2, 288, 3) branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid') branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x) branches = [branch_0, branch_1, branch_2, branch_pool] x = Concatenate(axis=channel_axis, name='mixed_7a')(branches) # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080 for block_idx in range(1, 10): x = inception_resnet_block( x, scale=0.2, block_type='block8', block_idx=block_idx) x = inception_resnet_block( x, scale=1., activation=None, block_type='block8', block_idx=10) # Final convolution block: 8 x 8 x 1536 x = conv2d_bn(x, 1536, 1, name='conv_7b') if include_top: # Classification block x = GlobalAveragePooling2D(name='avg_pool')(x) x = Dense(classes, activation='softmax', name='predictions')(x) else: if pooling == 'avg': x = GlobalAveragePooling2D()(x) elif pooling == 'max': x = GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor` if input_tensor is not None: inputs = get_source_inputs(input_tensor) else: inputs = img_input # Create model model = Model(inputs, x, name='inception_resnet_v2') # Load weights if weights == 'imagenet': if include_top: fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5' weights_path = 
get_file( fname, BASE_WEIGHT_URL + fname, cache_subdir='models', file_hash='e693bd0210a403b3192acc6073ad2e96') else: fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5' weights_path = get_file( fname, BASE_WEIGHT_URL + fname, cache_subdir='models', file_hash='d19885ff4a710c122648d3b5c3b684e4') model.load_weights(weights_path) elif weights is not None: model.load_weights(weights) return model
apache-2.0
bopo/PopClip-Extensions
source/OneNote/requests/packages/urllib3/packages/six.py
2375
11628
"""Utilities for writing code that runs on Python 2 and 3""" #Copyright (c) 2010-2011 Benjamin Peterson #Permission is hereby granted, free of charge, to any person obtaining a copy of #this software and associated documentation files (the "Software"), to deal in #the Software without restriction, including without limitation the rights to #use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of #the Software, and to permit persons to whom the Software is furnished to do so, #subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS #FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR #COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER #IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN #CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.2.0" # Revision 41c74fef2ded # True if we are running on Python 3. PY3 = sys.version_info[0] == 3 if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # This is a bit ugly, but it avoids running this again. delattr(tp, self.name) return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _MovedItems(types.ModuleType): """Lazy loading of moved objects""" _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("reload_module", "__builtin__", "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", 
"builtins", "izip", "zip"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) del attr moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") def add_move(move): """Add 
an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_code = "__code__" _func_defaults = "__defaults__" _iterkeys = "keys" _itervalues = "values" _iteritems = "items" else: _meth_func = "im_func" _meth_self = "im_self" _func_code = "func_code" _func_defaults = "func_defaults" _iterkeys = "iterkeys" _itervalues = "itervalues" _iteritems = "iteritems" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator if PY3: def get_unbound_function(unbound): return unbound Iterator = object def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) else: def get_unbound_function(unbound): return unbound.im_func class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) def iterkeys(d): """Return an iterator over the keys of a dictionary.""" return iter(getattr(d, _iterkeys)()) def itervalues(d): """Return an iterator over the values of a dictionary.""" return iter(getattr(d, _itervalues)()) def iteritems(d): """Return an iterator over the (key, value) pairs of a dictionary.""" return iter(getattr(d, _iteritems)()) if PY3: def b(s): return s.encode("latin-1") def u(s): return s if sys.version_info[1] <= 1: def int2byte(i): return bytes((i,)) else: # This is about 2x faster than the implementation above on 3.2+ int2byte = operator.methodcaller("to_bytes", 1, "big") import io 
StringIO = io.StringIO BytesIO = io.BytesIO else: def b(s): return s def u(s): return unicode(s, "unicode_escape") int2byte = chr import StringIO StringIO = BytesIO = StringIO.StringIO _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") if PY3: import builtins exec_ = getattr(builtins, "exec") def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value print_ = getattr(builtins, "print") del builtins else: def exec_(code, globs=None, locs=None): """Execute code in a namespace.""" if globs is None: frame = sys._getframe(1) globs = frame.f_globals if locs is None: locs = frame.f_locals del frame elif locs is None: locs = globs exec("""exec code in globs, locs""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) def print_(*args, **kwargs): """The new-style print function.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) _add_doc(reraise, """Reraise an exception.""") def with_metaclass(meta, base=object): """Create a base class with a metaclass.""" return meta("NewBase", (base,), {})
mit
bdh1011/wau
venv/lib/python2.7/site-packages/path.py
1
55665
# # Copyright (c) 2010 Mikhail Gusarov # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # """ path.py - An object representing a path to a file or directory. 
https://github.com/jaraco/path.py Example:: from path import Path d = Path('/home/guido/bin') for f in d.files('*.py'): f.chmod(0o755) """ import sys import warnings import os import fnmatch import glob import shutil import codecs import hashlib import errno import tempfile import functools import operator import re import contextlib import io from distutils import dir_util import importlib try: import win32security except ImportError: pass try: import pwd except ImportError: pass try: import grp except ImportError: pass ############################################################################## # Python 2/3 support PY3 = sys.version_info >= (3,) PY2 = not PY3 string_types = str, text_type = str getcwdu = os.getcwd u = lambda x: x def surrogate_escape(error): """ Simulate the Python 3 ``surrogateescape`` handler, but for Python 2 only. """ chars = error.object[error.start:error.end] assert len(chars) == 1 val = ord(chars) val += 0xdc00 return __builtin__.unichr(val), error.end if PY2: import __builtin__ string_types = __builtin__.basestring, text_type = __builtin__.unicode getcwdu = os.getcwdu u = lambda x: codecs.unicode_escape_decode(x)[0] codecs.register_error('surrogateescape', surrogate_escape) @contextlib.contextmanager def io_error_compat(): try: yield except IOError as io_err: # On Python 2, io.open raises IOError; transform to OSError for # future compatibility. 
os_err = OSError(*io_err.args) os_err.filename = getattr(io_err, 'filename', None) raise os_err ############################################################################## __all__ = ['Path', 'CaseInsensitivePattern'] LINESEPS = [u('\r\n'), u('\r'), u('\n')] U_LINESEPS = LINESEPS + [u('\u0085'), u('\u2028'), u('\u2029')] NEWLINE = re.compile('|'.join(LINESEPS)) U_NEWLINE = re.compile('|'.join(U_LINESEPS)) NL_END = re.compile(u(r'(?:{0})$').format(NEWLINE.pattern)) U_NL_END = re.compile(u(r'(?:{0})$').format(U_NEWLINE.pattern)) try: import pkg_resources __version__ = pkg_resources.require('path.py')[0].version except ImportError: __version__ = 'unknown' except pkg_resources.DistributionNotFound: __version__ = 'unknown' class TreeWalkWarning(Warning): pass # from jaraco.functools def compose(*funcs): compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs)) return functools.reduce(compose_two, funcs) def simple_cache(func): """ Save results for the :meth:'path.using_module' classmethod. When Python 3.2 is available, use functools.lru_cache instead. """ saved_results = {} def wrapper(cls, module): if module in saved_results: return saved_results[module] saved_results[module] = func(cls, module) return saved_results[module] return wrapper class ClassProperty(property): def __get__(self, cls, owner): return self.fget.__get__(None, owner)() class multimethod(object): """ Acts like a classmethod when invoked from the class and like an instancemethod when invoked from the instance. """ def __init__(self, func): self.func = func def __get__(self, instance, owner): return ( functools.partial(self.func, owner) if instance is None else functools.partial(self.func, owner, instance) ) class Path(text_type): """ Represents a filesystem path. For documentation on individual methods, consult their counterparts in :mod:`os.path`. Some methods are additionally included from :mod:`shutil`. 
The functions are linked directly into the class namespace such that they will be bound to the Path instance. For example, ``Path(src).copy(target)`` is equivalent to ``shutil.copy(src, target)``. Therefore, when referencing the docs for these methods, assume `src` references `self`, the Path instance. """ module = os.path """ The path module to use for path operations. .. seealso:: :mod:`os.path` """ def __init__(self, other=''): if other is None: raise TypeError("Invalid initial value for path: None") @classmethod @simple_cache def using_module(cls, module): subclass_name = cls.__name__ + '_' + module.__name__ bases = (cls,) ns = {'module': module} return type(subclass_name, bases, ns) @ClassProperty @classmethod def _next_class(cls): """ What class should be used to construct new instances from this class """ return cls @classmethod def _always_unicode(cls, path): """ Ensure the path as retrieved from a Python API, such as :func:`os.listdir`, is a proper Unicode string. """ if PY3 or isinstance(path, text_type): return path return path.decode(sys.getfilesystemencoding(), 'surrogateescape') # --- Special Python methods. def __repr__(self): return '%s(%s)' % (type(self).__name__, super(Path, self).__repr__()) # Adding a Path and a string yields a Path. def __add__(self, more): try: return self._next_class(super(Path, self).__add__(more)) except TypeError: # Python bug return NotImplemented def __radd__(self, other): if not isinstance(other, string_types): return NotImplemented return self._next_class(other.__add__(self)) # The / operator joins Paths. def __div__(self, rel): """ fp.__div__(rel) == fp / rel == fp.joinpath(rel) Join two path components, adding a separator character if needed. .. seealso:: :func:`os.path.join` """ return self._next_class(self.module.join(self, rel)) # Make the / operator work even when true division is enabled. 
__truediv__ = __div__ # The / operator joins Paths the other way around def __rdiv__(self, rel): """ fp.__rdiv__(rel) == rel / fp Join two path components, adding a separator character if needed. .. seealso:: :func:`os.path.join` """ return self._next_class(self.module.join(rel, self)) # Make the / operator work even when true division is enabled. __rtruediv__ = __rdiv__ def __enter__(self): self._old_dir = self.getcwd() os.chdir(self) return self def __exit__(self, *_): os.chdir(self._old_dir) @classmethod def getcwd(cls): """ Return the current working directory as a path object. .. seealso:: :func:`os.getcwdu` """ return cls(getcwdu()) # # --- Operations on Path strings. def abspath(self): """ .. seealso:: :func:`os.path.abspath` """ return self._next_class(self.module.abspath(self)) def normcase(self): """ .. seealso:: :func:`os.path.normcase` """ return self._next_class(self.module.normcase(self)) def normpath(self): """ .. seealso:: :func:`os.path.normpath` """ return self._next_class(self.module.normpath(self)) def realpath(self): """ .. seealso:: :func:`os.path.realpath` """ return self._next_class(self.module.realpath(self)) def expanduser(self): """ .. seealso:: :func:`os.path.expanduser` """ return self._next_class(self.module.expanduser(self)) def expandvars(self): """ .. seealso:: :func:`os.path.expandvars` """ return self._next_class(self.module.expandvars(self)) def dirname(self): """ .. seealso:: :attr:`parent`, :func:`os.path.dirname` """ return self._next_class(self.module.dirname(self)) def basename(self): """ .. seealso:: :attr:`name`, :func:`os.path.basename` """ return self._next_class(self.module.basename(self)) def expand(self): """ Clean up a filename by calling :meth:`expandvars()`, :meth:`expanduser()`, and :meth:`normpath()` on it. This is commonly everything needed to clean up a filename read from a configuration file, for example. 
""" return self.expandvars().expanduser().normpath() @property def namebase(self): """ The same as :meth:`name`, but with one file extension stripped off. For example, ``Path('/home/guido/python.tar.gz').name == 'python.tar.gz'``, but ``Path('/home/guido/python.tar.gz').namebase == 'python.tar'``. """ base, ext = self.module.splitext(self.name) return base @property def ext(self): """ The file extension, for example ``'.py'``. """ f, ext = self.module.splitext(self) return ext @property def drive(self): """ The drive specifier, for example ``'C:'``. This is always empty on systems that don't use drive specifiers. """ drive, r = self.module.splitdrive(self) return self._next_class(drive) parent = property( dirname, None, None, """ This path's parent directory, as a new Path object. For example, ``Path('/usr/local/lib/libpython.so').parent == Path('/usr/local/lib')`` .. seealso:: :meth:`dirname`, :func:`os.path.dirname` """) name = property( basename, None, None, """ The name of this file or directory without the full path. For example, ``Path('/usr/local/lib/libpython.so').name == 'libpython.so'`` .. seealso:: :meth:`basename`, :func:`os.path.basename` """) def splitpath(self): """ p.splitpath() -> Return ``(p.parent, p.name)``. .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split` """ parent, child = self.module.split(self) return self._next_class(parent), child def splitdrive(self): """ p.splitdrive() -> Return ``(p.drive, <the rest of p>)``. Split the drive specifier from this path. If there is no drive specifier, :samp:`{p.drive}` is empty, so the return value is simply ``(Path(''), p)``. This is always the case on Unix. .. seealso:: :func:`os.path.splitdrive` """ drive, rel = self.module.splitdrive(self) return self._next_class(drive), rel def splitext(self): """ p.splitext() -> Return ``(p.stripext(), p.ext)``. Split the filename extension from this path and return the two parts. Either part may be empty. 
The extension is everything from ``'.'`` to the end of the last path segment. This has the property that if ``(a, b) == p.splitext()``, then ``a + b == p``. .. seealso:: :func:`os.path.splitext` """ filename, ext = self.module.splitext(self) return self._next_class(filename), ext def stripext(self): """ p.stripext() -> Remove one file extension from the path. For example, ``Path('/home/guido/python.tar.gz').stripext()`` returns ``Path('/home/guido/python.tar')``. """ return self.splitext()[0] def splitunc(self): """ .. seealso:: :func:`os.path.splitunc` """ unc, rest = self.module.splitunc(self) return self._next_class(unc), rest @property def uncshare(self): """ The UNC mount point for this path. This is empty for paths on local drives. """ unc, r = self.module.splitunc(self) return self._next_class(unc) @multimethod def joinpath(cls, first, *others): """ Join first to zero or more :class:`Path` components, adding a separator character (:samp:`{first}.module.sep`) if needed. Returns a new instance of :samp:`{first}._next_class`. .. seealso:: :func:`os.path.join` """ if not isinstance(first, cls): first = cls(first) return first._next_class(first.module.join(first, *others)) def splitall(self): r""" Return a list of the path components in this path. The first item in the list will be a Path. Its value will be either :data:`os.curdir`, :data:`os.pardir`, empty, or the root directory of this path (for example, ``'/'`` or ``'C:\\'``). The other items in the list will be strings. ``path.Path.joinpath(*result)`` will yield the original path. """ parts = [] loc = self while loc != os.curdir and loc != os.pardir: prev = loc loc, child = prev.splitpath() if loc == prev: break parts.append(child) parts.append(loc) parts.reverse() return parts def relpath(self, start='.'): """ Return this path as a relative path, based from `start`, which defaults to the current working directory. 
""" cwd = self._next_class(start) return cwd.relpathto(self) def relpathto(self, dest): """ Return a relative path from `self` to `dest`. If there is no relative path from `self` to `dest`, for example if they reside on different drives in Windows, then this returns ``dest.abspath()``. """ origin = self.abspath() dest = self._next_class(dest).abspath() orig_list = origin.normcase().splitall() # Don't normcase dest! We want to preserve the case. dest_list = dest.splitall() if orig_list[0] != self.module.normcase(dest_list[0]): # Can't get here from there. return dest # Find the location where the two paths start to differ. i = 0 for start_seg, dest_seg in zip(orig_list, dest_list): if start_seg != self.module.normcase(dest_seg): break i += 1 # Now i is the point where the two paths diverge. # Need a certain number of "os.pardir"s to work up # from the origin to the point of divergence. segments = [os.pardir] * (len(orig_list) - i) # Need to add the diverging part of dest_list. segments += dest_list[i:] if len(segments) == 0: # If they happen to be identical, use os.curdir. relpath = os.curdir else: relpath = self.module.join(*segments) return self._next_class(relpath) # --- Listing, searching, walking, and matching def listdir(self, pattern=None): """ D.listdir() -> List of items in this directory. Use :meth:`files` or :meth:`dirs` instead if you want a listing of just files or just subdirectories. The elements of the list are Path objects. With the optional `pattern` argument, this only lists items whose names match the given pattern. .. seealso:: :meth:`files`, :meth:`dirs` """ if pattern is None: pattern = '*' return [ self / child for child in map(self._always_unicode, os.listdir(self)) if self._next_class(child).fnmatch(pattern) ] def dirs(self, pattern=None): """ D.dirs() -> List of this directory's subdirectories. The elements of the list are Path objects. This does not walk recursively into subdirectories (but see :meth:`walkdirs`). 
With the optional `pattern` argument, this only lists directories whose names match the given pattern. For example, ``d.dirs('build-*')``. """ return [p for p in self.listdir(pattern) if p.isdir()] def files(self, pattern=None): """ D.files() -> List of the files in this directory. The elements of the list are Path objects. This does not walk into subdirectories (see :meth:`walkfiles`). With the optional `pattern` argument, this only lists files whose names match the given pattern. For example, ``d.files('*.pyc')``. """ return [p for p in self.listdir(pattern) if p.isfile()] def walk(self, pattern=None, errors='strict'): """ D.walk() -> iterator over files and subdirs, recursively. The iterator yields Path objects naming each child item of this directory and its descendants. This requires that ``D.isdir()``. This performs a depth-first traversal of the directory tree. Each directory is returned just before all its children. The `errors=` keyword argument controls behavior when an error occurs. The default is ``'strict'``, which causes an exception. Other allowed values are ``'warn'`` (which reports the error via :func:`warnings.warn()`), and ``'ignore'``. `errors` may also be an arbitrary callable taking a msg parameter. 
""" class Handlers: def strict(msg): raise def warn(msg): warnings.warn(msg, TreeWalkWarning) def ignore(msg): pass if not callable(errors) and errors not in vars(Handlers): raise ValueError("invalid errors parameter") errors = vars(Handlers).get(errors, errors) try: childList = self.listdir() except Exception: exc = sys.exc_info()[1] tmpl = "Unable to list directory '%(self)s': %(exc)s" msg = tmpl % locals() errors(msg) return for child in childList: if pattern is None or child.fnmatch(pattern): yield child try: isdir = child.isdir() except Exception: exc = sys.exc_info()[1] tmpl = "Unable to access '%(child)s': %(exc)s" msg = tmpl % locals() errors(msg) isdir = False if isdir: for item in child.walk(pattern, errors): yield item def walkdirs(self, pattern=None, errors='strict'): """ D.walkdirs() -> iterator over subdirs, recursively. With the optional `pattern` argument, this yields only directories whose names match the given pattern. For example, ``mydir.walkdirs('*test')`` yields only directories with names ending in ``'test'``. The `errors=` keyword argument controls behavior when an error occurs. The default is ``'strict'``, which causes an exception. The other allowed values are ``'warn'`` (which reports the error via :func:`warnings.warn()`), and ``'ignore'``. """ if errors not in ('strict', 'warn', 'ignore'): raise ValueError("invalid errors parameter") try: dirs = self.dirs() except Exception: if errors == 'ignore': return elif errors == 'warn': warnings.warn( "Unable to list directory '%s': %s" % (self, sys.exc_info()[1]), TreeWalkWarning) return else: raise for child in dirs: if pattern is None or child.fnmatch(pattern): yield child for subsubdir in child.walkdirs(pattern, errors): yield subsubdir def walkfiles(self, pattern=None, errors='strict'): """ D.walkfiles() -> iterator over files in D, recursively. The optional argument `pattern` limits the results to files with names that match the pattern. 
For example, ``mydir.walkfiles('*.tmp')`` yields only files with the ``.tmp`` extension. """ if errors not in ('strict', 'warn', 'ignore'): raise ValueError("invalid errors parameter") try: childList = self.listdir() except Exception: if errors == 'ignore': return elif errors == 'warn': warnings.warn( "Unable to list directory '%s': %s" % (self, sys.exc_info()[1]), TreeWalkWarning) return else: raise for child in childList: try: isfile = child.isfile() isdir = not isfile and child.isdir() except: if errors == 'ignore': continue elif errors == 'warn': warnings.warn( "Unable to access '%s': %s" % (self, sys.exc_info()[1]), TreeWalkWarning) continue else: raise if isfile: if pattern is None or child.fnmatch(pattern): yield child elif isdir: for f in child.walkfiles(pattern, errors): yield f def fnmatch(self, pattern, normcase=None): """ Return ``True`` if `self.name` matches the given `pattern`. `pattern` - A filename pattern with wildcards, for example ``'*.py'``. If the pattern contains a `normcase` attribute, it is applied to the name and path prior to comparison. `normcase` - (optional) A function used to normalize the pattern and filename before matching. Defaults to :meth:`self.module`, which defaults to :meth:`os.path.normcase`. .. seealso:: :func:`fnmatch.fnmatch` """ default_normcase = getattr(pattern, 'normcase', self.module.normcase) normcase = normcase or default_normcase name = normcase(self.name) pattern = normcase(pattern) return fnmatch.fnmatchcase(name, pattern) def glob(self, pattern): """ Return a list of Path objects that match the pattern. `pattern` - a path relative to this directory, with wildcards. For example, ``Path('/users').glob('*/bin/*')`` returns a list of all the files users have in their :file:`bin` directories. .. seealso:: :func:`glob.glob` """ cls = self._next_class return [cls(s) for s in glob.glob(self / pattern)] # # --- Reading or writing an entire file at once. 
def open(self, *args, **kwargs): """ Open this file and return a corresponding :class:`file` object. Keyword arguments work as in :func:`io.open`. If the file cannot be opened, an :class:`~exceptions.OSError` is raised. """ with io_error_compat(): return io.open(self, *args, **kwargs) def bytes(self): """ Open this file, read all bytes, return them as a string. """ with self.open('rb') as f: return f.read() def chunks(self, size, *args, **kwargs): """ Returns a generator yielding chunks of the file, so it can be read piece by piece with a simple for loop. Any argument you pass after `size` will be passed to :meth:`open`. :example: >>> hash = hashlib.md5() >>> for chunk in Path("path.py").chunks(8192, mode='rb'): ... hash.update(chunk) This will read the file by chunks of 8192 bytes. """ with self.open(*args, **kwargs) as f: for chunk in iter(lambda: f.read(size) or None, None): yield chunk def write_bytes(self, bytes, append=False): """ Open this file and write the given bytes to it. Default behavior is to overwrite any existing file. Call ``p.write_bytes(bytes, append=True)`` to append instead. """ if append: mode = 'ab' else: mode = 'wb' with self.open(mode) as f: f.write(bytes) def text(self, encoding=None, errors='strict'): r""" Open this file, read it in, return the content as a string. All newline sequences are converted to ``'\n'``. Keyword arguments will be passed to :meth:`open`. .. seealso:: :meth:`lines` """ with self.open(mode='r', encoding=encoding, errors=errors) as f: return U_NEWLINE.sub('\n', f.read()) def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False): r""" Write the given text to this file. The default behavior is to overwrite any existing file; to append instead, use the `append=True` keyword argument. There are two differences between :meth:`write_text` and :meth:`write_bytes`: newline handling and Unicode handling. See below. Parameters: `text` - str/unicode - The text to be written. 
`encoding` - str - The Unicode encoding that will be used. This is ignored if `text` isn't a Unicode string. `errors` - str - How to handle Unicode encoding errors. Default is ``'strict'``. See ``help(unicode.encode)`` for the options. This is ignored if `text` isn't a Unicode string. `linesep` - keyword argument - str/unicode - The sequence of characters to be used to mark end-of-line. The default is :data:`os.linesep`. You can also specify ``None`` to leave all newlines as they are in `text`. `append` - keyword argument - bool - Specifies what to do if the file already exists (``True``: append to the end of it; ``False``: overwrite it.) The default is ``False``. --- Newline handling. ``write_text()`` converts all standard end-of-line sequences (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default end-of-line sequence (see :data:`os.linesep`; on Windows, for example, the end-of-line marker is ``'\r\n'``). If you don't like your platform's default, you can override it using the `linesep=` keyword argument. If you specifically want ``write_text()`` to preserve the newlines as-is, use ``linesep=None``. This applies to Unicode text the same as to 8-bit text, except there are three additional standard Unicode end-of-line sequences: ``u'\x85'``, ``u'\r\x85'``, and ``u'\u2028'``. (This is slightly different from when you open a file for writing with ``fopen(filename, "w")`` in C or ``open(filename, 'w')`` in Python.) --- Unicode If `text` isn't Unicode, then apart from newline handling, the bytes are written verbatim to the file. The `encoding` and `errors` arguments are not used and must be omitted. If `text` is Unicode, it is first converted to :func:`bytes` using the specified `encoding` (or the default encoding if `encoding` isn't specified). The `errors` argument applies only to this conversion. 
""" if isinstance(text, text_type): if linesep is not None: text = U_NEWLINE.sub(linesep, text) text = text.encode(encoding or sys.getdefaultencoding(), errors) else: assert encoding is None text = NEWLINE.sub(linesep, text) self.write_bytes(text, append=append) def lines(self, encoding=None, errors='strict', retain=True): r""" Open this file, read all lines, return them in a list. Optional arguments: `encoding` - The Unicode encoding (or character set) of the file. The default is ``None``, meaning the content of the file is read as 8-bit characters and returned as a list of (non-Unicode) str objects. `errors` - How to handle Unicode errors; see help(str.decode) for the options. Default is ``'strict'``. `retain` - If ``True``, retain newline characters; but all newline character combinations (``'\r'``, ``'\n'``, ``'\r\n'``) are translated to ``'\n'``. If ``False``, newline characters are stripped off. Default is ``True``. This uses ``'U'`` mode. .. seealso:: :meth:`text` """ if encoding is None and retain: with self.open('U') as f: return f.readlines() else: return self.text(encoding, errors).splitlines(retain) def write_lines(self, lines, encoding=None, errors='strict', linesep=os.linesep, append=False): r""" Write the given lines of text to this file. By default this overwrites any existing file at this path. This puts a platform-specific newline sequence on every line. See `linesep` below. `lines` - A list of strings. `encoding` - A Unicode encoding to use. This applies only if `lines` contains any Unicode strings. `errors` - How to handle errors in Unicode encoding. This also applies only to Unicode strings. linesep - The desired line-ending. This line-ending is applied to every line. If a line already has any standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``, ``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will be stripped off and this will be used instead. 
The default is os.linesep, which is platform-dependent (``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.). Specify ``None`` to write the lines as-is, like :meth:`file.writelines`. Use the keyword argument ``append=True`` to append lines to the file. The default is to overwrite the file. .. warning :: When you use this with Unicode data, if the encoding of the existing data in the file is different from the encoding you specify with the `encoding=` parameter, the result is mixed-encoding data, which can really confuse someone trying to read the file later. """ with self.open('ab' if append else 'wb') as f: for l in lines: isUnicode = isinstance(l, text_type) if linesep is not None: pattern = U_NL_END if isUnicode else NL_END l = pattern.sub('', l) + linesep if isUnicode: l = l.encode(encoding or sys.getdefaultencoding(), errors) f.write(l) def read_md5(self): """ Calculate the md5 hash for this file. This reads through the entire file. .. seealso:: :meth:`read_hash` """ return self.read_hash('md5') def _hash(self, hash_name): """ Returns a hash object for the file at the current path. `hash_name` should be a hash algo name (such as ``'md5'`` or ``'sha1'``) that's available in the :mod:`hashlib` module. """ m = hashlib.new(hash_name) for chunk in self.chunks(8192, mode="rb"): m.update(chunk) return m def read_hash(self, hash_name): """ Calculate given hash for this file. List of supported hashes can be obtained from :mod:`hashlib` package. This reads the entire file. .. seealso:: :meth:`hashlib.hash.digest` """ return self._hash(hash_name).digest() def read_hexhash(self, hash_name): """ Calculate given hash for this file, returning hexdigest. List of supported hashes can be obtained from :mod:`hashlib` package. This reads the entire file. .. seealso:: :meth:`hashlib.hash.hexdigest` """ return self._hash(hash_name).hexdigest() # --- Methods for querying the filesystem. # N.B. On some platforms, the os.path functions may be implemented in C # (e.g. 
isdir on Windows, Python 3.2.2), and compiled functions don't get # bound. Playing it safe and wrapping them all in method calls. def isabs(self): """ .. seealso:: :func:`os.path.isabs` """ return self.module.isabs(self) def exists(self): """ .. seealso:: :func:`os.path.exists` """ return self.module.exists(self) def isdir(self): """ .. seealso:: :func:`os.path.isdir` """ return self.module.isdir(self) def isfile(self): """ .. seealso:: :func:`os.path.isfile` """ return self.module.isfile(self) def islink(self): """ .. seealso:: :func:`os.path.islink` """ return self.module.islink(self) def ismount(self): """ .. seealso:: :func:`os.path.ismount` """ return self.module.ismount(self) def samefile(self, other): """ .. seealso:: :func:`os.path.samefile` """ if not hasattr(self.module, 'samefile'): other = Path(other).realpath().normpath().normcase() return self.realpath().normpath().normcase() == other return self.module.samefile(self, other) def getatime(self): """ .. seealso:: :attr:`atime`, :func:`os.path.getatime` """ return self.module.getatime(self) atime = property( getatime, None, None, """ Last access time of the file. .. seealso:: :meth:`getatime`, :func:`os.path.getatime` """) def getmtime(self): """ .. seealso:: :attr:`mtime`, :func:`os.path.getmtime` """ return self.module.getmtime(self) mtime = property( getmtime, None, None, """ Last-modified time of the file. .. seealso:: :meth:`getmtime`, :func:`os.path.getmtime` """) def getctime(self): """ .. seealso:: :attr:`ctime`, :func:`os.path.getctime` """ return self.module.getctime(self) ctime = property( getctime, None, None, """ Creation time of the file. .. seealso:: :meth:`getctime`, :func:`os.path.getctime` """) def getsize(self): """ .. seealso:: :attr:`size`, :func:`os.path.getsize` """ return self.module.getsize(self) size = property( getsize, None, None, """ Size of the file, in bytes. .. 
seealso:: :meth:`getsize`, :func:`os.path.getsize` """) if hasattr(os, 'access'): def access(self, mode): """ Return ``True`` if current user has access to this path. mode - One of the constants :data:`os.F_OK`, :data:`os.R_OK`, :data:`os.W_OK`, :data:`os.X_OK` .. seealso:: :func:`os.access` """ return os.access(self, mode) def stat(self): """ Perform a ``stat()`` system call on this path. .. seealso:: :meth:`lstat`, :func:`os.stat` """ return os.stat(self) def lstat(self): """ Like :meth:`stat`, but do not follow symbolic links. .. seealso:: :meth:`stat`, :func:`os.lstat` """ return os.lstat(self) def __get_owner_windows(self): r""" Return the name of the owner of this file or directory. Follow symbolic links. Return a name of the form ``ur'DOMAIN\User Name'``; may be a group. .. seealso:: :attr:`owner` """ desc = win32security.GetFileSecurity( self, win32security.OWNER_SECURITY_INFORMATION) sid = desc.GetSecurityDescriptorOwner() account, domain, typecode = win32security.LookupAccountSid(None, sid) return domain + u('\\') + account def __get_owner_unix(self): """ Return the name of the owner of this file or directory. Follow symbolic links. .. seealso:: :attr:`owner` """ st = self.stat() return pwd.getpwuid(st.st_uid).pw_name def __get_owner_not_implemented(self): raise NotImplementedError("Ownership not available on this platform.") if 'win32security' in globals(): get_owner = __get_owner_windows elif 'pwd' in globals(): get_owner = __get_owner_unix else: get_owner = __get_owner_not_implemented owner = property( get_owner, None, None, """ Name of the owner of this file or directory. .. seealso:: :meth:`get_owner`""") if hasattr(os, 'statvfs'): def statvfs(self): """ Perform a ``statvfs()`` system call on this path. .. seealso:: :func:`os.statvfs` """ return os.statvfs(self) if hasattr(os, 'pathconf'): def pathconf(self, name): """ .. 
seealso:: :func:`os.pathconf` """ return os.pathconf(self, name) # # --- Modifying operations on files and directories def utime(self, times): """ Set the access and modified times of this file. .. seealso:: :func:`os.utime` """ os.utime(self, times) return self def chmod(self, mode): """ Set the mode. May be the new mode (os.chmod behavior) or a `symbolic mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_. .. seealso:: :func:`os.chmod` """ if isinstance(mode, string_types): mask = _multi_permission_mask(mode) mode = mask(self.stat().st_mode) os.chmod(self, mode) return self def chown(self, uid=-1, gid=-1): """ Change the owner and group by names rather than the uid or gid numbers. .. seealso:: :func:`os.chown` """ if hasattr(os, 'chown'): if 'pwd' in globals() and isinstance(uid, string_types): uid = pwd.getpwnam(uid).pw_uid if 'grp' in globals() and isinstance(gid, string_types): gid = grp.getgrnam(gid).gr_gid os.chown(self, uid, gid) else: raise NotImplementedError("Ownership not available on this platform.") return self def rename(self, new): """ .. seealso:: :func:`os.rename` """ os.rename(self, new) return self._next_class(new) def renames(self, new): """ .. seealso:: :func:`os.renames` """ os.renames(self, new) return self._next_class(new) # # --- Create/delete operations on directories def mkdir(self, mode=0o777): """ .. seealso:: :func:`os.mkdir` """ os.mkdir(self, mode) return self def mkdir_p(self, mode=0o777): """ Like :meth:`mkdir`, but does not raise an exception if the directory already exists. """ try: self.mkdir(mode) except OSError: _, e, _ = sys.exc_info() if e.errno != errno.EEXIST: raise return self def makedirs(self, mode=0o777): """ .. seealso:: :func:`os.makedirs` """ os.makedirs(self, mode) return self def makedirs_p(self, mode=0o777): """ Like :meth:`makedirs`, but does not raise an exception if the directory already exists. 
""" try: self.makedirs(mode) except OSError: _, e, _ = sys.exc_info() if e.errno != errno.EEXIST: raise return self def rmdir(self): """ .. seealso:: :func:`os.rmdir` """ os.rmdir(self) return self def rmdir_p(self): """ Like :meth:`rmdir`, but does not raise an exception if the directory is not empty or does not exist. """ try: self.rmdir() except OSError: _, e, _ = sys.exc_info() if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST: raise return self def removedirs(self): """ .. seealso:: :func:`os.removedirs` """ os.removedirs(self) return self def removedirs_p(self): """ Like :meth:`removedirs`, but does not raise an exception if the directory is not empty or does not exist. """ try: self.removedirs() except OSError: _, e, _ = sys.exc_info() if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST: raise return self # --- Modifying operations on files def touch(self): """ Set the access/modified times of this file to the current time. Create the file if it does not exist. """ fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666) os.close(fd) os.utime(self, None) return self def remove(self): """ .. seealso:: :func:`os.remove` """ os.remove(self) return self def remove_p(self): """ Like :meth:`remove`, but does not raise an exception if the file does not exist. """ try: self.unlink() except OSError: _, e, _ = sys.exc_info() if e.errno != errno.ENOENT: raise return self def unlink(self): """ .. seealso:: :func:`os.unlink` """ os.unlink(self) return self def unlink_p(self): """ Like :meth:`unlink`, but does not raise an exception if the file does not exist. """ self.remove_p() return self # --- Links if hasattr(os, 'link'): def link(self, newpath): """ Create a hard link at `newpath`, pointing to this file. .. seealso:: :func:`os.link` """ os.link(self, newpath) return self._next_class(newpath) if hasattr(os, 'symlink'): def symlink(self, newlink): """ Create a symbolic link at `newlink`, pointing here. .. 
seealso:: :func:`os.symlink` """ os.symlink(self, newlink) return self._next_class(newlink) if hasattr(os, 'readlink'): def readlink(self): """ Return the path to which this symbolic link points. The result may be an absolute or a relative path. .. seealso:: :meth:`readlinkabs`, :func:`os.readlink` """ return self._next_class(os.readlink(self)) def readlinkabs(self): """ Return the path to which this symbolic link points. The result is always an absolute path. .. seealso:: :meth:`readlink`, :func:`os.readlink` """ p = self.readlink() if p.isabs(): return p else: return (self.parent / p).abspath() # High-level functions from shutil # These functions will be bound to the instance such that # Path(name).copy(target) will invoke shutil.copy(name, target) copyfile = shutil.copyfile copymode = shutil.copymode copystat = shutil.copystat copy = shutil.copy copy2 = shutil.copy2 copytree = shutil.copytree if hasattr(shutil, 'move'): move = shutil.move rmtree = shutil.rmtree def rmtree_p(self): """ Like :meth:`rmtree`, but does not raise an exception if the directory does not exist. """ try: self.rmtree() except OSError: _, e, _ = sys.exc_info() if e.errno != errno.ENOENT: raise return self def chdir(self): """ .. seealso:: :func:`os.chdir` """ os.chdir(self) cd = chdir def merge_tree(self, dst, symlinks=False, *args, **kwargs): """ Copy entire contents of self to dst, overwriting existing contents in dst with those in self. If the additional keyword `update` is True, each `src` will only be copied if `dst` does not exist, or `src` is newer than `dst`. Note that the technique employed stages the files in a temporary directory first, so this function is not suitable for merging trees with large files, especially if the temporary directory is not capable of storing a copy of the entire source tree. """ update = kwargs.pop('update', False) with tempdir() as _temp_dir: # first copy the tree to a stage directory to support # the parameters and behavior of copytree. 
stage = _temp_dir / str(hash(self)) self.copytree(stage, symlinks, *args, **kwargs) # now copy everything from the stage directory using # the semantics of dir_util.copy_tree dir_util.copy_tree(stage, dst, preserve_symlinks=symlinks, update=update) # # --- Special stuff from os if hasattr(os, 'chroot'): def chroot(self): """ .. seealso:: :func:`os.chroot` """ os.chroot(self) if hasattr(os, 'startfile'): def startfile(self): """ .. seealso:: :func:`os.startfile` """ os.startfile(self) return self # in-place re-writing, courtesy of Martijn Pieters # http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/ @contextlib.contextmanager def in_place(self, mode='r', buffering=-1, encoding=None, errors=None, newline=None, backup_extension=None): """ A context in which a file may be re-written in-place with new content. Yields a tuple of :samp:`({readable}, {writable})` file objects, where `writable` replaces `readable`. If an exception occurs, the old file is restored, removing the written data. Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only read-only-modes are allowed. A :exc:`ValueError` is raised on invalid modes. For example, to add line numbers to a file:: p = Path(filename) assert p.isfile() with p.in_place() as (reader, writer): for number, line in enumerate(reader, 1): writer.write('{0:3}: '.format(number))) writer.write(line) Thereafter, the file at `filename` will have line numbers in it. 
""" import io if set(mode).intersection('wa+'): raise ValueError('Only read-only file modes can be used') # move existing file to backup, create new file with same permissions # borrowed extensively from the fileinput module backup_fn = self + (backup_extension or os.extsep + 'bak') try: os.unlink(backup_fn) except os.error: pass os.rename(self, backup_fn) readable = io.open(backup_fn, mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline) try: perm = os.fstat(readable.fileno()).st_mode except OSError: writable = open(self, 'w' + mode.replace('r', ''), buffering=buffering, encoding=encoding, errors=errors, newline=newline) else: os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC if hasattr(os, 'O_BINARY'): os_mode |= os.O_BINARY fd = os.open(self, os_mode, perm) writable = io.open(fd, "w" + mode.replace('r', ''), buffering=buffering, encoding=encoding, errors=errors, newline=newline) try: if hasattr(os, 'chmod'): os.chmod(self, perm) except OSError: pass try: yield readable, writable except Exception: # move backup back readable.close() writable.close() try: os.unlink(self) except os.error: pass os.rename(backup_fn, self) raise else: readable.close() writable.close() finally: try: os.unlink(backup_fn) except os.error: pass @ClassProperty @classmethod def special(cls): """ Return a SpecialResolver object suitable referencing a suitable directory for the relevant platform for the given type of content. For example, to get a user config directory, invoke: dir = Path.special().user.config Uses the `appdirs <https://pypi.python.org/pypi/appdirs/1.4.0>`_ to resolve the paths in a platform-friendly way. To create a config directory for 'My App', consider: dir = Path.special("My App").user.config.makedirs_p() If the ``appdirs`` module is not installed, invocation of special will raise an ImportError. 
""" return functools.partial(SpecialResolver, cls) class SpecialResolver(object): class ResolverScope: def __init__(self, paths, scope): self.paths = paths self.scope = scope def __getattr__(self, class_): return self.paths.get_dir(self.scope, class_) def __init__(self, path_class, *args, **kwargs): appdirs = importlib.import_module('appdirs') # let appname default to None until # https://github.com/ActiveState/appdirs/issues/55 is solved. not args and kwargs.setdefault('appname', None) vars(self).update( path_class=path_class, wrapper=appdirs.AppDirs(*args, **kwargs), ) def __getattr__(self, scope): return self.ResolverScope(self, scope) def get_dir(self, scope, class_): """ Return the callable function from appdirs, but with the result wrapped in self.path_class """ prop_name = '{scope}_{class_}_dir'.format(**locals()) value = getattr(self.wrapper, prop_name) MultiPath = Multi.for_class(self.path_class) return MultiPath.detect(value) class Multi: """ A mix-in for a Path which may contain multiple Path separated by pathsep. """ @classmethod def for_class(cls, path_cls): name = 'Multi' + path_cls.__name__ return type(name, (cls, path_cls), {}) @classmethod def detect(cls, input): if os.pathsep not in input: cls = cls._next_class return cls(input) def __iter__(self): return iter(map(self._next_class, self.split(os.pathsep))) @ClassProperty @classmethod def _next_class(cls): """ Multi-subclasses should use the parent class """ return next( class_ for class_ in cls.__mro__ if not issubclass(class_, Multi) ) class tempdir(Path): """ A temporary directory via :func:`tempfile.mkdtemp`, and constructed with the same parameters that you can use as a context manager. Example: with tempdir() as d: # do stuff with the Path object "d" # here the directory is deleted automatically .. 
seealso:: :func:`tempfile.mkdtemp` """ @ClassProperty @classmethod def _next_class(cls): return Path def __new__(cls, *args, **kwargs): dirname = tempfile.mkdtemp(*args, **kwargs) return super(tempdir, cls).__new__(cls, dirname) def __init__(self, *args, **kwargs): pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if not exc_value: self.rmtree() def _multi_permission_mask(mode): """ Support multiple, comma-separated Unix chmod symbolic modes. >>> _multi_permission_mask('a=r,u+w')(0) == 0o644 True """ compose = lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)) return functools.reduce(compose, map(_permission_mask, mode.split(','))) def _permission_mask(mode): """ Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function suitable for applying to a mask to affect that change. >>> mask = _permission_mask('ugo+rwx') >>> mask(0o554) == 0o777 True >>> _permission_mask('go-x')(0o777) == 0o766 True >>> _permission_mask('o-x')(0o445) == 0o444 True >>> _permission_mask('a+x')(0) == 0o111 True >>> _permission_mask('a=rw')(0o057) == 0o666 True >>> _permission_mask('u=x')(0o666) == 0o166 True >>> _permission_mask('g=')(0o157) == 0o107 True """ # parse the symbolic mode parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode) if not parsed: raise ValueError("Unrecognized symbolic mode", mode) # generate a mask representing the specified permission spec_map = dict(r=4, w=2, x=1) specs = (spec_map[perm] for perm in parsed.group('what')) spec = functools.reduce(operator.or_, specs, 0) # now apply spec to each subject in who shift_map = dict(u=6, g=3, o=0) who = parsed.group('who').replace('a', 'ugo') masks = (spec << shift_map[subj] for subj in who) mask = functools.reduce(operator.or_, masks) op = parsed.group('op') # if op is -, invert the mask if op == '-': mask ^= 0o777 # if op is =, retain extant values for unreferenced subjects if op == '=': masks = (0o7 << shift_map[subj] for subj in who) retain = 
functools.reduce(operator.or_, masks) ^ 0o777 op_map = { '+': operator.or_, '-': operator.and_, '=': lambda mask, target: target & retain ^ mask, } return functools.partial(op_map[op], mask) class CaseInsensitivePattern(text_type): """ A string with a ``'normcase'`` property, suitable for passing to :meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`, :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitive. For example, to get all files ending in .py, .Py, .pY, or .PY in the current directory:: from path import Path, CaseInsensitivePattern as ci Path('.').files(ci('*.py')) """ @property def normcase(self): return __import__('ntpath').normcase ######################## # Backward-compatibility class path(Path): def __new__(cls, *args, **kwargs): msg = "path is deprecated. Use Path instead." warnings.warn(msg, DeprecationWarning) return Path.__new__(cls, *args, **kwargs) __all__ += ['path'] ########################
mit
lwiecek/django
django/contrib/postgres/aggregates/statistics.py
493
2033
from django.db.models import FloatField, IntegerField from django.db.models.aggregates import Aggregate __all__ = [ 'CovarPop', 'Corr', 'RegrAvgX', 'RegrAvgY', 'RegrCount', 'RegrIntercept', 'RegrR2', 'RegrSlope', 'RegrSXX', 'RegrSXY', 'RegrSYY', 'StatAggregate', ] class StatAggregate(Aggregate): def __init__(self, y, x, output_field=FloatField()): if not x or not y: raise ValueError('Both y and x must be provided.') super(StatAggregate, self).__init__(y=y, x=x, output_field=output_field) self.x = x self.y = y self.source_expressions = self._parse_expressions(self.y, self.x) def get_source_expressions(self): return self.y, self.x def set_source_expressions(self, exprs): self.y, self.x = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): return super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize) class Corr(StatAggregate): function = 'CORR' class CovarPop(StatAggregate): def __init__(self, y, x, sample=False): self.function = 'COVAR_SAMP' if sample else 'COVAR_POP' super(CovarPop, self).__init__(y, x) class RegrAvgX(StatAggregate): function = 'REGR_AVGX' class RegrAvgY(StatAggregate): function = 'REGR_AVGY' class RegrCount(StatAggregate): function = 'REGR_COUNT' def __init__(self, y, x): super(RegrCount, self).__init__(y=y, x=x, output_field=IntegerField()) def convert_value(self, value, expression, connection, context): if value is None: return 0 return int(value) class RegrIntercept(StatAggregate): function = 'REGR_INTERCEPT' class RegrR2(StatAggregate): function = 'REGR_R2' class RegrSlope(StatAggregate): function = 'REGR_SLOPE' class RegrSXX(StatAggregate): function = 'REGR_SXX' class RegrSXY(StatAggregate): function = 'REGR_SXY' class RegrSYY(StatAggregate): function = 'REGR_SYY'
bsd-3-clause
teamfx/openjfx-9-dev-rt
modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
1
19188
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import logging import re import time from webkitpy.layout_tests.controllers import test_result_writer from webkitpy.port.driver import DriverInput, DriverOutput from webkitpy.layout_tests.models import test_expectations from webkitpy.layout_tests.models import test_failures from webkitpy.layout_tests.models.test_results import TestResult _log = logging.getLogger(__name__) def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done): runner = SingleTestRunner(port, options, results_directory, worker_name, driver, test_input, stop_when_done) return runner.run() class SingleTestRunner(object): (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update') def __init__(self, port, options, results_directory, worker_name, driver, test_input, stop_when_done): self._port = port self._filesystem = port.host.filesystem self._options = options self._results_directory = results_directory self._driver = driver self._worker_name = worker_name self._test_name = test_input.test_name self._should_run_pixel_test = test_input.should_run_pixel_test self._reference_files = test_input.reference_files self._stop_when_done = stop_when_done self._timeout = test_input.timeout if self._reference_files: # Detect and report a test which has a wrong combination of expectation files. # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and # 'foo-expected.txt', we should warn users. One test file must be used exclusively # in either layout tests or reftests, but not in both. for suffix in ('.txt', '.png', '.wav'): expected_filename = self._port.expected_filename(self._test_name, suffix) if self._filesystem.exists(expected_filename): _log.error('%s is a reftest, but has an unused expectation file. 
Please remove %s.', self._test_name, expected_filename) def _expected_driver_output(self): return DriverOutput(self._port.expected_text(self._test_name), self._port.expected_image(self._test_name), self._port.expected_checksum(self._test_name), self._port.expected_audio(self._test_name)) def _should_fetch_expected_checksum(self): return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results) def _driver_input(self): # The image hash is used to avoid doing an image dump if the # checksums match, so it should be set to a blank value if we # are generating a new baseline. (Otherwise, an image from a # previous run will be copied into the baseline.""" image_hash = None if self._should_fetch_expected_checksum(): image_hash = self._port.expected_checksum(self._test_name) return DriverInput(self._test_name, self._timeout, image_hash, self._should_run_pixel_test) def run(self): if self._reference_files: if self._port.get_option('no_ref_tests') or self._options.reset_results: reftest_type = set([reference_file[0] for reference_file in self._reference_files]) result = TestResult(self._test_name, reftest_type=reftest_type) result.type = test_expectations.SKIP return result return self._run_reftest() if self._options.reset_results: return self._run_rebaseline() return self._run_compare_test() def _run_compare_test(self): driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done) expected_driver_output = self._expected_driver_output() if self._options.ignore_metrics: expected_driver_output.strip_metrics() driver_output.strip_metrics() patterns = self._port.logging_patterns_to_strip() expected_driver_output.strip_patterns(patterns) driver_output.strip_patterns(patterns) driver_output.strip_stderror_patterns(self._port.stderr_patterns_to_strip()) test_result = self._compare_output(expected_driver_output, driver_output) if self._options.new_test_results: self._add_missing_baselines(test_result, driver_output) 
test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures) return test_result def _run_rebaseline(self): driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done) failures = self._handle_error(driver_output) test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures) # FIXME: It the test crashed or timed out, it might be better to avoid # to write new baselines. self._overwrite_baselines(driver_output) return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid) _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n") def _add_missing_baselines(self, test_result, driver_output): missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash) if test_result.has_failure_matching_types(test_failures.FailureMissingResult): self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt')) if test_result.has_failure_matching_types(test_failures.FailureMissingAudio): self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav')) if missingImage: self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png')) def _location_for_new_baseline(self, data, extension): if self._options.add_platform_exceptions: return self.VERSION_DIR if extension == '.png': return self.PLATFORM_DIR if extension == '.wav': return self.ALONGSIDE_TEST if extension == '.txt' and self._render_tree_dump_pattern.match(data): return self.PLATFORM_DIR return self.ALONGSIDE_TEST def _overwrite_baselines(self, driver_output): location = self.VERSION_DIR if self._options.add_platform_exceptions else 
self.UPDATE self._save_baseline_data(driver_output.text, '.txt', location) self._save_baseline_data(driver_output.audio, '.wav', location) if self._should_run_pixel_test: self._save_baseline_data(driver_output.image, '.png', location) def _save_baseline_data(self, data, extension, location): if data is None: return port = self._port fs = self._filesystem if location == self.ALONGSIDE_TEST: output_dir = fs.dirname(port.abspath_for_test(self._test_name)) elif location == self.VERSION_DIR: output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._test_name)) elif location == self.PLATFORM_DIR: output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._test_name)) elif location == self.UPDATE: output_dir = fs.dirname(port.expected_filename(self._test_name, extension)) else: raise AssertionError('unrecognized baseline location: %s' % location) fs.maybe_make_directory(output_dir) output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension) output_path = fs.join(output_dir, output_basename) _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path)) port.update_baseline(output_path, data) def _handle_error(self, driver_output, reference_filename=None): """Returns test failures if some unusual errors happen in driver's run. Args: driver_output: The output from the driver. reference_filename: The full path to the reference file which produced the driver_output. This arg is optional and should be used only in reftests until we have a better way to know which html file is used for producing the driver_output. 
""" failures = [] fs = self._filesystem if driver_output.timeout: failures.append(test_failures.FailureTimeout(bool(reference_filename))) if reference_filename: testname = self._port.relative_test_filename(reference_filename) else: testname = self._test_name if driver_output.crash: failures.append(test_failures.FailureCrash(bool(reference_filename), driver_output.crashed_process_name, driver_output.crashed_pid)) if driver_output.error: _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname)) else: _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname)) elif driver_output.error: _log.debug("%s %s output stderr lines:" % (self._worker_name, testname)) for line in driver_output.error.splitlines(): _log.debug(" %s" % line) return failures def _compare_output(self, expected_driver_output, driver_output): failures = [] failures.extend(self._handle_error(driver_output)) if driver_output.crash: # Don't continue any more if we already have a crash. # In case of timeouts, we continue since we still want to see the text and image output. return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid) failures.extend(self._compare_text(expected_driver_output.text, driver_output.text)) failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio)) if self._should_run_pixel_test: failures.extend(self._compare_image(expected_driver_output, driver_output)) return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid) def _compare_text(self, expected_text, actual_text): failures = [] if (expected_text and actual_text and # Assuming expected_text is already normalized. 
self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))): failures.append(test_failures.FailureTextMismatch()) elif actual_text and not expected_text: failures.append(test_failures.FailureMissingResult()) return failures def _compare_audio(self, expected_audio, actual_audio): failures = [] if (expected_audio and actual_audio and self._port.do_audio_results_differ(expected_audio, actual_audio)): failures.append(test_failures.FailureAudioMismatch()) elif actual_audio and not expected_audio: failures.append(test_failures.FailureMissingAudio()) return failures def _get_normalized_output_text(self, output): """Returns the normalized text output, i.e. the output in which the end-of-line characters are normalized to "\n".""" # Running tests on Windows produces "\r\n". The "\n" part is helpfully # changed to "\r\n" by our system (Python/Cygwin), resulting in # "\r\r\n", when, in fact, we wanted to compare the text output with # the normalized text expectation files. return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n") # FIXME: This function also creates the image diff. Maybe that work should # be handled elsewhere? def _compare_image(self, expected_driver_output, driver_output): failures = [] # If we didn't produce a hash file, this test must be text-only. 
if driver_output.image_hash is None: return failures if not expected_driver_output.image: failures.append(test_failures.FailureMissingImage()) elif not expected_driver_output.image_hash: failures.append(test_failures.FailureMissingImageHash()) elif driver_output.image_hash != expected_driver_output.image_hash: diff_result = self._port.diff_image(expected_driver_output.image, driver_output.image) err_str = diff_result[2] if err_str: _log.warning(' %s : %s' % (self._test_name, err_str)) failures.append(test_failures.FailureImageHashMismatch()) driver_output.error = (driver_output.error or '') + err_str else: driver_output.image_diff = diff_result[0] if driver_output.image_diff: failures.append(test_failures.FailureImageHashMismatch(diff_result[1])) else: # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure. _log.warning(' %s -> pixel hash failed (but diff passed)' % self._test_name) return failures def _run_reftest(self): test_output = self._driver.run_test(self._driver_input(), self._stop_when_done) total_test_time = 0 reference_output = None test_result = None # A reftest can have multiple match references and multiple mismatch references; # the test fails if any mismatch matches and all of the matches don't match. # To minimize the number of references we have to check, we run all of the mismatches first, # then the matches, and short-circuit out as soon as we can. # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do. 
putAllMismatchBeforeMatch = sorted reference_test_names = [] for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files): reference_test_name = self._port.relative_test_filename(reference_filename) reference_test_names.append(reference_test_name) reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done) test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=') if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures): break total_test_time += test_result.test_run_time assert(reference_output) test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures) reftest_type = set([reference_file[0] for reference_file in self._reference_files]) return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names) def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch): total_test_time = reference_driver_output.test_time + actual_driver_output.test_time has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr() failures = [] failures.extend(self._handle_error(actual_driver_output)) if failures: # Don't continue any more if we already have crash or timeout. 
return TestResult(self._test_name, failures, total_test_time, has_stderr) failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename)) if failures: return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid) if not reference_driver_output.image_hash and not actual_driver_output.image_hash: failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename)) elif mismatch: if reference_driver_output.image_hash == actual_driver_output.image_hash: diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0) if not diff_result[0]: failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename)) else: _log.warning(" %s -> ref test hashes matched but diff failed" % self._test_name) elif reference_driver_output.image_hash != actual_driver_output.image_hash: diff_result = self._port.diff_image(reference_driver_output.image, actual_driver_output.image, tolerance=0) if diff_result[0]: failures.append(test_failures.FailureReftestMismatch(reference_filename)) else: _log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name) return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
gpl-2.0
rosmo/ansible
lib/ansible/modules/network/f5/bigip_provision.py
15
28869
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_provision short_description: Manage BIG-IP module provisioning description: - Manage BIG-IP module provisioning. This module will only provision at the standard levels of Dedicated, Nominal, and Minimum. version_added: 2.4 options: module: description: - The module to provision in BIG-IP. type: str required: True choices: - am - afm - apm - asm - avr - cgnat - fps - gtm - ilx - lc - ltm - pem - sam - swg - vcmp aliases: - name level: description: - Sets the provisioning level for the requested modules. Changing the level for one module may require modifying the level of another module. For example, changing one module to C(dedicated) requires setting all others to C(none). Setting the level of a module to C(none) means that the module is not activated. - This parameter is not relevant to C(cgnat) and will not be applied to the C(cgnat) module. type: str choices: - dedicated - nominal - minimum default: nominal state: description: - The state of the provisioned module on the system. When C(present), guarantees that the specified module is provisioned at the requested level provided that there are sufficient resources on the device (such as physical RAM) to support the provisioned module. When C(absent), de-provision the module. 
type: str choices: - present - absent default: present extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Provision PEM at "nominal" level bigip_provision: module: pem level: nominal provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost - name: Provision a dedicated SWG. This will unprovision every other module bigip_provision: module: swg level: dedicated provider: server: lb.mydomain.com password: secret user: admin delegate_to: localhost ''' RETURN = r''' level: description: The new provisioning level of the module. returned: changed type: str sample: minimum ''' import time from ansible.module_utils.basic import AnsibleModule try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.icontrol import TransactionContextManager except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.icontrol import TransactionContextManager class Parameters(AnsibleF5Parameters): api_attributes = ['level'] returnables = ['level'] updatables = ['level', 'cgnat'] @property def level(self): if self._values['level'] is None: return None if self.state == 'absent': return 'none' return str(self._values['level']) class ApiParameters(Parameters): pass class ModuleParameters(Parameters): pass class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) return result except Exception: 
return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: result = self.__default(param) return result def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def cgnat(self): if self.want.module == 'cgnat': if self.want.state == 'absent' and self.have.enabled is True: return True if self.want.state == 'present' and self.have.disabled is True: return True class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.have = None self.want = ModuleParameters(params=self.module.params) self.changes = UsableChanges() def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def exec_module(self): changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def present(self): if self.exists(): return False return self.update() def exists(self): if self.want.module == 'cgnat': uri = "https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 
'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'disabled' in response and response['disabled'] is True: return False elif 'enabled' in response and response['enabled'] is True: return True try: for x in range(0, 5): uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if str(response['level']) != 'none' and self.want.level == 'none': return True if str(response['level']) == 'none' and self.want.level == 'none': return False if str(response['level']) == self.want.level: return True return False except Exception as ex: if 'not registered' in str(ex): return False time.sleep(1) def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True result = self.update_on_device() if self.want.module == 'cgnat': return result self._wait_for_module_provisioning() if self.want.module == 'vcmp': self._wait_for_reboot() self._wait_for_module_provisioning() if self.want.module == 'asm': self._wait_for_asm_ready() if self.want.module == 'afm': self._wait_for_afm_ready() return True def should_reboot(self): for x in range(0, 24): try: uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format( self.client.provider['server'], self.client.provider['server_port'], 'provision.action' ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise 
F5ModuleError(resp.content) if response['value'] == 'reboot': return True elif response['value'] == 'none': time.sleep(5) except Exception: time.sleep(5) return False def reboot_device(self): nops = 0 last_reboot = self._get_last_reboot() try: params = dict( command="run", utilCmdArgs='-c "/sbin/reboot"' ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) except Exception: pass # Sleep a little to let rebooting take effect time.sleep(20) while nops < 3: try: self.client.reconnect() next_reboot = self._get_last_reboot() if next_reboot is None: nops = 0 if next_reboot == last_reboot: nops = 0 else: nops += 1 except Exception as ex: # This can be caused by restjavad restarting. 
pass time.sleep(10) return None def should_update(self): result = self._update_changed_options() if result: return True return False def update_on_device(self): if self.want.module == 'cgnat': if self.changes.cgnat: return self.provision_cgnat_on_device() return False elif self.want.level == 'dedicated': self.provision_dedicated_on_device() else: self.provision_non_dedicated_on_device() def provision_cgnat_on_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) params = dict(enabled=True) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return True def provision_dedicated_on_device(self): params = self.want.api_params() uri = "https://{0}:{1}/mgmt/tm/sys/provision/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) resources = [x['name'] for x in response['items'] if x['name'] != self.want.module] with TransactionContextManager(self.client) as transact: for resource in resources: target = uri + resource resp = transact.api.patch(target, json=dict(level='none')) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) target = uri + self.want.module resp = transact.api.patch(target, json=params) try: response = resp.json() except ValueError as ex: raise 
F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def provision_non_dedicated_on_device(self): params = self.want.api_params() uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def read_current_from_device(self): if self.want.module == 'cgnat': uri = "https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) else: uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) def absent(self): if self.exists(): return self.remove() return False def remove(self): if self.module.check_mode: return True if self.want.module == 'cgnat': return self.deprovision_cgnat_on_device() self.remove_from_device() self._wait_for_module_provisioning() # For vCMP, because it has to reboot, we also wait for mcpd to become available # before "moving on", or 
else the REST API would not be available and subsequent # Tasks would fail. if self.want.module == 'vcmp': self._wait_for_reboot() self._wait_for_module_provisioning() if self.should_reboot(): self.save_on_device() self.reboot_device() self._wait_for_module_provisioning() if self.exists(): raise F5ModuleError("Failed to de-provision the module") return True def save_on_device(self): command = 'tmsh save sys config' params = dict( command="run", utilCmdArgs='-c "{0}"'.format(command) ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/provision/{2}".format( self.client.provider['server'], self.client.provider['server_port'], self.want.module ) resp = self.client.api.patch(uri, json=dict(level='none')) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def deprovision_cgnat_on_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/feature-module/cgnat/".format( self.client.provider['server'], self.client.provider['server_port'], ) params = dict(disabled=True) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return True def _wait_for_module_provisioning(self): # To prevent things from running forever, the hack is to check # for mprov's 
status twice. If mprov is finished, then in most # cases (not ASM) the provisioning is probably ready. nops = 0 # Sleep a little to let provisioning settle and begin properly time.sleep(5) while nops < 3: try: if not self._is_mprov_running_on_device(): nops += 1 else: nops = 0 except Exception: # This can be caused by restjavad restarting. try: self.client.reconnect() except Exception: pass time.sleep(5) def _is_mprov_running_on_device(self): # /usr/libexec/qemu-kvm is added here to prevent vcmp provisioning # from never allowing the mprov provisioning to succeed. # # It turns out that the 'mprov' string is found when enabling vcmp. The # qemu-kvm command that is run includes it. # # For example, # /usr/libexec/qemu-kvm -rt-usecs 880 ... -mem-path /dev/mprov/vcmp -f5-tracing ... # try: command = "ps aux | grep \'[m]prov\' | grep -v /usr/libexec/qemu-kvm" params = dict( command="run", utilCmdArgs='-c "{0}"'.format(command) ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return True except Exception: pass return False def _wait_for_asm_ready(self): """Waits specifically for ASM On older versions, ASM can take longer to actually start up than all the previous checks take. 
This check here is specifically waiting for the Policies API to stop raising errors :return: """ nops = 0 restarted_asm = False while nops < 3: try: uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if len(response['items']) >= 0: nops += 1 else: nops = 0 except Exception as ex: if not restarted_asm: self._restart_asm() restarted_asm = True time.sleep(5) def _wait_for_afm_ready(self): """Waits specifically for AFM AFM can take longer to actually start up than all the previous checks take. This check here is specifically waiting for the Security API to stop raising errors. :return: """ nops = 0 while nops < 3: try: uri = "https://{0}:{1}/mgmt/tm/security/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if len(response['items']) >= 0: nops += 1 else: nops = 0 except Exception as ex: pass time.sleep(5) def _restart_asm(self): try: params = dict( command="run", utilCmdArgs='-c "bigstart restart asm"' ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) 
time.sleep(60) return True except Exception: pass return None def _get_last_reboot(self): try: params = dict( command="run", utilCmdArgs='-c "/usr/bin/last reboot | head -1"' ) uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if 'commandResult' in response: return str(response['commandResult']) except Exception: pass return None def _wait_for_reboot(self): nops = 0 last_reboot = self._get_last_reboot() # Sleep a little to let provisioning settle and begin properly time.sleep(5) while nops < 6: try: self.client.reconnect() next_reboot = self._get_last_reboot() if next_reboot is None: nops = 0 if next_reboot == last_reboot: nops = 0 else: nops += 1 except Exception as ex: # This can be caused by restjavad restarting. 
pass time.sleep(10) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( module=dict( required=True, choices=[ 'afm', 'am', 'sam', 'asm', 'avr', 'fps', 'gtm', 'lc', 'ltm', 'pem', 'swg', 'ilx', 'apm', 'vcmp', 'cgnat' ], aliases=['name'] ), level=dict( default='nominal', choices=['nominal', 'dedicated', 'minimum'] ), state=dict( default='present', choices=['present', 'absent'] ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ['parameters', 'parameters_src'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
gpl-3.0
tarzan0820/odoo
addons/account/account_bank.py
258
5481
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tools.translate import _
from openerp.osv import fields, osv

class bank(osv.osv):
    """Extension of ``res.partner.bank``.

    Whenever a bank account is created or written and it belongs to a
    company but has no journal yet, :meth:`post_write` auto-creates a
    liquidity account plus a bank journal and links the journal back to
    the bank record.
    """
    _inherit = "res.partner.bank"
    _columns = {
        # Filled in automatically by post_write() below, not by the user.
        'journal_id': fields.many2one('account.journal', 'Account Journal', help="This journal will be created automatically for this bank account when you save the record"),
        # Read-only mirror of the auto-created journal's currency.
        'currency_id': fields.related('journal_id', 'currency', type="many2one", relation='res.currency', readonly=True, string="Currency", help="Currency of the related account journal."),
    }

    def create(self, cr, uid, data, context=None):
        """Create the bank account, then trigger journal auto-creation."""
        result = super(bank, self).create(cr, uid, data, context=context)
        self.post_write(cr, uid, [result], context=context)
        return result

    def write(self, cr, uid, ids, data, context=None):
        """Update the bank account, then (re-)run journal auto-creation.

        Needed because a bank account may only gain a company after it
        was first created.
        """
        result = super(bank, self).write(cr, uid, ids, data, context=context)
        self.post_write(cr, uid, ids, context=context)
        return result

    def _prepare_name(self, bank):
        "Return the name to use when creating a bank journal"
        return (bank.bank_name or '') + ' ' + (bank.acc_number or '')

    def _prepare_name_get(self, cr, uid, bank_dicts, context=None):
        """Add ability to have %(currency_name)s in the format_layout of res.partner.bank.type"""
        # Collect the distinct related currencies once, so name_get does a
        # single browse instead of one per bank record.
        currency_ids = list(set(data['currency_id'][0] for data in bank_dicts if data.get('currency_id')))
        currencies = self.pool.get('res.currency').browse(cr, uid, currency_ids, context=context)
        currency_name = dict((currency.id, currency.name) for currency in currencies)
        for data in bank_dicts:
            data['currency_name'] = data.get('currency_id') and currency_name[data['currency_id'][0]] or ''
        return super(bank, self)._prepare_name_get(cr, uid, bank_dicts, context=context)

    def post_write(self, cr, uid, ids, context=None):
        """Auto-create the accounting objects behind each bank account.

        For every record in ``ids`` that has a company but no journal:

        * pick an existing liquidity account to use as a code/parent
          template (skip the record if none exists),
        * create a new liquidity account with the first free numeric
          code under that parent,
        * create an ``account.journal`` of type 'bank' with the first
          free 'BNK<n>' code, pointing its default debit/credit
          accounts at the new account,
        * write the journal back onto the bank record.

        Always returns True.
        """
        if isinstance(ids, (int, long)):
            ids = [ids]

        obj_acc = self.pool.get('account.account')
        # NOTE(review): obj_data is never used below; kept as-is to avoid
        # any behavior change in this documentation pass.
        obj_data = self.pool.get('ir.model.data')

        for bank in self.browse(cr, uid, ids, context):
            if bank.company_id and not bank.journal_id:
                # Find the code and parent of the bank account to create
                # dig: total width of the generated account code
                # (parent code left-padded with '0', then the counter).
                dig = 6
                current_num = 1
                # NOTE: this rebinds the method argument `ids`; safe only
                # because the outer loop iterates over the browse records.
                ids = obj_acc.search(cr, uid, [('type','=','liquidity'), ('company_id', '=', bank.company_id.id), ('parent_id', '!=', False)], context=context)
                # No liquidity account exists, no template available
                if not ids: continue

                ref_acc_bank = obj_acc.browse(cr, uid, ids[0], context=context).parent_id
                # Probe codes <parent><1>, <parent><2>, ... until one is
                # unused for this company.
                while True:
                    new_code = str(ref_acc_bank.code.ljust(dig-len(str(current_num)), '0')) + str(current_num)
                    ids = obj_acc.search(cr, uid, [('code', '=', new_code), ('company_id', '=', bank.company_id.id)])
                    if not ids:
                        break
                    current_num += 1
                name = self._prepare_name(bank)
                acc = {
                    'name': name,
                    'code': new_code,
                    'type': 'liquidity',
                    'user_type': ref_acc_bank.user_type.id,
                    'reconcile': False,
                    'parent_id': ref_acc_bank.id,
                    'company_id': bank.company_id.id,
                }
                acc_bank_id = obj_acc.create(cr,uid,acc,context=context)

                jour_obj = self.pool.get('account.journal')
                # `new_code` is reused here as the journal sequence counter
                # (an int), distinct from the account code string above.
                new_code = 1
                while True:
                    code = _('BNK')+str(new_code)
                    ids = jour_obj.search(cr, uid, [('code','=',code)], context=context)
                    if not ids:
                        break
                    new_code += 1

                #create the bank journal
                vals_journal = {
                    'name': name,
                    'code': code,
                    'type': 'bank',
                    'company_id': bank.company_id.id,
                    'analytic_journal_id': False,
                    'default_credit_account_id': acc_bank_id,
                    'default_debit_account_id': acc_bank_id,
                }
                journal_id = jour_obj.create(cr, uid, vals_journal, context=context)

                # Link the new journal back to this bank account; this
                # re-enters write()/post_write(), but journal_id is now
                # set so the guard above makes it a no-op.
                self.write(cr, uid, [bank.id], {'journal_id': journal_id}, context=context)
        return True

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
trivoldus28/pulsarch-verilog
tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/idlelib/ColorDelegator.py
2
9256
import time import re import keyword from Tkinter import * from Delegator import Delegator from configHandler import idleConf #$ event <<toggle-auto-coloring>> #$ win <Control-slash> #$ unix <Control-slash> DEBUG = 0 def any(name, list): return "(?P<%s>" % name + "|".join(list) + ")" def make_pat(): kw = r"\b" + any("KEYWORD", keyword.kwlist) + r"\b" comment = any("COMMENT", [r"#[^\n]*"]) sqstring = r"(\b[rR])?'[^'\\\n]*(\\.[^'\\\n]*)*'?" dqstring = r'(\b[rR])?"[^"\\\n]*(\\.[^"\\\n]*)*"?' sq3string = r"(\b[rR])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?" dq3string = r'(\b[rR])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?' string = any("STRING", [sq3string, dq3string, sqstring, dqstring]) return kw + "|" + comment + "|" + string + "|" + any("SYNC", [r"\n"]) prog = re.compile(make_pat(), re.S) idprog = re.compile(r"\s+(\w+)", re.S) asprog = re.compile(r".*?\b(as)\b", re.S) class ColorDelegator(Delegator): def __init__(self): Delegator.__init__(self) self.prog = prog self.idprog = idprog self.asprog = asprog self.LoadTagDefs() def setdelegate(self, delegate): if self.delegate is not None: self.unbind("<<toggle-auto-coloring>>") Delegator.setdelegate(self, delegate) if delegate is not None: self.config_colors() self.bind("<<toggle-auto-coloring>>", self.toggle_colorize_event) self.notify_range("1.0", "end") def config_colors(self): for tag, cnf in self.tagdefs.items(): if cnf: self.tag_configure(tag, **cnf) self.tag_raise('sel') def LoadTagDefs(self): theme = idleConf.GetOption('main','Theme','name') self.tagdefs = { "COMMENT": idleConf.GetHighlight(theme, "comment"), "KEYWORD": idleConf.GetHighlight(theme, "keyword"), "STRING": idleConf.GetHighlight(theme, "string"), "DEFINITION": idleConf.GetHighlight(theme, "definition"), "SYNC": {'background':None,'foreground':None}, "TODO": {'background':None,'foreground':None}, "BREAK": idleConf.GetHighlight(theme, "break"), "ERROR": idleConf.GetHighlight(theme, "error"), # The following is used by ReplaceDialog: "hit": 
idleConf.GetHighlight(theme, "hit"), } if DEBUG: print 'tagdefs',tagdefs def insert(self, index, chars, tags=None): index = self.index(index) self.delegate.insert(index, chars, tags) self.notify_range(index, index + "+%dc" % len(chars)) def delete(self, index1, index2=None): index1 = self.index(index1) self.delegate.delete(index1, index2) self.notify_range(index1) after_id = None allow_colorizing = 1 colorizing = 0 def notify_range(self, index1, index2=None): self.tag_add("TODO", index1, index2) if self.after_id: if DEBUG: print "colorizing already scheduled" return if self.colorizing: self.stop_colorizing = 1 if DEBUG: print "stop colorizing" if self.allow_colorizing: if DEBUG: print "schedule colorizing" self.after_id = self.after(1, self.recolorize) close_when_done = None # Window to be closed when done colorizing def close(self, close_when_done=None): if self.after_id: after_id = self.after_id self.after_id = None if DEBUG: print "cancel scheduled recolorizer" self.after_cancel(after_id) self.allow_colorizing = 0 self.stop_colorizing = 1 if close_when_done: if not self.colorizing: close_when_done.destroy() else: self.close_when_done = close_when_done def toggle_colorize_event(self, event): if self.after_id: after_id = self.after_id self.after_id = None if DEBUG: print "cancel scheduled recolorizer" self.after_cancel(after_id) if self.allow_colorizing and self.colorizing: if DEBUG: print "stop colorizing" self.stop_colorizing = 1 self.allow_colorizing = not self.allow_colorizing if self.allow_colorizing and not self.colorizing: self.after_id = self.after(1, self.recolorize) if DEBUG: print "auto colorizing turned", self.allow_colorizing and "on" or "off" return "break" def recolorize(self): self.after_id = None if not self.delegate: if DEBUG: print "no delegate" return if not self.allow_colorizing: if DEBUG: print "auto colorizing is off" return if self.colorizing: if DEBUG: print "already colorizing" return try: self.stop_colorizing = 0 self.colorizing = 1 if 
DEBUG: print "colorizing..." t0 = time.clock() self.recolorize_main() t1 = time.clock() if DEBUG: print "%.3f seconds" % (t1-t0) finally: self.colorizing = 0 if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"): if DEBUG: print "reschedule colorizing" self.after_id = self.after(1, self.recolorize) if self.close_when_done: top = self.close_when_done self.close_when_done = None top.destroy() def recolorize_main(self): next = "1.0" while 1: item = self.tag_nextrange("TODO", next) if not item: break head, tail = item self.tag_remove("SYNC", head, tail) item = self.tag_prevrange("SYNC", head) if item: head = item[1] else: head = "1.0" chars = "" next = head lines_to_get = 1 ok = 0 while not ok: mark = next next = self.index(mark + "+%d lines linestart" % lines_to_get) lines_to_get = min(lines_to_get * 2, 100) ok = "SYNC" in self.tag_names(next + "-1c") line = self.get(mark, next) ##print head, "get", mark, next, "->", `line` if not line: return for tag in self.tagdefs.keys(): self.tag_remove(tag, mark, next) chars = chars + line m = self.prog.search(chars) while m: for key, value in m.groupdict().items(): if value: a, b = m.span(key) self.tag_add(key, head + "+%dc" % a, head + "+%dc" % b) if value in ("def", "class"): m1 = self.idprog.match(chars, b) if m1: a, b = m1.span(1) self.tag_add("DEFINITION", head + "+%dc" % a, head + "+%dc" % b) elif value == "import": # color all the "as" words on same line; # cheap approximation to the truth while 1: m1 = self.asprog.match(chars, b) if not m1: break a, b = m1.span(1) self.tag_add("KEYWORD", head + "+%dc" % a, head + "+%dc" % b) m = self.prog.search(chars, m.end()) if "SYNC" in self.tag_names(next + "-1c"): head = next chars = "" else: ok = 0 if not ok: # We're in an inconsistent state, and the call to # update may tell us to stop. It may also change # the correct value for "next" (since this is a # line.col string, not a true mark). 
So leave a # crumb telling the next invocation to resume here # in case update tells us to leave. self.tag_add("TODO", next) self.update() if self.stop_colorizing: if DEBUG: print "colorizing stopped" return def main(): from Percolator import Percolator root = Tk() root.wm_protocol("WM_DELETE_WINDOW", root.quit) text = Text(background="white") text.pack(expand=1, fill="both") text.focus_set() p = Percolator(text) d = ColorDelegator() p.insertfilter(d) root.mainloop() if __name__ == "__main__": main()
gpl-2.0
proppy/appengine-try-python-flask
lib/werkzeug/debug/repr.py
313
9350
# -*- coding: utf-8 -*- """ werkzeug.debug.repr ~~~~~~~~~~~~~~~~~~~ This module implements object representations for debugging purposes. Unlike the default repr these reprs expose a lot more information and produce HTML instead of ASCII. Together with the CSS and JavaScript files of the debugger this gives a colorful and more compact output. :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details. :license: BSD. """ import sys import re import codecs from traceback import format_exception_only try: from collections import deque except ImportError: # pragma: no cover deque = None from werkzeug.utils import escape from werkzeug._compat import iteritems, PY2, text_type, integer_types, \ string_types missing = object() _paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}') RegexType = type(_paragraph_re) HELP_HTML = '''\ <div class=box> <h3>%(title)s</h3> <pre class=help>%(text)s</pre> </div>\ ''' OBJECT_DUMP_HTML = '''\ <div class=box> <h3>%(title)s</h3> %(repr)s <table>%(items)s</table> </div>\ ''' def debug_repr(obj): """Creates a debug repr of an object as HTML unicode string.""" return DebugReprGenerator().repr(obj) def dump(obj=missing): """Print the object details to stdout._write (for the interactive console of the web debugger. """ gen = DebugReprGenerator() if obj is missing: rv = gen.dump_locals(sys._getframe(1).f_locals) else: rv = gen.dump_object(obj) sys.stdout._write(rv) class _Helper(object): """Displays an HTML version of the normal help, for the interactive debugger only because it requires a patched sys.stdout. """ def __repr__(self): return 'Type help(object) for help about object.' 
def __call__(self, topic=None): if topic is None: sys.stdout._write('<span class=help>%s</span>' % repr(self)) return import pydoc pydoc.help(topic) rv = sys.stdout.reset() if isinstance(rv, bytes): rv = rv.decode('utf-8', 'ignore') paragraphs = _paragraph_re.split(rv) if len(paragraphs) > 1: title = paragraphs[0] text = '\n\n'.join(paragraphs[1:]) else: # pragma: no cover title = 'Help' text = paragraphs[0] sys.stdout._write(HELP_HTML % {'title': title, 'text': text}) helper = _Helper() def _add_subclass_info(inner, obj, base): if isinstance(base, tuple): for base in base: if type(obj) is base: return inner elif type(obj) is base: return inner module = '' if obj.__class__.__module__ not in ('__builtin__', 'exceptions'): module = '<span class="module">%s.</span>' % obj.__class__.__module__ return '%s%s(%s)' % (module, obj.__class__.__name__, inner) class DebugReprGenerator(object): def __init__(self): self._stack = [] def _sequence_repr_maker(left, right, base=object(), limit=8): def proxy(self, obj, recursive): if recursive: return _add_subclass_info(left + '...' + right, obj, base) buf = [left] have_extended_section = False for idx, item in enumerate(obj): if idx: buf.append(', ') if idx == limit: buf.append('<span class="extended">') have_extended_section = True buf.append(self.repr(item)) if have_extended_section: buf.append('</span>') buf.append(right) return _add_subclass_info(u''.join(buf), obj, base) return proxy list_repr = _sequence_repr_maker('[', ']', list) tuple_repr = _sequence_repr_maker('(', ')', tuple) set_repr = _sequence_repr_maker('set([', '])', set) frozenset_repr = _sequence_repr_maker('frozenset([', '])', frozenset) if deque is not None: deque_repr = _sequence_repr_maker('<span class="module">collections.' 
'</span>deque([', '])', deque) del _sequence_repr_maker def regex_repr(self, obj): pattern = repr(obj.pattern) if PY2: pattern = pattern.decode('string-escape', 'ignore') else: pattern = codecs.decode(pattern, 'unicode-escape', 'ignore') if pattern[:1] == 'u': pattern = 'ur' + pattern[1:] else: pattern = 'r' + pattern return u're.compile(<span class="string regex">%s</span>)' % pattern def string_repr(self, obj, limit=70): buf = ['<span class="string">'] escaped = escape(obj) a = repr(escaped[:limit]) b = repr(escaped[limit:]) if isinstance(obj, text_type) and PY2: buf.append('u') a = a[1:] b = b[1:] if b != "''": buf.extend((a[:-1], '<span class="extended">', b[1:], '</span>')) else: buf.append(a) buf.append('</span>') return _add_subclass_info(u''.join(buf), obj, (bytes, text_type)) def dict_repr(self, d, recursive, limit=5): if recursive: return _add_subclass_info(u'{...}', d, dict) buf = ['{'] have_extended_section = False for idx, (key, value) in enumerate(iteritems(d)): if idx: buf.append(', ') if idx == limit - 1: buf.append('<span class="extended">') have_extended_section = True buf.append('<span class="pair"><span class="key">%s</span>: ' '<span class="value">%s</span></span>' % (self.repr(key), self.repr(value))) if have_extended_section: buf.append('</span>') buf.append('}') return _add_subclass_info(u''.join(buf), d, dict) def object_repr(self, obj): r = repr(obj) if PY2: r = r.decode('utf-8', 'replace') return u'<span class="object">%s</span>' % escape(r) def dispatch_repr(self, obj, recursive): if obj is helper: return u'<span class="help">%r</span>' % helper if isinstance(obj, (integer_types, float, complex)): return u'<span class="number">%r</span>' % obj if isinstance(obj, string_types): return self.string_repr(obj) if isinstance(obj, RegexType): return self.regex_repr(obj) if isinstance(obj, list): return self.list_repr(obj, recursive) if isinstance(obj, tuple): return self.tuple_repr(obj, recursive) if isinstance(obj, set): return 
self.set_repr(obj, recursive) if isinstance(obj, frozenset): return self.frozenset_repr(obj, recursive) if isinstance(obj, dict): return self.dict_repr(obj, recursive) if deque is not None and isinstance(obj, deque): return self.deque_repr(obj, recursive) return self.object_repr(obj) def fallback_repr(self): try: info = ''.join(format_exception_only(*sys.exc_info()[:2])) except Exception: # pragma: no cover info = '?' if PY2: info = info.decode('utf-8', 'ignore') return u'<span class="brokenrepr">&lt;broken repr (%s)&gt;' \ u'</span>' % escape(info.strip()) def repr(self, obj): recursive = False for item in self._stack: if item is obj: recursive = True break self._stack.append(obj) try: try: return self.dispatch_repr(obj, recursive) except Exception: return self.fallback_repr() finally: self._stack.pop() def dump_object(self, obj): repr = items = None if isinstance(obj, dict): title = 'Contents of' items = [] for key, value in iteritems(obj): if not isinstance(key, string_types): items = None break items.append((key, self.repr(value))) if items is None: items = [] repr = self.repr(obj) for key in dir(obj): try: items.append((key, self.repr(getattr(obj, key)))) except Exception: pass title = 'Details for' title += ' ' + object.__repr__(obj)[1:-1] return self.render_object_dump(items, title, repr) def dump_locals(self, d): items = [(key, self.repr(value)) for key, value in d.items()] return self.render_object_dump(items, 'Local variables in frame') def render_object_dump(self, items, title, repr=None): html_items = [] for key, value in items: html_items.append('<tr><th>%s<td><pre class=repr>%s</pre>' % (escape(key), value)) if not html_items: html_items.append('<tr><td><em>Nothing</em>') return OBJECT_DUMP_HTML % { 'title': escape(title), 'repr': repr and '<pre class=repr>%s</pre>' % repr or '', 'items': '\n'.join(html_items) }
apache-2.0
espadrine/opera
chromium/src/third_party/python_26/Lib/encodings/hp_roman8.py
647
7391
""" Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py. Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen) Original source: LaserJet IIP Printer User's Manual HP part no 33471-90901, Hewlet-Packard, June 1989. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_map) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_map)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='hp-roman8', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x00a1: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE 0x00a2: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x00a3: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE 0x00a4: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 0x00a5: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x00a6: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00a7: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS 0x00a8: 0x00b4, # ACUTE ACCENT 0x00a9: 0x02cb, # MODIFIER LETTER GRAVE ACCENT (Mandarin Chinese fourth tone) 0x00aa: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT 0x00ab: 0x00a8, # DIAERESIS 0x00ac: 0x02dc, # SMALL TILDE 0x00ad: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE 0x00ae: 0x00db, # LATIN CAPITAL LETTER U WITH 
CIRCUMFLEX 0x00af: 0x20a4, # LIRA SIGN 0x00b0: 0x00af, # MACRON 0x00b1: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE 0x00b2: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE 0x00b3: 0x00b0, # DEGREE SIGN 0x00b4: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA 0x00b5: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA 0x00b6: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE 0x00b7: 0x00f1, # LATIN SMALL LETTER N WITH TILDE 0x00b8: 0x00a1, # INVERTED EXCLAMATION MARK 0x00b9: 0x00bf, # INVERTED QUESTION MARK 0x00ba: 0x00a4, # CURRENCY SIGN 0x00bb: 0x00a3, # POUND SIGN 0x00bc: 0x00a5, # YEN SIGN 0x00bd: 0x00a7, # SECTION SIGN 0x00be: 0x0192, # LATIN SMALL LETTER F WITH HOOK 0x00bf: 0x00a2, # CENT SIGN 0x00c0: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x00c1: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x00c2: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x00c3: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x00c4: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x00c5: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x00c6: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00c7: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00c8: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE 0x00c9: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE 0x00ca: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE 0x00cb: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE 0x00cc: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x00cd: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS 0x00ce: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x00cf: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x00d0: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x00d1: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x00d2: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE 0x00d3: 0x00c6, # LATIN CAPITAL LETTER AE 0x00d4: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE 0x00d5: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x00d6: 0x00f8, # LATIN SMALL LETTER O WITH STROKE 0x00d7: 0x00e6, # LATIN SMALL LETTER AE 0x00d8: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x00d9: 0x00ec, # 
LATIN SMALL LETTER I WITH GRAVE 0x00da: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x00db: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00dc: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE 0x00dd: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS 0x00de: 0x00df, # LATIN SMALL LETTER SHARP S (German) 0x00df: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00e0: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE 0x00e1: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE 0x00e2: 0x00e3, # LATIN SMALL LETTER A WITH TILDE 0x00e3: 0x00d0, # LATIN CAPITAL LETTER ETH (Icelandic) 0x00e4: 0x00f0, # LATIN SMALL LETTER ETH (Icelandic) 0x00e5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE 0x00e6: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE 0x00e7: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00e8: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE 0x00e9: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE 0x00ea: 0x00f5, # LATIN SMALL LETTER O WITH TILDE 0x00eb: 0x0160, # LATIN CAPITAL LETTER S WITH CARON 0x00ec: 0x0161, # LATIN SMALL LETTER S WITH CARON 0x00ed: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE 0x00ee: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS 0x00ef: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS 0x00f0: 0x00de, # LATIN CAPITAL LETTER THORN (Icelandic) 0x00f1: 0x00fe, # LATIN SMALL LETTER THORN (Icelandic) 0x00f2: 0x00b7, # MIDDLE DOT 0x00f3: 0x00b5, # MICRO SIGN 0x00f4: 0x00b6, # PILCROW SIGN 0x00f5: 0x00be, # VULGAR FRACTION THREE QUARTERS 0x00f6: 0x2014, # EM DASH 0x00f7: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00f8: 0x00bd, # VULGAR FRACTION ONE HALF 0x00f9: 0x00aa, # FEMININE ORDINAL INDICATOR 0x00fa: 0x00ba, # MASCULINE ORDINAL INDICATOR 0x00fb: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00fc: 0x25a0, # BLACK SQUARE 0x00fd: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00fe: 0x00b1, # PLUS-MINUS SIGN 0x00ff: None, }) ### Encoding Map encoding_map = codecs.make_encoding_map(decoding_map)
bsd-3-clause
krafczyk/spack
var/spack/repos/builtin/packages/r-trimcluster/package.py
5
1642
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RTrimcluster(RPackage): """trimcluster: Cluster analysis with trimming""" homepage = "http://www.homepages.ucl.ac.uk/~ucakche" url = "https://cran.r-project.org/src/contrib/trimcluster_0.1-2.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/trimcluster" version('0.1-2', '7617920e224bd18f5b87db38a3116ec2') depends_on('r@1.9.0:')
lgpl-2.1
pigeonflight/strider-plone
docker/appengine/lib/django-1.4/tests/regressiontests/file_storage/tests.py
23
20745
# -*- coding: utf-8 -*- import errno import os import shutil import sys import tempfile import time from datetime import datetime, timedelta try: from cStringIO import StringIO except ImportError: from StringIO import StringIO try: import threading except ImportError: import dummy_threading as threading from django.conf import settings from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured from django.core.files.base import ContentFile from django.core.files.images import get_image_dimensions from django.core.files.storage import FileSystemStorage, get_storage_class from django.core.files.uploadedfile import UploadedFile from django.test import SimpleTestCase from django.utils import unittest # Try to import PIL in either of the two ways it can end up installed. # Checking for the existence of Image is enough for CPython, but # for PyPy, you need to check for the underlying modules try: from PIL import Image, _imaging except ImportError: try: import Image, _imaging except ImportError: Image = None class GetStorageClassTests(SimpleTestCase): def test_get_filesystem_storage(self): """ get_storage_class returns the class for a storage backend name/path. """ self.assertEqual( get_storage_class('django.core.files.storage.FileSystemStorage'), FileSystemStorage) def test_get_invalid_storage_module(self): """ get_storage_class raises an error if the requested import don't exist. """ self.assertRaisesMessage( ImproperlyConfigured, "NonExistingStorage isn't a storage module.", get_storage_class, 'NonExistingStorage') def test_get_nonexisting_storage_class(self): """ get_storage_class raises an error if the requested class don't exist. 
""" self.assertRaisesMessage( ImproperlyConfigured, 'Storage module "django.core.files.storage" does not define a '\ '"NonExistingStorage" class.', get_storage_class, 'django.core.files.storage.NonExistingStorage') def test_get_nonexisting_storage_module(self): """ get_storage_class raises an error if the requested module don't exist. """ # Error message may or may not be the fully qualified path. self.assertRaisesRegexp( ImproperlyConfigured, ('Error importing storage module django.core.files.non_existing_' 'storage: "No module named .*non_existing_storage"'), get_storage_class, 'django.core.files.non_existing_storage.NonExistingStorage' ) class FileStorageTests(unittest.TestCase): storage_class = FileSystemStorage def setUp(self): self.temp_dir = tempfile.mkdtemp() self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/') # Set up a second temporary directory which is ensured to have a mixed # case name. self.temp_dir2 = tempfile.mkdtemp(suffix='aBc') def tearDown(self): shutil.rmtree(self.temp_dir) shutil.rmtree(self.temp_dir2) def test_emtpy_location(self): """ Makes sure an exception is raised if the location is empty """ storage = self.storage_class(location='') self.assertEqual(storage.base_location, '') self.assertEqual(storage.location, os.getcwd()) def test_file_access_options(self): """ Standard file access options are available, and work as expected. """ self.assertFalse(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'w') f.write('storage contents') f.close() self.assertTrue(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'r') self.assertEqual(f.read(), 'storage contents') f.close() self.storage.delete('storage_test') self.assertFalse(self.storage.exists('storage_test')) def test_file_accessed_time(self): """ File storage returns a Datetime object for the last accessed time of a file. 
""" self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) atime = self.storage.accessed_time(f_name) self.assertEqual(atime, datetime.fromtimestamp( os.path.getatime(self.storage.path(f_name)))) self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2)) self.storage.delete(f_name) def test_file_created_time(self): """ File storage returns a Datetime object for the creation time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) ctime = self.storage.created_time(f_name) self.assertEqual(ctime, datetime.fromtimestamp( os.path.getctime(self.storage.path(f_name)))) self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2)) self.storage.delete(f_name) def test_file_modified_time(self): """ File storage returns a Datetime object for the last modified time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) mtime = self.storage.modified_time(f_name) self.assertEqual(mtime, datetime.fromtimestamp( os.path.getmtime(self.storage.path(f_name)))) self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2)) self.storage.delete(f_name) def test_file_save_without_name(self): """ File storage extracts the filename from the content object if no name is given explicitly. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f.name = 'test.file' storage_f_name = self.storage.save(None, f) self.assertEqual(storage_f_name, f.name) self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name))) self.storage.delete(storage_f_name) def test_file_save_with_path(self): """ Saving a pathname should create intermediate directories as necessary. 
""" self.assertFalse(self.storage.exists('path/to')) self.storage.save('path/to/test.file', ContentFile('file saved with path')) self.assertTrue(self.storage.exists('path/to')) self.assertEqual(self.storage.open('path/to/test.file').read(), 'file saved with path') self.assertTrue(os.path.exists( os.path.join(self.temp_dir, 'path', 'to', 'test.file'))) self.storage.delete('path/to/test.file') def test_file_path(self): """ File storage returns the full path of a file """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name)) self.storage.delete(f_name) def test_file_url(self): """ File storage returns a url to access a given file from the Web. """ self.assertEqual(self.storage.url('test.file'), '%s%s' % (self.storage.base_url, 'test.file')) # should encode special chars except ~!*()' # like encodeURIComponent() JavaScript function do self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""), """/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""") # should stanslate os path separator(s) to the url path separator self.assertEqual(self.storage.url("""a/b\\c.file"""), """/test_media_url/a/b/c.file""") self.storage.base_url = None self.assertRaises(ValueError, self.storage.url, 'test.file') def test_listdir(self): """ File storage returns a tuple containing directories and files. 
""" self.assertFalse(self.storage.exists('storage_test_1')) self.assertFalse(self.storage.exists('storage_test_2')) self.assertFalse(self.storage.exists('storage_dir_1')) f = self.storage.save('storage_test_1', ContentFile('custom content')) f = self.storage.save('storage_test_2', ContentFile('custom content')) os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1')) dirs, files = self.storage.listdir('') self.assertEqual(set(dirs), set([u'storage_dir_1'])) self.assertEqual(set(files), set([u'storage_test_1', u'storage_test_2'])) self.storage.delete('storage_test_1') self.storage.delete('storage_test_2') os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1')) def test_file_storage_prevents_directory_traversal(self): """ File storage prevents directory traversal (files can only be accessed if they're below the storage location). """ self.assertRaises(SuspiciousOperation, self.storage.exists, '..') self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd') def test_file_storage_preserves_filename_case(self): """The storage backend should preserve case of filenames.""" # Create a storage backend associated with the mixed case name # directory. temp_storage = self.storage_class(location=self.temp_dir2) # Ask that storage backend to store a file with a mixed case filename. mixed_case = 'CaSe_SeNsItIvE' file = temp_storage.open(mixed_case, 'w') file.write('storage contents') file.close() self.assertEqual(os.path.join(self.temp_dir2, mixed_case), temp_storage.path(mixed_case)) temp_storage.delete(mixed_case) def test_makedirs_race_handling(self): """ File storage should be robust against directory creation race conditions. """ real_makedirs = os.makedirs # Monkey-patch os.makedirs, to simulate a normal call, a raced call, # and an error. 
def fake_makedirs(path): if path == os.path.join(self.temp_dir, 'normal'): real_makedirs(path) elif path == os.path.join(self.temp_dir, 'raced'): real_makedirs(path) raise OSError(errno.EEXIST, 'simulated EEXIST') elif path == os.path.join(self.temp_dir, 'error'): raise OSError(errno.EACCES, 'simulated EACCES') else: self.fail('unexpected argument %r' % path) try: os.makedirs = fake_makedirs self.storage.save('normal/test.file', ContentFile('saved normally')) self.assertEqual(self.storage.open('normal/test.file').read(), 'saved normally') self.storage.save('raced/test.file', ContentFile('saved with race')) self.assertEqual(self.storage.open('raced/test.file').read(), 'saved with race') # Check that OSErrors aside from EEXIST are still raised. self.assertRaises(OSError, self.storage.save, 'error/test.file', ContentFile('not saved')) finally: os.makedirs = real_makedirs def test_remove_race_handling(self): """ File storage should be robust against file removal race conditions. """ real_remove = os.remove # Monkey-patch os.remove, to simulate a normal call, a raced call, # and an error. def fake_remove(path): if path == os.path.join(self.temp_dir, 'normal.file'): real_remove(path) elif path == os.path.join(self.temp_dir, 'raced.file'): real_remove(path) raise OSError(errno.ENOENT, 'simulated ENOENT') elif path == os.path.join(self.temp_dir, 'error.file'): raise OSError(errno.EACCES, 'simulated EACCES') else: self.fail('unexpected argument %r' % path) try: os.remove = fake_remove self.storage.save('normal.file', ContentFile('delete normally')) self.storage.delete('normal.file') self.assertFalse(self.storage.exists('normal.file')) self.storage.save('raced.file', ContentFile('delete with race')) self.storage.delete('raced.file') self.assertFalse(self.storage.exists('normal.file')) # Check that OSErrors aside from ENOENT are still raised. 
self.storage.save('error.file', ContentFile('delete with error')) self.assertRaises(OSError, self.storage.delete, 'error.file') finally: os.remove = real_remove class CustomStorage(FileSystemStorage): def get_available_name(self, name): """ Append numbers to duplicate files rather than underscores, like Trac. """ parts = name.split('.') basename, ext = parts[0], parts[1:] number = 2 while self.exists(name): name = '.'.join([basename, str(number)] + ext) number += 1 return name class CustomStorageTests(FileStorageTests): storage_class = CustomStorage def test_custom_get_available_name(self): first = self.storage.save('custom_storage', ContentFile('custom contents')) self.assertEqual(first, 'custom_storage') second = self.storage.save('custom_storage', ContentFile('more contents')) self.assertEqual(second, 'custom_storage.2') self.storage.delete(first) self.storage.delete(second) class UnicodeFileNameTests(unittest.TestCase): def test_unicode_file_names(self): """ Regression test for #8156: files with unicode names I can't quite figure out the encoding situation between doctest and this file, but the actual repr doesn't matter; it just shouldn't return a unicode object. """ uf = UploadedFile(name=u'¿Cómo?',content_type='text') self.assertEqual(type(uf.__repr__()), str) # Tests for a race condition on file saving (#4948). # This is written in such a way that it'll always pass on platforms # without threading. 
class SlowFile(ContentFile): def chunks(self): time.sleep(1) return super(ContentFile, self).chunks() class FileSaveRaceConditionTest(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) self.thread = threading.Thread(target=self.save_file, args=['conflict']) def tearDown(self): shutil.rmtree(self.storage_dir) def save_file(self, name): name = self.storage.save(name, SlowFile("Data")) def test_race_condition(self): self.thread.start() name = self.save_file('conflict') self.thread.join() self.assertTrue(self.storage.exists('conflict')) self.assertTrue(self.storage.exists('conflict_1')) self.storage.delete('conflict') self.storage.delete('conflict_1') class FileStoragePermissions(unittest.TestCase): def setUp(self): self.old_perms = settings.FILE_UPLOAD_PERMISSIONS settings.FILE_UPLOAD_PERMISSIONS = 0666 self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): settings.FILE_UPLOAD_PERMISSIONS = self.old_perms shutil.rmtree(self.storage_dir) def test_file_upload_permissions(self): name = self.storage.save("the_file", ContentFile("data")) actual_mode = os.stat(self.storage.path(name))[0] & 0777 self.assertEqual(actual_mode, 0666) class FileStoragePathParsing(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): shutil.rmtree(self.storage_dir) def test_directory_with_dot(self): """Regression test for #9610. If the directory name contains a dot and the file name doesn't, make sure we still mangle the file name instead of the directory name. 
""" self.storage.save('dotted.path/test', ContentFile("1")) self.storage.save('dotted.path/test', ContentFile("2")) self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path'))) self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test'))) self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1'))) def test_first_character_dot(self): """ File names with a dot as their first character don't have an extension, and the underscore should get added to the end. """ self.storage.save('dotted.path/.test', ContentFile("1")) self.storage.save('dotted.path/.test', ContentFile("2")) self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test'))) # Before 2.6, a leading dot was treated as an extension, and so # underscore gets added to beginning instead of end. if sys.version_info < (2, 6): self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/_1.test'))) else: self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1'))) class DimensionClosingBug(unittest.TestCase): """ Test that get_image_dimensions() properly closes files (#8817) """ @unittest.skipUnless(Image, "PIL not installed") def test_not_closing_of_files(self): """ Open files passed into get_image_dimensions() should stay opened. """ empty_io = StringIO() try: get_image_dimensions(empty_io) finally: self.assertTrue(not empty_io.closed) @unittest.skipUnless(Image, "PIL not installed") def test_closing_of_filenames(self): """ get_image_dimensions() called with a filename should closed the file. """ # We need to inject a modified open() builtin into the images module # that checks if the file was closed properly if the function is # called with a filename instead of an file object. # get_image_dimensions will call our catching_open instead of the # regular builtin one. 
class FileWrapper(object): _closed = [] def __init__(self, f): self.f = f def __getattr__(self, name): return getattr(self.f, name) def close(self): self._closed.append(True) self.f.close() def catching_open(*args): return FileWrapper(open(*args)) from django.core.files import images images.open = catching_open try: get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png")) finally: del images.open self.assertTrue(FileWrapper._closed) class InconsistentGetImageDimensionsBug(unittest.TestCase): """ Test that get_image_dimensions() works properly after various calls using a file handler (#11158) """ @unittest.skipUnless(Image, "PIL not installed") def test_multiple_calls(self): """ Multiple calls of get_image_dimensions() should return the same size. """ from django.core.files.images import ImageFile img_path = os.path.join(os.path.dirname(__file__), "test.png") image = ImageFile(open(img_path, 'rb')) image_pil = Image.open(img_path) size_1, size_2 = get_image_dimensions(image), get_image_dimensions(image) self.assertEqual(image_pil.size, size_1) self.assertEqual(size_1, size_2) class ContentFileTestCase(unittest.TestCase): """ Test that the constructor of ContentFile accepts 'name' (#16590). """ def test_content_file_default_name(self): self.assertEqual(ContentFile("content").name, None) def test_content_file_custome_name(self): name = "I can have a name too!" self.assertEqual(ContentFile("content", name=name).name, name)
mit
tungvx/deploy
.google_appengine/google/appengine/api/appinfo_includes.py
7
9349
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Used to parse app.yaml files while following builtins/includes directives.""" import logging import os from google.appengine.api import appinfo from google.appengine.api import appinfo_errors from google.appengine.ext import builtins class IncludeFileNotFound(Exception): """Raised if a specified include file cannot be found on disk.""" def Parse(appinfo_file, open_fn=open): """Parse an AppYaml file and merge referenced includes and builtins.""" try: appinfo_path = appinfo_file.name if not os.path.isfile(appinfo_path): raise Exception('Name defined by appinfo_file does not appear to be a ' 'valid file: %s' % appinfo_path) except AttributeError: raise Exception('File object passed to ParseAndMerge does not define ' 'attribute "name" as as full file path.') appyaml = appinfo.LoadSingleAppInfo(appinfo_file) appyaml = _MergeBuiltinsIncludes(appinfo_path, appyaml, open_fn) if not appyaml.handlers: raise appinfo_errors.MissingURLMapping( 'No URLMap entries found in application configuration') if len(appyaml.handlers) > appinfo.MAX_URL_MAPS: raise appinfo_errors.TooManyURLMappings( 'Found more than %d URLMap entries in application configuration' % appinfo.MAX_URL_MAPS) if appyaml.runtime == 'python27' and appyaml.threadsafe: for handler in appyaml.handlers: if (handler.script and (handler.script.endswith('.py') or '/' in handler.script)): raise appinfo_errors.ThreadsafeWithCgiHandler( 
'Threadsafe cannot be enabled with CGI handler: %s' % handler.script) return appyaml def _MergeBuiltinsIncludes(appinfo_path, appyaml, open_fn=open): """Merges app.yaml files from builtins and includes directives in appyaml. Args: appinfo_path: the application directory. appyaml: the yaml file to obtain builtins and includes directives from. open_fn: file opening function to pass to _ResolveIncludes, used when reading yaml files. Returns: the modified appyaml object which incorporates referenced yaml files. """ if not appyaml.builtins: appyaml.builtins = [appinfo.BuiltinHandler(default='on')] else: if not appinfo.BuiltinHandler.IsDefined(appyaml.builtins, 'default'): appyaml.builtins.append(appinfo.BuiltinHandler(default='on')) aggregate_appinclude = ( _ResolveIncludes(appinfo_path, appinfo.AppInclude(builtins=appyaml.builtins, includes=appyaml.includes), os.path.dirname(appinfo_path), appyaml.runtime, open_fn=open_fn)) return appinfo.AppInclude.MergeAppYamlAppInclude(appyaml, aggregate_appinclude) def _ResolveIncludes(included_from, app_include, basepath, runtime, state=None, open_fn=open): """Recursively includes all encountered builtins/includes directives. This function takes an initial AppInclude object specified as a parameter and recursively evaluates every builtins/includes directive in the passed in AppInclude and any files they reference. The sole output of the function is an AppInclude object that is the result of merging all encountered AppInclude objects. This must then be merged with the root AppYaml object. Args: included_from: file that included file was included from. app_include: the AppInclude object to resolve. basepath: application basepath. runtime: name of the runtime. state: contains the list of included and excluded files as well as the directives of all encountered AppInclude objects. open_fn: file opening function udes, used when reading yaml files. 
Returns: AppInclude object merged from following all builtins/includes defined in provided AppInclude object. Raises: IncludeFileNotFound: if file specified in an include statement cannot be resolved to an includeable file (result from _ResolvePath is False). """ class RecurseState(object): def __init__(self): self.includes = {} self.excludes = {} self.aggregate_appinclude = appinfo.AppInclude() if not state: state = RecurseState() appinfo.AppInclude.MergeAppIncludes(state.aggregate_appinclude, app_include) includes_list = _ConvertBuiltinsToIncludes(included_from, app_include, state, runtime) includes_list.extend(app_include.includes or []) for i in includes_list: inc_path = _ResolvePath(included_from, i, basepath) if not inc_path: raise IncludeFileNotFound('File %s listed in includes directive of %s ' 'could not be found.' % (i, included_from)) if inc_path in state.excludes: logging.warning('%s already disabled by %s but later included by %s', inc_path, state.excludes[inc_path], included_from) elif not inc_path in state.includes: state.includes[inc_path] = included_from yaml_file = open_fn(inc_path, 'r') try: inc_yaml = appinfo.LoadAppInclude(yaml_file) _ResolveIncludes(inc_path, inc_yaml, basepath, runtime, state=state, open_fn=open_fn) except appinfo_errors.EmptyConfigurationFile: if not os.path.basename(os.path.dirname(inc_path)) == 'default': logging.warning('Nothing to include in %s', inc_path) return state.aggregate_appinclude def _ConvertBuiltinsToIncludes(included_from, app_include, state, runtime): """Converts builtins directives to includes directives. Moves all builtins directives in app_include into the includes directives list. Modifies class excludes dict if any builtins are switched off. Class includes dict is used to determine if an excluded builtin was previously included. Args: included_from: file that builtin directive was found in app_include: the AppInclude object currently being processed. 
state: contains the list of included and excluded files as well as the directives of all encountered AppInclude objects. runtime: name of the runtime. Returns: list of the absolute paths to the include files for builtins where "x: on" directive was specified, e.g. "builtins:\n default: on" -> ['/google/appengine/ext/builtins/default/include.yaml'] """ includes_list = [] if app_include.builtins: builtins_list = appinfo.BuiltinHandler.ListToTuples(app_include.builtins) for builtin_name, on_or_off in builtins_list: if not on_or_off: continue yaml_path = builtins.get_yaml_path(builtin_name, runtime) if on_or_off == 'on': includes_list.append(yaml_path) elif on_or_off == 'off': if yaml_path in state.includes: logging.warning('%s already included by %s but later disabled by %s', yaml_path, state.includes[yaml_path], included_from) state.excludes[yaml_path] = included_from else: logging.error('Invalid state for AppInclude object loaded from %s; ' 'builtins directive "%s: %s" ignored.', included_from, builtin_name, on_or_off) return includes_list def _ResolvePath(included_from, included_path, basepath): """Gets the absolute path of the file to be included. Resolves in the following order: - absolute path or relative to working directory (path as specified resolves to a file) - relative to basepath (basepath + path resolves to a file) - relative to file it was included from (included_from + included_path resolves to a file) Args: included_from: absolute path of file that included_path was included from. included_path: file string from includes directive. basepath: the application directory. Returns: absolute path of the first file found for included_path or ''. 
""" path = os.path.join(os.path.dirname(included_from), included_path) if not _IsFileOrDirWithFile(path): path = os.path.join(basepath, included_path) if not _IsFileOrDirWithFile(path): path = included_path if not _IsFileOrDirWithFile(path): return '' if os.path.isfile(path): return os.path.normcase(os.path.abspath(path)) return os.path.normcase(os.path.abspath(os.path.join(path, 'include.yaml'))) def _IsFileOrDirWithFile(path): """Determine if a path is a file or a directory with an appropriate file.""" return os.path.isfile(path) or ( os.path.isdir(path) and os.path.isfile(os.path.join(path, 'include.yaml')))
apache-2.0
SaM-Solutions/samba
source4/scripting/python/samba/netcmd/drs.py
19
19605
#!/usr/bin/env python # # implement samba_tool drs commands # # Copyright Andrew Tridgell 2010 # # based on C implementation by Kamen Mazdrashki <kamen.mazdrashki@postpath.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import samba.getopt as options import ldb from samba.auth import system_session from samba.netcmd import ( Command, CommandError, Option, SuperCommand, ) from samba.samdb import SamDB from samba import drs_utils, nttime2string, dsdb from samba.dcerpc import drsuapi, misc import common def drsuapi_connect(ctx): '''make a DRSUAPI connection to the server''' binding_options = "seal" if ctx.lp.get("log level") >= 5: binding_options += ",print" binding_string = "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options) try: ctx.drsuapi = drsuapi.drsuapi(binding_string, ctx.lp, ctx.creds) (ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drs_DsBind(ctx.drsuapi) except Exception, e: raise CommandError("DRS connection to %s failed" % ctx.server, e) def samdb_connect(ctx): '''make a ldap connection to the server''' try: ctx.samdb = SamDB(url="ldap://%s" % ctx.server, session_info=system_session(), credentials=ctx.creds, lp=ctx.lp) except Exception, e: raise CommandError("LDAP connection to %s failed" % ctx.server, e) def drs_errmsg(werr): '''return "was successful" or an error string''' (ecode, estring) = werr if ecode == 0: return "was successful" return "failed, 
result %u (%s)" % (ecode, estring) def attr_default(msg, attrname, default): '''get an attribute from a ldap msg with a default''' if attrname in msg: return msg[attrname][0] return default def drs_parse_ntds_dn(ntds_dn): '''parse a NTDS DN returning a site and server''' a = ntds_dn.split(',') if a[0] != "CN=NTDS Settings" or a[2] != "CN=Servers" or a[4] != 'CN=Sites': raise RuntimeError("bad NTDS DN %s" % ntds_dn) server = a[1].split('=')[1] site = a[3].split('=')[1] return (site, server) def get_dsServiceName(samdb): '''get the NTDS DN from the rootDSE''' res = samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"]) return res[0]["dsServiceName"][0] class cmd_drs_showrepl(Command): """show replication status""" synopsis = "%prog drs showrepl <DC>" takes_optiongroups = { "sambaopts": options.SambaOptions, "versionopts": options.VersionOptions, "credopts": options.CredentialsOptions, } takes_args = ["DC?"] def print_neighbour(self, n): '''print one set of neighbour information''' self.message("%s" % n.naming_context_dn) try: (site, server) = drs_parse_ntds_dn(n.source_dsa_obj_dn) self.message("\t%s\%s via RPC" % (site, server)) except RuntimeError: self.message("\tNTDS DN: %s" % n.source_dsa_obj_dn) self.message("\t\tDSA object GUID: %s" % n.source_dsa_obj_guid) self.message("\t\tLast attempt @ %s %s" % (nttime2string(n.last_attempt), drs_errmsg(n.result_last_attempt))) self.message("\t\t%u consecutive failure(s)." 
% n.consecutive_sync_failures) self.message("\t\tLast success @ %s" % nttime2string(n.last_success)) self.message("") def drsuapi_ReplicaInfo(ctx, info_type): '''call a DsReplicaInfo''' req1 = drsuapi.DsReplicaGetInfoRequest1() req1.info_type = info_type try: (info_type, info) = ctx.drsuapi.DsReplicaGetInfo(ctx.drsuapi_handle, 1, req1) except Exception, e: raise CommandError("DsReplicaGetInfo of type %u failed" % info_type, e) return (info_type, info) def run(self, DC=None, sambaopts=None, credopts=None, versionopts=None, server=None): self.lp = sambaopts.get_loadparm() if DC is None: DC = common.netcmd_dnsname(self.lp) self.server = DC self.creds = credopts.get_credentials(self.lp, fallback_machine=True) drsuapi_connect(self) samdb_connect(self) # show domain information ntds_dn = get_dsServiceName(self.samdb) server_dns = self.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dnsHostName"])[0]['dnsHostName'][0] (site, server) = drs_parse_ntds_dn(ntds_dn) try: ntds = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=['options', 'objectGUID', 'invocationId']) except Exception, e: raise CommandError("Failed to search NTDS DN %s" % ntds_dn) conn = self.samdb.search(base=ntds_dn, expression="(objectClass=nTDSConnection)") self.message("%s\\%s" % (site, server)) self.message("DSA Options: 0x%08x" % int(attr_default(ntds[0], "options", 0))) self.message("DSA object GUID: %s" % self.samdb.schema_format_value("objectGUID", ntds[0]["objectGUID"][0])) self.message("DSA invocationId: %s\n" % self.samdb.schema_format_value("objectGUID", ntds[0]["invocationId"][0])) self.message("==== INBOUND NEIGHBORS ====\n") (info_type, info) = self.drsuapi_ReplicaInfo(drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS) for n in info.array: self.print_neighbour(n) self.message("==== OUTBOUND NEIGHBORS ====\n") (info_type, info) = self.drsuapi_ReplicaInfo(drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO) for n in info.array: self.print_neighbour(n) reasons = ['NTDSCONN_KCC_GC_TOPOLOGY', 
'NTDSCONN_KCC_RING_TOPOLOGY', 'NTDSCONN_KCC_MINIMIZE_HOPS_TOPOLOGY', 'NTDSCONN_KCC_STALE_SERVERS_TOPOLOGY', 'NTDSCONN_KCC_OSCILLATING_CONNECTION_TOPOLOGY', 'NTDSCONN_KCC_INTERSITE_GC_TOPOLOGY', 'NTDSCONN_KCC_INTERSITE_TOPOLOGY', 'NTDSCONN_KCC_SERVER_FAILOVER_TOPOLOGY', 'NTDSCONN_KCC_SITE_FAILOVER_TOPOLOGY', 'NTDSCONN_KCC_REDUNDANT_SERVER_TOPOLOGY'] self.message("==== KCC CONNECTION OBJECTS ====\n") for c in conn: self.message("Connection --") self.message("\tConnection name: %s" % c['name'][0]) self.message("\tEnabled : %s" % attr_default(c, 'enabledConnection', 'TRUE')) self.message("\tServer DNS name : %s" % server_dns) self.message("\tServer DN name : %s" % c['fromServer'][0]) self.message("\t\tTransportType: RPC") self.message("\t\toptions: 0x%08X" % int(attr_default(c, 'options', 0))) if not 'mS-DS-ReplicatesNCReason' in c: self.message("Warning: No NC replicated for Connection!") continue for r in c['mS-DS-ReplicatesNCReason']: a = str(r).split(':') self.message("\t\tReplicatesNC: %s" % a[3]) self.message("\t\tReason: 0x%08x" % int(a[2])) for s in reasons: if getattr(dsdb, s, 0) & int(a[2]): self.message("\t\t\t%s" % s) class cmd_drs_kcc(Command): """trigger knowledge consistency center run""" synopsis = "%prog drs kcc <DC>" takes_optiongroups = { "sambaopts": options.SambaOptions, "versionopts": options.VersionOptions, "credopts": options.CredentialsOptions, } takes_args = ["DC?"] def run(self, DC=None, sambaopts=None, credopts=None, versionopts=None, server=None): self.lp = sambaopts.get_loadparm() if DC is None: DC = common.netcmd_dnsname(self.lp) self.server = DC self.creds = credopts.get_credentials(self.lp, fallback_machine=True) drsuapi_connect(self) req1 = drsuapi.DsExecuteKCC1() try: self.drsuapi.DsExecuteKCC(self.drsuapi_handle, 1, req1) except Exception, e: raise CommandError("DsExecuteKCC failed", e) self.message("Consistency check on %s successful." 
% DC) class cmd_drs_replicate(Command): """replicate a naming context between two DCs""" synopsis = "%prog drs replicate <DEST_DC> <SOURCE_DC> <NC>" takes_optiongroups = { "sambaopts": options.SambaOptions, "versionopts": options.VersionOptions, "credopts": options.CredentialsOptions, } takes_args = ["DEST_DC", "SOURCE_DC", "NC"] takes_options = [ Option("--add-ref", help="use ADD_REF to add to repsTo on source", action="store_true"), Option("--sync-forced", help="use SYNC_FORCED to force inbound replication", action="store_true"), ] def run(self, DEST_DC, SOURCE_DC, NC, add_ref=False, sync_forced=False, sambaopts=None, credopts=None, versionopts=None, server=None): self.server = DEST_DC self.lp = sambaopts.get_loadparm() self.creds = credopts.get_credentials(self.lp, fallback_machine=True) drsuapi_connect(self) samdb_connect(self) # we need to find the NTDS GUID of the source DC msg = self.samdb.search(base=self.samdb.get_config_basedn(), expression="(&(objectCategory=server)(|(name=%s)(dNSHostName=%s)))" % (SOURCE_DC, SOURCE_DC), attrs=[]) if len(msg) == 0: raise CommandError("Failed to find source DC %s" % SOURCE_DC) server_dn = msg[0]['dn'] msg = self.samdb.search(base=server_dn, scope=ldb.SCOPE_ONELEVEL, expression="(|(objectCategory=nTDSDSA)(objectCategory=nTDSDSARO))", attrs=['objectGUID', 'options']) if len(msg) == 0: raise CommandError("Failed to find source NTDS DN %s" % SOURCE_DC) source_dsa_guid = msg[0]['objectGUID'][0] options = int(attr_default(msg, 'options', 0)) nc = drsuapi.DsReplicaObjectIdentifier() nc.dn = NC req1 = drsuapi.DsReplicaSyncRequest1() req1.naming_context = nc; req1.options = 0 if not (options & dsdb.DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL): req1.options |= drsuapi.DRSUAPI_DRS_WRIT_REP if add_ref: req1.options |= drsuapi.DRSUAPI_DRS_ADD_REF if sync_forced: req1.options |= drsuapi.DRSUAPI_DRS_SYNC_FORCED req1.source_dsa_guid = misc.GUID(source_dsa_guid) try: self.drsuapi.DsReplicaSync(self.drsuapi_handle, 1, req1) except Exception, 
estr: raise CommandError("DsReplicaSync failed", estr) self.message("Replicate from %s to %s was successful." % (SOURCE_DC, DEST_DC)) class cmd_drs_bind(Command): """show DRS capabilities of a server""" synopsis = "%prog drs bind <DC>" takes_optiongroups = { "sambaopts": options.SambaOptions, "versionopts": options.VersionOptions, "credopts": options.CredentialsOptions, } takes_args = ["DC?"] def run(self, DC=None, sambaopts=None, credopts=None, versionopts=None, server=None): self.lp = sambaopts.get_loadparm() if DC is None: DC = common.netcmd_dnsname(self.lp) self.server = DC self.creds = credopts.get_credentials(self.lp, fallback_machine=True) drsuapi_connect(self) samdb_connect(self) bind_info = drsuapi.DsBindInfoCtr() bind_info.length = 28 bind_info.info = drsuapi.DsBindInfo28() (info, handle) = self.drsuapi.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info) optmap = [ ("DRSUAPI_SUPPORTED_EXTENSION_BASE" , "DRS_EXT_BASE"), ("DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION" , "DRS_EXT_ASYNCREPL"), ("DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI" , "DRS_EXT_REMOVEAPI"), ("DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2" , "DRS_EXT_MOVEREQ_V2"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS" , "DRS_EXT_GETCHG_DEFLATE"), ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1" , "DRS_EXT_DCINFO_V1"), ("DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION" , "DRS_EXT_RESTORE_USN_OPTIMIZATION"), ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY" , "DRS_EXT_ADDENTRY"), ("DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE" , "DRS_EXT_KCC_EXECUTE"), ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2" , "DRS_EXT_ADDENTRY_V2"), ("DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION" , "DRS_EXT_LINKED_VALUE_REPLICATION"), ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2" , "DRS_EXT_DCINFO_V2"), ("DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD","DRS_EXT_INSTANCE_TYPE_NOT_REQ_ON_MOD"), ("DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND" , "DRS_EXT_CRYPTO_BIND"), ("DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO" , "DRS_EXT_GET_REPL_INFO"), 
("DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION" , "DRS_EXT_STRONG_ENCRYPTION"), ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01" , "DRS_EXT_DCINFO_VFFFFFFFF"), ("DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP" , "DRS_EXT_TRANSITIVE_MEMBERSHIP"), ("DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY" , "DRS_EXT_ADD_SID_HISTORY"), ("DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3" , "DRS_EXT_POST_BETA3"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V5" , "DRS_EXT_GETCHGREQ_V5"), ("DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2" , "DRS_EXT_GETMEMBERSHIPS2"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6" , "DRS_EXT_GETCHGREQ_V6"), ("DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS" , "DRS_EXT_NONDOMAIN_NCS"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8" , "DRS_EXT_GETCHGREQ_V8"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5" , "DRS_EXT_GETCHGREPLY_V5"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6" , "DRS_EXT_GETCHGREPLY_V6"), ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3" , "DRS_EXT_WHISTLER_BETA3"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7" , "DRS_EXT_WHISTLER_BETA3"), ("DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT" , "DRS_EXT_WHISTLER_BETA3"), ("DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS" , "DRS_EXT_W2K3_DEFLATE"), ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10" , "DRS_EXT_GETCHGREQ_V10"), ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART2" , "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART2"), ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART3" , "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART3") ] optmap_ext = [ ("DRSUAPI_SUPPORTED_EXTENSION_ADAM", "DRS_EXT_ADAM"), ("DRSUAPI_SUPPORTED_EXTENSION_LH_BETA2", "DRS_EXT_LH_BETA2"), ("DRSUAPI_SUPPORTED_EXTENSION_RECYCLE_BIN", "DRS_EXT_RECYCLE_BIN")] self.message("Bind to %s succeeded." 
% DC) self.message("Extensions supported:") for (opt, str) in optmap: optval = getattr(drsuapi, opt, 0) if info.info.supported_extensions & optval: yesno = "Yes" else: yesno = "No " self.message(" %-60s: %s (%s)" % (opt, yesno, str)) if isinstance(info.info, drsuapi.DsBindInfo48): self.message("\nExtended Extensions supported:") for (opt, str) in optmap_ext: optval = getattr(drsuapi, opt, 0) if info.info.supported_extensions_ext & optval: yesno = "Yes" else: yesno = "No " self.message(" %-60s: %s (%s)" % (opt, yesno, str)) self.message("\nSite GUID: %s" % info.info.site_guid) self.message("Repl epoch: %u" % info.info.repl_epoch) if isinstance(info.info, drsuapi.DsBindInfo48): self.message("Forest GUID: %s" % info.info.config_dn_guid) class cmd_drs_options(Command): """query or change 'options' for NTDS Settings object of a domain controller""" synopsis = ("%prog drs options <DC>" " [--dsa-option={+|-}IS_GC | {+|-}DISABLE_INBOUND_REPL" " |{+|-}DISABLE_OUTBOUND_REPL | {+|-}DISABLE_NTDSCONN_XLATE]") takes_optiongroups = { "sambaopts": options.SambaOptions, "versionopts": options.VersionOptions, "credopts": options.CredentialsOptions, } takes_args = ["DC"] takes_options = [ Option("--dsa-option", help="DSA option to enable/disable", type="str"), ] option_map = {"IS_GC": 0x00000001, "DISABLE_INBOUND_REPL": 0x00000002, "DISABLE_OUTBOUND_REPL": 0x00000004, "DISABLE_NTDSCONN_XLATE": 0x00000008} def run(self, DC, dsa_option=None, sambaopts=None, credopts=None, versionopts=None): self.lp = sambaopts.get_loadparm() if DC is None: DC = common.netcmd_dnsname(self.lp) self.server = DC self.creds = credopts.get_credentials(self.lp, fallback_machine=True) samdb_connect(self) ntds_dn = get_dsServiceName(self.samdb) res = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=["options"]) dsa_opts = int(res[0]["options"][0]) # print out current DSA options cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts] self.message("Current DSA options: " + ", 
".join(cur_opts)) # modify options if dsa_option: if dsa_option[:1] not in ("+", "-"): raise CommandError("Unknown option %s" % dsa_option) flag = dsa_option[1:] if flag not in self.option_map.keys(): raise CommandError("Unknown option %s" % dsa_option) if dsa_option[:1] == "+": dsa_opts |= self.option_map[flag] else: dsa_opts &= ~self.option_map[flag] #save new options m = ldb.Message() m.dn = ldb.Dn(self.samdb, ntds_dn) m["options"]= ldb.MessageElement(str(dsa_opts), ldb.FLAG_MOD_REPLACE, "options") self.samdb.modify(m) # print out new DSA options cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts] self.message("New DSA options: " + ", ".join(cur_opts)) class cmd_drs(SuperCommand): """DRS commands""" subcommands = {} subcommands["bind"] = cmd_drs_bind() subcommands["kcc"] = cmd_drs_kcc() subcommands["replicate"] = cmd_drs_replicate() subcommands["showrepl"] = cmd_drs_showrepl() subcommands["options"] = cmd_drs_options()
gpl-3.0
chrisfranzen/django
tests/regressiontests/introspection/tests.py
44
7272
from __future__ import absolute_import, unicode_literals from functools import update_wrapper from django.db import connection from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature from django.utils import six, unittest from .models import Reporter, Article if connection.vendor == 'oracle': expectedFailureOnOracle = unittest.expectedFailure else: expectedFailureOnOracle = lambda f: f # The introspection module is optional, so methods tested here might raise # NotImplementedError. This is perfectly acceptable behavior for the backend # in question, but the tests need to handle this without failing. Ideally we'd # skip these tests, but until #4788 is done we'll just ignore them. # # The easiest way to accomplish this is to decorate every test case with a # wrapper that ignores the exception. # # The metaclass is just for fun. def ignore_not_implemented(func): def _inner(*args, **kwargs): try: return func(*args, **kwargs) except NotImplementedError: return None update_wrapper(_inner, func) return _inner class IgnoreNotimplementedError(type): def __new__(cls, name, bases, attrs): for k, v in attrs.items(): if k.startswith('test'): attrs[k] = ignore_not_implemented(v) return type.__new__(cls, name, bases, attrs) class IntrospectionTests(six.with_metaclass(IgnoreNotimplementedError, TestCase)): def test_table_names(self): tl = connection.introspection.table_names() self.assertEqual(tl, sorted(tl)) self.assertTrue(Reporter._meta.db_table in tl, "'%s' isn't in table_list()." % Reporter._meta.db_table) self.assertTrue(Article._meta.db_table in tl, "'%s' isn't in table_list()." 
% Article._meta.db_table) def test_django_table_names(self): cursor = connection.cursor() cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names() cursor.execute("DROP TABLE django_ixn_test_table;") self.assertTrue('django_ixn_testcase_table' not in tl, "django_table_names() returned a non-Django table") def test_django_table_names_retval_type(self): # Ticket #15216 cursor = connection.cursor() cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);') tl = connection.introspection.django_table_names(only_existing=True) self.assertIs(type(tl), list) tl = connection.introspection.django_table_names(only_existing=False) self.assertIs(type(tl), list) def test_installed_models(self): tables = [Article._meta.db_table, Reporter._meta.db_table] models = connection.introspection.installed_models(tables) self.assertEqual(models, set([Article, Reporter])) def test_sequence_list(self): sequences = connection.introspection.sequence_list() expected = {'table': Reporter._meta.db_table, 'column': 'id'} self.assertTrue(expected in sequences, 'Reporter sequence not found in sequence_list()') def test_get_table_description_names(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual([r[0] for r in desc], [f.column for f in Reporter._meta.fields]) def test_get_table_description_types(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [datatype(r[1], r) for r in desc], ['IntegerField', 'CharField', 'CharField', 'CharField', 'BigIntegerField'] ) # The following test fails on Oracle due to #17202 (can't correctly # inspect the length of character columns). 
@expectedFailureOnOracle def test_get_table_description_col_lengths(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [r[3] for r in desc if datatype(r[1], r) == 'CharField'], [30, 30, 75] ) # Oracle forces null=True under the hood in some cases (see # https://docs.djangoproject.com/en/dev/ref/databases/#null-and-empty-strings) # so its idea about null_ok in cursor.description is different from ours. @skipIfDBFeature('interprets_empty_strings_as_nulls') def test_get_table_description_nullable(self): cursor = connection.cursor() desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table) self.assertEqual( [r[6] for r in desc], [False, False, False, False, True] ) # Regression test for #9991 - 'real' types in postgres @skipUnlessDBFeature('has_real_datatype') def test_postgresql_real_type(self): cursor = connection.cursor() cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);") desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table') cursor.execute('DROP TABLE django_ixn_real_test_table;') self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField') def test_get_relations(self): cursor = connection.cursor() relations = connection.introspection.get_relations(cursor, Article._meta.db_table) # Older versions of MySQL don't have the chops to report on this stuff, # so just skip it if no relations come back. If they do, though, we # should test that the response is correct. 
if relations: # That's {field_index: (field_index_other_table, other_table)} self.assertEqual(relations, {3: (0, Reporter._meta.db_table)}) def test_get_key_columns(self): cursor = connection.cursor() key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table) self.assertEqual(key_columns, [('reporter_id', Reporter._meta.db_table, 'id')]) def test_get_primary_key_column(self): cursor = connection.cursor() primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table) self.assertEqual(primary_key_column, 'id') def test_get_indexes(self): cursor = connection.cursor() indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table) self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False}) def test_get_indexes_multicol(self): """ Test that multicolumn indexes are not included in the introspection results. """ cursor = connection.cursor() indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table) self.assertNotIn('first_name', indexes) self.assertIn('id', indexes) def datatype(dbtype, description): """Helper to convert a data type into a string.""" dt = connection.introspection.get_field_type(dbtype, description) if type(dt) is tuple: return dt[0] else: return dt
bsd-3-clause
MediffRobotics/DeepRobotics
rpiRobotArm/InverseKine/robotics-toolbox-python1/trunk/robotics-toolbox-python/test/robot/jacobian.py
5
3119
""" Jacobian matrix operations. @author: Peter Corke @copyright: Peter Corke """ from numpy import * from robot.utility import * from robot.transform import * from robot.kinematics import * from numpy.linalg import norm def jacob0(robot, q): """ Compute manipulator Jacobian in world coordinates for joint coordinates C{q}. The manipulator Jacobian matrix maps differential changes in joint space to differential Cartesian motion (world coord frame) of the end-effector M{dX = J dQ} @type robot: Robot @type q: vector M{n x 1} @param q: joint coordinate @rtype: matrix M{6 x n} @return: Manipulator Jacobian @see: L{jacobn}, L{diff2tr}, L{tr2diff} """ q = mat(q) Jn = jacobn(robot, q) # Jacobian from joint to wrist space # convert to Jacobian in base coordinates Tn = fkine(robot,q) # end-effector transformation R = t2r(Tn) return concatenate( ( concatenate( (R,zeros((3,3))) ,1) , concatenate( (zeros((3,3)),R) ,1) ))*Jn def jacobn(robot, q): """ Compute manipulator Jacobian in tool coordinates for joint coordinates C{q}. The manipulator Jacobian matrix maps differential changes in joint space to differential Cartesian motion (tool coord frame) of the end-effector. M{dX = J dQ} Reference ========= Paul, Shimano, Mayer Differential Kinematic Control Equations for Simple Manipulators IEEE SMC 11(6) 1981 pp. 
456-460 @type robot: Robot @type q: vector M{n x 1} @param q: joint coordinate @rtype: matrix M{6 x n} @return: Manipulator Jacobian @see: L{jacobn}, L{diff2tr}, L{tr2diff} """ q = arg2array(q) n = robot.n L = robot.links J = mat([[],[],[],[],[],[]]) U = robot.tool for j in range(n-1,-1,-1): if not robot.ismdh(): #standard DH convention U = L[j].tr(q[j])*U if L[j].sigma == 0: #revolute axis d = matrix([[-U[0,0]*U[1,3] + U[1,0]*U[0,3]],\ [-U[0,1]*U[1,3] + U[1,1]*U[0,3]],\ [-U[0,2]*U[1,3] + U[1,2]*U[0,3]]]) delta = U[2,0:3].T # nz oz az else: #prismatic axis d = U[2,0:3].T # nz oz az delta = zeros((3,1)) # 0 0 0 J = concatenate((concatenate((d,delta)),J),1) if robot.ismdh(): #modified DH convention U=L[j].tr(q[j])*U return J def tr2jac(t): """ Compute a Jacobian to map differentials motion between frames. The M{6x6} Jacobian matrix to map differentials (joint velocity) between frames related by the homogeneous transform C{t}. @rtype: matrix M{6 x 6} @return: Jacobian matrix @see: L{tr2diff}, L{diff2tr} """ t = mat(t) return concatenate(( concatenate((t[0:3,0].T, crossp(t[0:3,3],t[0:3,0]).T),1), concatenate((t[0:3,1].T, crossp(t[0:3,3],t[0:3,1]).T),1), concatenate((t[0:3,2].T, crossp(t[0:3,3],t[0:3,2]).T),1), concatenate((zeros((3,3)),t[0:3,0:3].T),1) ))
gpl-3.0
stephane-martin/salt-debian-packaging
salt-2016.3.3/salt/modules/smartos_vmadm.py
2
28353
# -*- coding: utf-8 -*- ''' Module for running vmadm command on SmartOS ''' from __future__ import absolute_import # Import Python libs import logging import json import os try: from shlex import quote as _quote_args # pylint: disable=E0611 except ImportError: from pipes import quote as _quote_args # Import Salt libs import salt.utils import salt.utils.decorators as decorators from salt.utils.odict import OrderedDict log = logging.getLogger(__name__) # Function aliases __func_alias__ = { 'list_vms': 'list' } # Define the module's virtual name __virtualname__ = 'vmadm' @decorators.memoize def _check_vmadm(): ''' Looks to see if vmadm is present on the system ''' return salt.utils.which('vmadm') def _check_zfs(): ''' Looks to see if zfs is present on the system ''' return salt.utils.which('zfs') def __virtual__(): ''' Provides vmadm on SmartOS ''' if salt.utils.is_smartos_globalzone() and _check_vmadm(): return __virtualname__ return ( False, '{0} module can only be loaded on SmartOS computed nodes'.format( __virtualname__ ) ) def _exit_status(retcode): ''' Translate exit status of vmadm ''' ret = {0: 'Successful completion.', 1: 'An error occurred.', 2: 'Usage error.'}[retcode] return ret def _create_update_from_file(mode='create', uuid=None, path=None): ''' Create vm from file ''' ret = {} vmadm = _check_vmadm() if not os.path.isfile(path) or path is None: ret['Error'] = 'File ({0}) does not exists!'.format(path) return ret # vmadm validate create|update [-f <filename>] cmd = '{vmadm} validate {mode} {brand} -f {path}'.format( vmadm=vmadm, mode=mode, brand=get(uuid)['brand'] if uuid is not None else '', path=path ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = _exit_status(retcode) if 'stderr' in res: if res['stderr'][0] == '{': ret['Error'] = json.loads(res['stderr']) else: ret['Error'] = res['stderr'] return ret # vmadm create|update [-f <filename>] cmd = '{vmadm} {mode} {uuid} -f {path}'.format( vmadm=vmadm, 
mode=mode, uuid=uuid if uuid is not None else '', path=path ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = _exit_status(retcode) if 'stderr' in res: if res['stderr'][0] == '{': ret['Error'] = json.loads(res['stderr']) else: ret['Error'] = res['stderr'] return ret else: if res['stderr'].startswith('Successfully created VM'): return res['stderr'][24:] return True def _create_update_from_cfg(mode='create', uuid=None, vmcfg=None): ''' Create vm from configuration ''' ret = {} vmadm = _check_vmadm() # write json file vmadm_json_file = __salt__['temp.file'](prefix='vmadm-') with salt.utils.fopen(vmadm_json_file, 'w') as vmadm_json: vmadm_json.write(json.dumps(vmcfg)) # vmadm validate create|update [-f <filename>] cmd = '{vmadm} validate {mode} {brand} -f {vmadm_json_file}'.format( vmadm=vmadm, mode=mode, brand=get(uuid)['brand'] if uuid is not None else '', vmadm_json_file=vmadm_json_file ) res = __salt__['cmd.run_all'](cmd, python_shell=True) retcode = res['retcode'] if retcode != 0: ret['Error'] = _exit_status(retcode) if 'stderr' in res: if res['stderr'][0] == '{': ret['Error'] = json.loads(res['stderr']) else: ret['Error'] = res['stderr'] return ret # vmadm create|update [-f <filename>] cmd = '{vmadm} {mode} {uuid} -f {vmadm_json_file}'.format( vmadm=vmadm, mode=mode, uuid=uuid if uuid is not None else '', vmadm_json_file=vmadm_json_file ) res = __salt__['cmd.run_all'](cmd, python_shell=True) retcode = res['retcode'] if retcode != 0: ret['Error'] = _exit_status(retcode) if 'stderr' in res: if res['stderr'][0] == '{': ret['Error'] = json.loads(res['stderr']) else: ret['Error'] = res['stderr'] return ret else: # cleanup json file (only when succesful to help troubleshooting) salt.utils.safe_rm(vmadm_json_file) # return uuid if res['stderr'].startswith('Successfully created VM'): return res['stderr'][24:] return True def start(vm, options=None, key='uuid'): ''' Start a vm vm : string vm to be started options : string 
optional additional options key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.start 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.start 186da9ab-7392-4f55-91a5-b8f1fe770543 'order=c,once=d cdrom=/path/to/image.iso,ide' salt '*' vmadm.start vm=nacl key=alias salt '*' vmadm.start vm=nina.example.org key=hostname ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm start <uuid> [option=value ...] cmd = '{vmadm} start {uuid} {options}'.format( vmadm=vmadm, uuid=vm, options=options if options else '' ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def stop(vm, force=False, key='uuid'): ''' Stop a vm vm : string vm to be stopped force : boolean force stop of vm if true key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. 
code-block:: bash salt '*' vmadm.stop 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.stop 186da9ab-7392-4f55-91a5-b8f1fe770543 True salt '*' vmadm.stop vm=nacl key=alias salt '*' vmadm.stop vm=nina.example.org key=hostname ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm stop <uuid> [-F] cmd = '{vmadm} stop {force} {uuid}'.format( vmadm=vmadm, force='-F' if force else '', uuid=vm ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = _exit_status(retcode) return ret return True def reboot(vm, force=False, key='uuid'): ''' Reboot a vm vm : string vm to be rebooted force : boolean force reboot of vm if true key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.reboot 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.reboot 186da9ab-7392-4f55-91a5-b8f1fe770543 True salt '*' vmadm.reboot vm=nacl key=alias salt '*' vmadm.reboot vm=nina.example.org key=hostname ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm reboot <uuid> [-F] cmd = '{vmadm} reboot {force} {uuid}'.format( vmadm=vmadm, force='-F' if force else '', uuid=vm ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def list_vms(search=None, sort=None, order='uuid,type,ram,state,alias', keyed=True): ''' Return a list of VMs search : string vmadm filter property sort : string vmadm sort (-s) property order : string vmadm order (-o) property -- Default: uuid,type,ram,state,alias keyed : boolean specified if the output 
should be an array (False) or dict (True) For a dict the key is the first item from the order parameter. Note: If key is not unique last vm wins. CLI Example: .. code-block:: bash salt '*' vmadm.list salt '*' vmadm.list order=alias,ram,cpu_cap sort=-ram,-cpu_cap salt '*' vmadm.list search='type=KVM' ''' ret = {} vmadm = _check_vmadm() # vmadm list [-p] [-H] [-o field,...] [-s field,...] [field=value ...] cmd = '{vmadm} list -p -H {order} {sort} {search}'.format( vmadm=vmadm, order='-o {0}'.format(order) if order else '', sort='-s {0}'.format(sort) if sort else '', search=search if search else '' ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] result = OrderedDict() if keyed else [] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret fields = order.split(',') for vm in res['stdout'].splitlines(): vm_data = OrderedDict() vm = vm.split(':') if keyed: for field in fields: if fields.index(field) == 0: continue vm_data[field.strip()] = vm[fields.index(field)].strip() result[vm[0]] = vm_data else: if len(vm) > 1: for field in fields: vm_data[field.strip()] = vm[fields.index(field)].strip() else: vm_data = vm[0] result.append(vm_data) return result def lookup(search=None, order=None, one=False): ''' Return a list of VMs using lookup search : string vmadm filter property order : string vmadm order (-o) property -- Default: uuid,type,ram,state,alias one : boolean return only one result (vmadm's -1) CLI Example: .. code-block:: bash salt '*' vmadm.lookup search='state=running' salt '*' vmadm.lookup search='state=running' order=uuid,alias,hostname salt '*' vmadm.lookup search='alias=nacl' one=True ''' ret = {} vmadm = _check_vmadm() # vmadm lookup [-j|-1] [-o field,...] [field=value ...] 
cmd = '{vmadm} lookup {one} {order} {search}'.format( vmadm=vmadm, one='-1' if one else '-j', order='-o {0}'.format(order) if order else '', search=search if search else '' ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] result = [] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret if one: result = res['stdout'] else: for vm in json.loads(res['stdout']): result.append(vm) return result def sysrq(vm, action='nmi', key='uuid'): ''' Send non-maskable interrupt to vm or capture a screenshot vm : string vm to be targeted action : string nmi or screenshot -- Default: nmi key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.sysrq 186da9ab-7392-4f55-91a5-b8f1fe770543 nmi salt '*' vmadm.sysrq 186da9ab-7392-4f55-91a5-b8f1fe770543 screenshot salt '*' vmadm.sysrq nacl nmi key=alias ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret if action not in ['nmi', 'screenshot']: ret['Error'] = 'Action must be either nmi or screenshot' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm sysrq <uuid> <nmi|screenshot> cmd = '{vmadm} sysrq {uuid} {action}'.format( vmadm=vmadm, uuid=vm, action=action ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def delete(vm, key='uuid'): ''' Delete a vm vm : string vm to be deleted key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. 
code-block:: bash salt '*' vmadm.delete 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.delete nacl key=alias ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm delete <uuid> cmd = '{vmadm} delete {uuid}'.format( vmadm=vmadm, uuid=vm ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def get(vm, key='uuid'): ''' Output the JSON object describing a VM vm : string vm to be targeted key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.get 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.get nacl key=alias ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm get <uuid> cmd = '{vmadm} get {uuid}'.format( vmadm=vmadm, uuid=vm ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return json.loads(res['stdout']) def info(vm, info_type='all', key='uuid'): ''' Lookup info on running kvm vm : string vm to be targeted info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc] info type to return key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. 
code-block:: bash salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc salt '*' vmadm.info nacl key=alias salt '*' vmadm.info nacl vnc key=alias ''' ret = {} vmadm = _check_vmadm() if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus', 'kvm', 'pci', 'spice', 'version', 'vnc']: ret['Error'] = 'Requested info_type is not available' return ret if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm info <uuid> [type,...] cmd = '{vmadm} info {uuid} {type}'.format( vmadm=vmadm, uuid=vm, type=info_type ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return json.loads(res['stdout']) def create_snapshot(vm, name, key='uuid'): ''' Create snapshot of a vm vm : string vm to be targeted name : string snapshot name The snapname must be 64 characters or less and must only contain alphanumeric characters and characters in the set [-_.:%] to comply with ZFS restrictions. key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. 
code-block:: bash salt '*' vmadm.create_snapshot 186da9ab-7392-4f55-91a5-b8f1fe770543 baseline salt '*' vmadm.create_snapshot nacl baseline key=alias ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm vmobj = get(vm) if 'datasets' in vmobj: ret['Error'] = 'VM cannot have datasets' return ret if vmobj['brand'] in ['kvm']: ret['Error'] = 'VM must be of type OS' return ret if vmobj['zone_state'] not in ['running']: # work around a vmadm bug ret['Error'] = 'VM must be running to take a snapshot' return ret # vmadm create-snapshot <uuid> <snapname> cmd = '{vmadm} create-snapshot {uuid} {snapshot}'.format( vmadm=vmadm, snapshot=name, uuid=vm ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def delete_snapshot(vm, name, key='uuid'): ''' Delete snapshot of a vm vm : string vm to be targeted name : string snapshot name The snapname must be 64 characters or less and must only contain alphanumeric characters and characters in the set [-_.:%] to comply with ZFS restrictions. key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. 
code-block:: bash salt '*' vmadm.delete_snapshot 186da9ab-7392-4f55-91a5-b8f1fe770543 baseline salt '*' vmadm.delete_snapshot nacl baseline key=alias ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm vmobj = get(vm) if 'datasets' in vmobj: ret['Error'] = 'VM cannot have datasets' return ret if vmobj['brand'] in ['kvm']: ret['Error'] = 'VM must be of type OS' return ret # vmadm delete-snapshot <uuid> <snapname> cmd = '{vmadm} delete-snapshot {uuid} {snapshot}'.format( vmadm=vmadm, snapshot=name, uuid=vm ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def rollback_snapshot(vm, name, key='uuid'): ''' Rollback snapshot of a vm vm : string vm to be targeted name : string snapshot name The snapname must be 64 characters or less and must only contain alphanumeric characters and characters in the set [-_.:%] to comply with ZFS restrictions. key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. 
code-block:: bash salt '*' vmadm.rollback_snapshot 186da9ab-7392-4f55-91a5-b8f1fe770543 baseline salt '*' vmadm.rollback_snapshot nacl baseline key=alias ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm vmobj = get(vm) if 'datasets' in vmobj: ret['Error'] = 'VM cannot have datasets' return ret if vmobj['brand'] in ['kvm']: ret['Error'] = 'VM must be of type OS' return ret # vmadm rollback-snapshot <uuid> <snapname> cmd = '{vmadm} rollback-snapshot {uuid} {snapshot}'.format( vmadm=vmadm, snapshot=name, uuid=vm ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def reprovision(vm, image, key='uuid'): ''' Reprovision a vm vm : string vm to be reprovisioned image : string uuid of new image key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. 
code-block:: bash salt '*' vmadm.reprovision 186da9ab-7392-4f55-91a5-b8f1fe770543 c02a2044-c1bd-11e4-bd8c-dfc1db8b0182 salt '*' vmadm.reprovision nacl c02a2044-c1bd-11e4-bd8c-dfc1db8b0182 key=alias ''' ret = {} vmadm = _check_vmadm() if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm if image not in __salt__['imgadm.list'](): ret['Error'] = 'Image ({0}) is not present on this host'.format(image) return ret # vmadm reprovision <uuid> [-f <filename>] cmd = 'echo {image} | {vmadm} reprovision {uuid}'.format( vmadm=vmadm, uuid=vm, image=_quote_args(json.dumps({'image_uuid': image})) ) res = __salt__['cmd.run_all'](cmd, python_shell=True) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return True def create(from_file=None, **kwargs): ''' Create a new vm from_file : string json file to create the vm from -- if present, all other options will be ignored kwargs : string|int|... options to set for the vm CLI Example: .. code-block:: bash salt '*' vmadm.create from_file=/tmp/new_vm.json salt '*' vmadm.create image_uuid='...' alias='...' nics='[{ "nic_tag": "admin", "ip": "198.51.100.123", ...}, {...}]' [...] ''' ret = {} # prepare vmcfg vmcfg = {} kwargs = salt.utils.clean_kwargs(**kwargs) for k, v in kwargs.iteritems(): vmcfg[k] = v if from_file: return _create_update_from_file('create', path=from_file) else: return _create_update_from_cfg('create', vmcfg=vmcfg) def update(vm, from_file=None, key='uuid', **kwargs): ''' Update a new vm vm : string vm to be updated from_file : string json file to update the vm with -- if present, all other options will be ignored key : string [uuid|alias|hostname] value type of 'vm' parameter kwargs : string|int|... options to update for the vm CLI Example: .. 
code-block:: bash

        salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 from_file=/tmp/new_vm.json
        salt '*' vmadm.update vm=nacl key=alias from_file=/tmp/new_vm.json
        salt '*' vmadm.update vm=186da9ab-7392-4f55-91a5-b8f1fe770543 max_physical_memory=1024
    '''
    ret = {}
    vmadm = _check_vmadm()
    # prepare vmcfg
    # Collect every leftover CLI keyword into the property dict handed to vmadm.
    vmcfg = {}
    kwargs = salt.utils.clean_kwargs(**kwargs)
    # NOTE(review): iteritems() is Python 2 only -- presumably this module
    # targets py2-era Salt; on py3 this would need .items(). Confirm.
    for k, v in kwargs.iteritems():
        vmcfg[k] = v

    # Only these three lookup keys are accepted by the filter syntax used below.
    if key not in ['uuid', 'alias', 'hostname']:
        ret['Error'] = 'Key must be either uuid, alias or hostname'
        return ret

    # Resolve the user-supplied identifier to the canonical uuid
    # (one=True yields a single match, or an {'Error': ...} dict).
    uuid = lookup('{0}={1}'.format(key, vm), one=True)
    if 'Error' in uuid:
        return uuid

    # A JSON payload file takes precedence over the inline key=value config.
    if from_file:
        return _create_update_from_file('update', uuid, path=from_file)
    else:
        return _create_update_from_cfg('update', uuid, vmcfg=vmcfg)


def send(vm, target, key='uuid'):
    '''
    Send a vm to a directory

    vm : string
        vm to be sent
    target : string
        target directory
    key : string [uuid|alias|hostname]
        value type of 'vm' parameter

    CLI Example:

    .. code-block:: bash

        salt '*' vmadm.send 186da9ab-7392-4f55-91a5-b8f1fe770543 /opt/backups
        salt '*' vmadm.send vm=nacl target=/opt/backups key=alias
    '''
    # Returns True on success, or a dict carrying an 'Error' key on failure.
    ret = {}
    vmadm = _check_vmadm()
    zfs = _check_zfs()
    if key not in ['uuid', 'alias', 'hostname']:
        ret['Error'] = 'Key must be either uuid, alias or hostname'
        return ret
    if not os.path.isdir(target):
        ret['Error'] = 'Target must be a directory or host'
        return ret
    # Resolve alias/hostname to the canonical uuid.
    vm = lookup('{0}={1}'.format(key, vm), one=True)
    if 'Error' in vm:
        return vm
    # vmadm send <uuid> [target]
    # The stream is redirected into <target>/<uuid>.vmdata; shell redirection
    # is part of the command, hence python_shell=True below.
    cmd = '{vmadm} send {uuid} > {target}'.format(
        vmadm=vmadm,
        uuid=vm,
        target=os.path.join(target, '{0}.vmdata'.format(vm))
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=True)
    retcode = res['retcode']
    if retcode != 0:
        ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return ret
    # Extra ZFS datasets attached to the vm are not carried by 'vmadm send';
    # stream each one separately next to the vmdata file.
    vmobj = get(vm)
    if 'datasets' not in vmobj:
        return True
    log.warning('one or more datasets detected, this is not supported!')
    log.warning('trying to zfs send datasets...')
    for dataset in vmobj['datasets']:
        # Use only the last path component as the per-dataset file suffix.
        name = dataset.split('/')
        name = name[-1]
        cmd = '{zfs} send {dataset} > {target}'.format(
            zfs=zfs,
            dataset=dataset,
            target=os.path.join(target, '{0}-{1}.zfsds'.format(vm, name))
        )
        res = __salt__['cmd.run_all'](cmd, python_shell=True)
        retcode = res['retcode']
        if retcode != 0:
            ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
            return ret
    return True


def receive(uuid, source):
    '''
    Receive a vm from a directory

    uuid : string
        uuid of vm to be received
    source : string
        source directory

    CLI Example:

    .. code-block:: bash

        salt '*' vmadm.receive 186da9ab-7392-4f55-91a5-b8f1fe770543 /opt/backups
    '''
    # Counterpart of send(): restores <uuid>.vmdata plus any *.zfsds dataset
    # streams, then runs 'vmadm install'. Returns True or {'Error': ...}.
    ret = {}
    vmadm = _check_vmadm()
    zfs = _check_zfs()
    if not os.path.isdir(source):
        ret['Error'] = 'Source must be a directory or host'
        return ret
    if not os.path.exists(os.path.join(source, '{0}.vmdata'.format(uuid))):
        # NOTE(review): 'Unknow' is a typo for 'Unknown' in this user-facing
        # message -- fix upstream (literal left untouched here).
        ret['Error'] = 'Unknow vm with uuid in {0}'.format(source)
        return ret
    # vmadm receive
    cmd = '{vmadm} receive < {source}'.format(
        vmadm=vmadm,
        source=os.path.join(source, '{0}.vmdata'.format(uuid))
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=True)
    retcode = res['retcode']
    # vmadm exits non-zero with a message ending in 'datasets' when extra
    # datasets are present; that case is recovered below, so don't bail then.
    if retcode != 0 and not res['stderr'].endswith('datasets'):
        ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return ret
    vmobj = get(uuid)
    if 'datasets' not in vmobj:
        return True
    log.warning('one or more datasets detected, this is not supported!')
    log.warning('trying to restore datasets, mountpoints will need to be set again...')
    for dataset in vmobj['datasets']:
        # Mirror of the naming scheme used in send(): <uuid>-<leafname>.zfsds.
        name = dataset.split('/')
        name = name[-1]
        cmd = '{zfs} receive {dataset} < {source}'.format(
            zfs=zfs,
            dataset=dataset,
            source=os.path.join(source, '{0}-{1}.zfsds'.format(uuid, name))
        )
        res = __salt__['cmd.run_all'](cmd, python_shell=True)
        retcode = res['retcode']
        if retcode != 0:
            ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
            return ret
    # Finalize: let vmadm (re)install the received vm.
    cmd = '{vmadm} install {uuid}'.format(
        vmadm=vmadm,
        uuid=uuid
    )
    res = __salt__['cmd.run_all'](cmd, python_shell=True)
    retcode = res['retcode']
    if retcode != 0 and not res['stderr'].endswith('datasets'):
        ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode)
        return ret
    return True

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
apache-2.0
Yen-Chung-En/w16b_test
static/Brython3.1.3-20150514-095342/Lib/subprocess.py
728
67282
# subprocess - Subprocesses with accessible I/O streams # # For more information about this module, see PEP 324. # # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se> # # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license for licensing details. r"""subprocess - Subprocesses with accessible I/O streams This module allows you to spawn processes, connect to their input/output/error pipes, and obtain their return codes. This module intends to replace several other, older modules and functions, like: os.system os.spawn* Information about how the subprocess module can be used to replace these modules and functions can be found below. Using the subprocess module =========================== This module defines one class called Popen: class Popen(args, bufsize=-1, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=True, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0, restore_signals=True, start_new_session=False, pass_fds=()): Arguments are: args should be a string, or a sequence of program arguments. The program to execute is normally the first item in the args sequence or string, but can be explicitly set by using the executable argument. On POSIX, with shell=False (default): In this case, the Popen class uses os.execvp() to execute the child program. args should normally be a sequence. A string will be treated as a sequence with the string as the only item (the program to execute). On POSIX, with shell=True: If args is a string, it specifies the command string to execute through the shell. If args is a sequence, the first item specifies the command string, and any additional items will be treated as additional shell arguments. On Windows: the Popen class uses CreateProcess() to execute the child program, which operates on strings. If args is a sequence, it will be converted to a string using the list2cmdline method. 
Please note that not all MS Windows applications interpret the command line the same way: The list2cmdline is designed for applications using the same rules as the MS C runtime. bufsize will be supplied as the corresponding argument to the io.open() function when creating the stdin/stdout/stderr pipe file objects: 0 means unbuffered (read & write are one system call and can return short), 1 means line buffered, any other positive value means use a buffer of approximately that size. A negative bufsize, the default, means the system default of io.DEFAULT_BUFFER_SIZE will be used. stdin, stdout and stderr specify the executed programs' standard input, standard output and standard error file handles, respectively. Valid values are PIPE, an existing file descriptor (a positive integer), an existing file object, and None. PIPE indicates that a new pipe to the child should be created. With None, no redirection will occur; the child's file handles will be inherited from the parent. Additionally, stderr can be STDOUT, which indicates that the stderr data from the applications should be captured into the same file handle as for stdout. On POSIX, if preexec_fn is set to a callable object, this object will be called in the child process just before the child is executed. The use of preexec_fn is not thread safe, using it in the presence of threads could lead to a deadlock in the child process before the new executable is executed. If close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. The default for close_fds varies by platform: Always true on POSIX. True when stdin/stdout/stderr are None on Windows, false otherwise. pass_fds is an optional sequence of file descriptors to keep open between the parent and child. Providing any pass_fds implicitly sets close_fds to true. if shell is true, the specified command will be executed through the shell. 
If cwd is not None, the current directory will be changed to cwd before the child is executed. On POSIX, if restore_signals is True all signals that Python sets to SIG_IGN are restored to SIG_DFL in the child process before the exec. Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This parameter does nothing on Windows. On POSIX, if start_new_session is True, the setsid() system call will be made in the child process prior to executing the command. If env is not None, it defines the environment variables for the new process. If universal_newlines is false, the file objects stdin, stdout and stderr are opened as binary files, and no line ending conversion is done. If universal_newlines is true, the file objects stdout and stderr are opened as a text files, but lines may be terminated by any of '\n', the Unix end-of-line convention, '\r', the old Macintosh convention or '\r\n', the Windows convention. All of these external representations are seen as '\n' by the Python program. Also, the newlines attribute of the file objects stdout, stdin and stderr are not updated by the communicate() method. The startupinfo and creationflags, if given, will be passed to the underlying CreateProcess() function. They can specify things such as appearance of the main window and priority for the new process. (Windows only) This module also defines some shortcut functions: call(*popenargs, **kwargs): Run command with arguments. Wait for command to complete, then return the returncode attribute. The arguments are the same as for the Popen constructor. Example: >>> retcode = subprocess.call(["ls", "-l"]) check_call(*popenargs, **kwargs): Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. 
Example: >>> subprocess.check_call(["ls", "-l"]) 0 getstatusoutput(cmd): Return (status, output) of executing cmd in a shell. Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple (status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the returned output will contain output or error messages. A trailing newline is stripped from the output. The exit status for the command can be interpreted according to the rules for the C function wait(). Example: >>> subprocess.getstatusoutput('ls /bin/ls') (0, '/bin/ls') >>> subprocess.getstatusoutput('cat /bin/junk') (256, 'cat: /bin/junk: No such file or directory') >>> subprocess.getstatusoutput('/bin/junk') (256, 'sh: /bin/junk: not found') getoutput(cmd): Return output (stdout or stderr) of executing cmd in a shell. Like getstatusoutput(), except the exit status is ignored and the return value is a string containing the command's output. Example: >>> subprocess.getoutput('ls /bin/ls') '/bin/ls' check_output(*popenargs, **kwargs): Run command with arguments and return its output. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> output = subprocess.check_output(["ls", "-l", "/dev/null"]) Exceptions ---------- Exceptions raised in the child process, before the new program has started to execute, will be re-raised in the parent. Additionally, the exception object will have one extra attribute called 'child_traceback', which is a string containing traceback information from the child's point of view. The most common exception raised is OSError. This occurs, for example, when trying to execute a non-existent file. Applications should prepare for OSErrors. A ValueError will be raised if Popen is called with invalid arguments. Exceptions defined within this module inherit from SubprocessError. 
check_call() and check_output() will raise CalledProcessError if the called process returns a non-zero return code. TimeoutExpired be raised if a timeout was specified and expired. Security -------- Unlike some other popen functions, this implementation will never call /bin/sh implicitly. This means that all characters, including shell metacharacters, can safely be passed to child processes. Popen objects ============= Instances of the Popen class have the following methods: poll() Check if child process has terminated. Returns returncode attribute. wait() Wait for child process to terminate. Returns returncode attribute. communicate(input=None) Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. communicate() returns a tuple (stdout, stderr). Note: The data read is buffered in memory, so do not use this method if the data size is large or unlimited. The following attributes are also available: stdin If the stdin argument is PIPE, this attribute is a file object that provides input to the child process. Otherwise, it is None. stdout If the stdout argument is PIPE, this attribute is a file object that provides output from the child process. Otherwise, it is None. stderr If the stderr argument is PIPE, this attribute is file object that provides error output from the child process. Otherwise, it is None. pid The process ID of the child process. returncode The child return code. A None value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (POSIX only). Replacing older functions with the subprocess module ==================================================== In this section, "a ==> b" means that b can be used as a replacement for a. 
Note: All functions in this section fail (more or less) silently if the executed program cannot be found; this module raises an OSError exception. In the following examples, we assume that the subprocess module is imported with "from subprocess import *". Replacing /bin/sh shell backquote --------------------------------- output=`mycmd myarg` ==> output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0] Replacing shell pipe line ------------------------- output=`dmesg | grep hda` ==> p1 = Popen(["dmesg"], stdout=PIPE) p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) output = p2.communicate()[0] Replacing os.system() --------------------- sts = os.system("mycmd" + " myarg") ==> p = Popen("mycmd" + " myarg", shell=True) pid, sts = os.waitpid(p.pid, 0) Note: * Calling the program through the shell is usually not required. * It's easier to look at the returncode attribute than the exitstatus. A more real-world example would look like this: try: retcode = call("mycmd" + " myarg", shell=True) if retcode < 0: print("Child was terminated by signal", -retcode, file=sys.stderr) else: print("Child returned", retcode, file=sys.stderr) except OSError as e: print("Execution failed:", e, file=sys.stderr) Replacing os.spawn* ------------------- P_NOWAIT example: pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg") ==> pid = Popen(["/bin/mycmd", "myarg"]).pid P_WAIT example: retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg") ==> retcode = call(["/bin/mycmd", "myarg"]) Vector example: os.spawnvp(os.P_NOWAIT, path, args) ==> Popen([path] + args[1:]) Environment example: os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env) ==> Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"}) """ import sys mswindows = (sys.platform == "win32") import io import os import time import traceback import gc import signal import builtins import warnings import errno try: from time import monotonic as _time except ImportError: from time import time as 
_time # Exception classes used by this module. class SubprocessError(Exception): pass class CalledProcessError(SubprocessError): """This exception is raised when a process run by check_call() or check_output() returns a non-zero exit status. The exit status will be stored in the returncode attribute; check_output() will also store the output in the output attribute. """ def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) class TimeoutExpired(SubprocessError): """This exception is raised when the timeout expires while waiting for a child process. """ def __init__(self, cmd, timeout, output=None): self.cmd = cmd self.timeout = timeout self.output = output def __str__(self): return ("Command '%s' timed out after %s seconds" % (self.cmd, self.timeout)) if mswindows: import threading import msvcrt import _winapi class STARTUPINFO: dwFlags = 0 hStdInput = None hStdOutput = None hStdError = None wShowWindow = 0 class pywintypes: error = IOError else: import select _has_poll = hasattr(select, 'poll') import _posixsubprocess _create_pipe = _posixsubprocess.cloexec_pipe # When select or poll has indicated that the file is writable, # we can write up to _PIPE_BUF bytes without risk of blocking. # POSIX defines PIPE_BUF as >= 512. 
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512) __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", "getoutput", "check_output", "CalledProcessError", "DEVNULL"] if mswindows: from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE, SW_HIDE, STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW) __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", "STD_ERROR_HANDLE", "SW_HIDE", "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"]) class Handle(int): closed = False def Close(self, CloseHandle=_winapi.CloseHandle): if not self.closed: self.closed = True CloseHandle(self) def Detach(self): if not self.closed: self.closed = True return int(self) raise ValueError("already closed") def __repr__(self): return "Handle(%d)" % int(self) __del__ = Close __str__ = __repr__ try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # This lists holds Popen instances for which the underlying process had not # exited at the time its __del__ method got called: those processes are wait()ed # for synchronously from _cleanup() when a new Popen object is created, to avoid # zombie processes. _active = [] def _cleanup(): for inst in _active[:]: res = inst._internal_poll(_deadstate=sys.maxsize) if res is not None: try: _active.remove(inst) except ValueError: # This can happen if two threads create a new Popen instance. # It's harmless that it was already removed, so ignore. pass PIPE = -1 STDOUT = -2 DEVNULL = -3 def _eintr_retry_call(func, *args): while True: try: return func(*args) except InterruptedError: continue # XXX This function is only used by multiprocessing and the test suite, # but it's here so that it can be imported when Python is compiled without # threads. 
def _args_from_interpreter_flags():
    """Return a list of command-line arguments reproducing the current
    settings in sys.flags and sys.warnoptions."""
    # Map sys.flags attribute names to their single-letter CLI option.
    flag_opt_map = {
        'debug': 'd',
        # 'inspect': 'i',
        # 'interactive': 'i',
        'optimize': 'O',
        'dont_write_bytecode': 'B',
        'no_user_site': 's',
        'no_site': 'S',
        'ignore_environment': 'E',
        'verbose': 'v',
        'bytes_warning': 'b',
        'quiet': 'q',
        'hash_randomization': 'R',
    }
    args = []
    for flag, opt in flag_opt_map.items():
        v = getattr(sys.flags, flag)
        if v > 0:
            # Repeat the letter to match the flag's count (e.g. -vv for v == 2).
            args.append('-' + opt * v)
    for opt in sys.warnoptions:
        args.append('-W' + opt)
    return args


def call(*popenargs, timeout=None, **kwargs):
    """Run command with arguments.  Wait for command to complete or
    timeout, then return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    with Popen(*popenargs, **kwargs) as p:
        try:
            return p.wait(timeout=timeout)
        except:
            # Deliberately broad: on *any* failure (TimeoutExpired,
            # KeyboardInterrupt, ...) kill and reap the child before
            # re-raising, so no zombie is left behind.
            p.kill()
            p.wait()
            raise


def check_call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete.  If
    the exit code was zero then return, otherwise raise
    CalledProcessError.  The CalledProcessError object will have the
    return code in the returncode attribute.

    The arguments are the same as for the call function.  Example:

    check_call(["ls", "-l"])
    """
    retcode = call(*popenargs, **kwargs)
    if retcode:
        # Prefer the explicit args= keyword for the error message, falling
        # back to the positional form.
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd)
    return 0


def check_output(*popenargs, timeout=None, **kwargs):
    r"""Run command with arguments and return its output.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    b'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    b'ls: non_existent_file: No such file or directory\n'

    If universal_newlines=True is passed, the return value will be a
    string rather than bytes.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
        try:
            output, unused_err = process.communicate(timeout=timeout)
        except TimeoutExpired:
            # Timed out: kill the child, drain whatever it produced so far,
            # and re-raise with that partial output attached.
            process.kill()
            output, unused_err = process.communicate()
            raise TimeoutExpired(process.args, timeout, output=output)
        except:
            # Any other failure: kill and reap before re-raising (no zombies).
            process.kill()
            process.wait()
            raise
        retcode = process.poll()
        if retcode:
            raise CalledProcessError(retcode, process.args, output=output)
    return output


def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """

    # See
    # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
    # or search http://msdn.microsoft.com for
    # "Parsing C++ Command-Line Arguments"
    result = []
    needquote = False
    for arg in seq:
        # bs_buf accumulates pending backslashes whose meaning depends on
        # what follows them (rules 4 and 5).
        bs_buf = []

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        # An empty argument must also be quoted to survive the parse.
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            # Trailing backslashes before the closing quote must be doubled
            # (rule 5), hence the second extend.
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)


# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for POSIX.

def getstatusoutput(cmd):
    """Return (status, output) of executing cmd in a shell.

    Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
    (status, output).  cmd is actually run as '{ cmd ; } 2>&1', so that the
    returned output will contain output or error messages.  A trailing newline
    is stripped from the output.  The exit status for the command can be
    interpreted according to the rules for the C function wait().  Example:

    >>> import subprocess
    >>> subprocess.getstatusoutput('ls /bin/ls')
    (0, '/bin/ls')
    >>> subprocess.getstatusoutput('cat /bin/junk')
    (256, 'cat: /bin/junk: No such file or directory')
    >>> subprocess.getstatusoutput('/bin/junk')
    (256, 'sh: /bin/junk: not found')
    """
    with os.popen('{ ' + cmd + '; } 2>&1', 'r') as pipe:
        try:
            text = pipe.read()
            # pipe.close() returns the wait() status, or None on status 0.
            sts = pipe.close()
        except:
            # NOTE(review): bare except plus the CPython-internal pipe._proc
            # attribute -- kills the underlying child on any failure (e.g.
            # KeyboardInterrupt) before re-raising.
            process = pipe._proc
            process.kill()
            process.wait()
            raise
    if sts is None:
        sts = 0
    # Strip a single trailing newline, mirroring shell backquote behavior.
    if text[-1:] == '\n':
        text = text[:-1]
    return sts, text

def getoutput(cmd):
    """Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return value is a string containing the command's output. Example: >>> import subprocess >>> subprocess.getoutput('ls /bin/ls') '/bin/ls' """ return getstatusoutput(cmd)[1] _PLATFORM_DEFAULT_CLOSE_FDS = object() class Popen(object): def __init__(self, args, bufsize=-1, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0, restore_signals=True, start_new_session=False, pass_fds=()): """Create new Popen instance.""" _cleanup() self._child_created = False self._input = None self._communication_started = False if bufsize is None: bufsize = -1 # Restore default if not isinstance(bufsize, int): raise TypeError("bufsize must be an integer") if mswindows: if preexec_fn is not None: raise ValueError("preexec_fn is not supported on Windows " "platforms") any_stdio_set = (stdin is not None or stdout is not None or stderr is not None) if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS: if any_stdio_set: close_fds = False else: close_fds = True elif close_fds and any_stdio_set: raise ValueError( "close_fds is not supported on Windows platforms" " if you redirect stdin/stdout/stderr") else: # POSIX if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS: close_fds = True if pass_fds and not close_fds: warnings.warn("pass_fds overriding close_fds.", RuntimeWarning) close_fds = True if startupinfo is not None: raise ValueError("startupinfo is only supported on Windows " "platforms") if creationflags != 0: raise ValueError("creationflags is only supported on Windows " "platforms") self.args = args self.stdin = None self.stdout = None self.stderr = None self.pid = None self.returncode = None self.universal_newlines = universal_newlines # Input and output objects. 
The general principle is like # this: # # Parent Child # ------ ----- # p2cwrite ---stdin---> p2cread # c2pread <--stdout--- c2pwrite # errread <--stderr--- errwrite # # On POSIX, the child objects are file descriptors. On # Windows, these are Windows file handles. The parent objects # are file descriptors on both platforms. The parent objects # are -1 when not using PIPEs. The child objects are -1 # when not redirecting. (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr) # We wrap OS handles *before* launching the child, otherwise a # quickly terminating child could make our fds unwrappable # (see #8458). #fix me brython syntax error #if mswindows: # if p2cwrite != -1: # p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0) # if c2pread != -1: # c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0) # if errread != -1: # errread = msvcrt.open_osfhandle(errread.Detach(), 0) if p2cwrite != -1: self.stdin = io.open(p2cwrite, 'wb', bufsize) if universal_newlines: self.stdin = io.TextIOWrapper(self.stdin, write_through=True) if c2pread != -1: self.stdout = io.open(c2pread, 'rb', bufsize) if universal_newlines: self.stdout = io.TextIOWrapper(self.stdout) if errread != -1: self.stderr = io.open(errread, 'rb', bufsize) if universal_newlines: self.stderr = io.TextIOWrapper(self.stderr) self._closed_child_pipe_fds = False try: self._execute_child(args, executable, preexec_fn, close_fds, pass_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, restore_signals, start_new_session) except: # Cleanup if the child failed starting. for f in filter(None, (self.stdin, self.stdout, self.stderr)): try: f.close() except EnvironmentError: pass # Ignore EBADF or other errors. 
if not self._closed_child_pipe_fds: to_close = [] if stdin == PIPE: to_close.append(p2cread) if stdout == PIPE: to_close.append(c2pwrite) if stderr == PIPE: to_close.append(errwrite) if hasattr(self, '_devnull'): to_close.append(self._devnull) for fd in to_close: try: os.close(fd) except EnvironmentError: pass raise def _translate_newlines(self, data, encoding): data = data.decode(encoding) return data.replace("\r\n", "\n").replace("\r", "\n") def __enter__(self): return self def __exit__(self, type, value, traceback): if self.stdout: self.stdout.close() if self.stderr: self.stderr.close() if self.stdin: self.stdin.close() # Wait for the process to terminate, to avoid zombies. self.wait() def __del__(self, _maxsize=sys.maxsize, _active=_active): # If __init__ hasn't had a chance to execute (e.g. if it # was passed an undeclared keyword argument), we don't # have a _child_created attribute at all. if not getattr(self, '_child_created', False): # We didn't get to successfully create a child process. return # In case the child hasn't been waited on, check if it's done. self._internal_poll(_deadstate=_maxsize) if self.returncode is None and _active is not None: # Child is still running, keep us alive until we can wait on it. _active.append(self) def _get_devnull(self): if not hasattr(self, '_devnull'): self._devnull = os.open(os.devnull, os.O_RDWR) return self._devnull def communicate(self, input=None, timeout=None): """Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be bytes to be sent to the child process, or None, if no data should be sent to the child. 
communicate() returns a tuple (stdout, stderr).""" if self._communication_started and input: raise ValueError("Cannot send input after starting communication") # Optimization: If we are not worried about timeouts, we haven't # started communicating, and we have one or zero pipes, using select() # or threads is unnecessary. if (timeout is None and not self._communication_started and [self.stdin, self.stdout, self.stderr].count(None) >= 2): stdout = None stderr = None if self.stdin: if input: try: self.stdin.write(input) except IOError as e: if e.errno != errno.EPIPE and e.errno != errno.EINVAL: raise self.stdin.close() elif self.stdout: stdout = _eintr_retry_call(self.stdout.read) self.stdout.close() elif self.stderr: stderr = _eintr_retry_call(self.stderr.read) self.stderr.close() self.wait() else: if timeout is not None: endtime = _time() + timeout else: endtime = None try: stdout, stderr = self._communicate(input, endtime, timeout) finally: self._communication_started = True sts = self.wait(timeout=self._remaining_time(endtime)) return (stdout, stderr) def poll(self): return self._internal_poll() def _remaining_time(self, endtime): """Convenience for _communicate when computing timeouts.""" if endtime is None: return None else: return endtime - _time() def _check_timeout(self, endtime, orig_timeout): """Convenience for checking if a timeout has expired.""" if endtime is None: return if _time() > endtime: raise TimeoutExpired(self.args, orig_timeout) if mswindows: # # Windows methods # def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ if stdin is None and stdout is None and stderr is None: return (-1, -1, -1, -1, -1, -1) p2cread, p2cwrite = -1, -1 c2pread, c2pwrite = -1, -1 errread, errwrite = -1, -1 if stdin is None: p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE) if p2cread is None: p2cread, _ = _winapi.CreatePipe(None, 0) p2cread = Handle(p2cread) 
_winapi.CloseHandle(_) elif stdin == PIPE: p2cread, p2cwrite = _winapi.CreatePipe(None, 0) p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) elif stdin == DEVNULL: p2cread = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) else: # Assuming file-like object p2cread = msvcrt.get_osfhandle(stdin.fileno()) p2cread = self._make_inheritable(p2cread) if stdout is None: c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE) if c2pwrite is None: _, c2pwrite = _winapi.CreatePipe(None, 0) c2pwrite = Handle(c2pwrite) _winapi.CloseHandle(_) elif stdout == PIPE: c2pread, c2pwrite = _winapi.CreatePipe(None, 0) c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) elif stdout == DEVNULL: c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) else: # Assuming file-like object c2pwrite = msvcrt.get_osfhandle(stdout.fileno()) c2pwrite = self._make_inheritable(c2pwrite) if stderr is None: errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE) if errwrite is None: _, errwrite = _winapi.CreatePipe(None, 0) errwrite = Handle(errwrite) _winapi.CloseHandle(_) elif stderr == PIPE: errread, errwrite = _winapi.CreatePipe(None, 0) errread, errwrite = Handle(errread), Handle(errwrite) elif stderr == STDOUT: errwrite = c2pwrite elif stderr == DEVNULL: errwrite = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) else: # Assuming file-like object errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _make_inheritable(self, handle): """Return a duplicate of handle, which is inheritable""" h = _winapi.DuplicateHandle( _winapi.GetCurrentProcess(), handle, _winapi.GetCurrentProcess(), 0, 1, _winapi.DUPLICATE_SAME_ACCESS) return Handle(h) def _find_w9xpopen(self): """Find and return absolut path to 
w9xpopen.exe""" w9xpopen = os.path.join( os.path.dirname(_winapi.GetModuleFileName(0)), "w9xpopen.exe") if not os.path.exists(w9xpopen): # Eeek - file-not-found - possibly an embedding # situation - see if we can locate it in sys.exec_prefix w9xpopen = os.path.join(os.path.dirname(sys.base_exec_prefix), "w9xpopen.exe") if not os.path.exists(w9xpopen): raise RuntimeError("Cannot locate w9xpopen.exe, which is " "needed for Popen to work with your " "shell or platform.") return w9xpopen def _execute_child(self, args, executable, preexec_fn, close_fds, pass_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, unused_restore_signals, unused_start_new_session): """Execute program (MS Windows version)""" assert not pass_fds, "pass_fds not supported on Windows." if not isinstance(args, str): args = list2cmdline(args) # Process startup details if startupinfo is None: startupinfo = STARTUPINFO() if -1 not in (p2cread, c2pwrite, errwrite): startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES startupinfo.hStdInput = p2cread startupinfo.hStdOutput = c2pwrite startupinfo.hStdError = errwrite if shell: startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW startupinfo.wShowWindow = _winapi.SW_HIDE comspec = os.environ.get("COMSPEC", "cmd.exe") args = '{} /c "{}"'.format (comspec, args) if (_winapi.GetVersion() >= 0x80000000 or os.path.basename(comspec).lower() == "command.com"): # Win9x, or using command.com on NT. We need to # use the w9xpopen intermediate program. For more # information, see KB Q150956 # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp) w9xpopen = self._find_w9xpopen() args = '"%s" %s' % (w9xpopen, args) # Not passing CREATE_NEW_CONSOLE has been known to # cause random failures on win9x. Specifically a # dialog: "Your program accessed mem currently in # use at xxx" and a hopeful warning about the # stability of your system. 
Cost is Ctrl+C won't # kill children. creationflags |= _winapi.CREATE_NEW_CONSOLE # Start the process try: hp, ht, pid, tid = _winapi.CreateProcess(executable, args, # no special security None, None, int(not close_fds), creationflags, env, cwd, startupinfo) except pywintypes.error as e: # Translate pywintypes.error to WindowsError, which is # a subclass of OSError. FIXME: We should really # translate errno using _sys_errlist (or similar), but # how can this be done from Python? raise WindowsError(*e.args) finally: # Child is launched. Close the parent's copy of those pipe # handles that only the child should have open. You need # to make sure that no handles to the write end of the # output pipe are maintained in this process or else the # pipe will not close when the child process exits and the # ReadFile will hang. if p2cread != -1: p2cread.Close() if c2pwrite != -1: c2pwrite.Close() if errwrite != -1: errwrite.Close() if hasattr(self, '_devnull'): os.close(self._devnull) # Retain the process handle, but close the thread handle self._child_created = True self._handle = Handle(hp) self.pid = pid _winapi.CloseHandle(ht) def _internal_poll(self, _deadstate=None, _WaitForSingleObject=_winapi.WaitForSingleObject, _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0, _GetExitCodeProcess=_winapi.GetExitCodeProcess): """Check if child process has terminated. Returns returncode attribute. This method is called by __del__, so it can only refer to objects in its local scope. """ if self.returncode is None: if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0: self.returncode = _GetExitCodeProcess(self._handle) return self.returncode def wait(self, timeout=None, endtime=None): """Wait for child process to terminate. 
Returns returncode attribute.""" if endtime is not None: timeout = self._remaining_time(endtime) if timeout is None: timeout_millis = _winapi.INFINITE else: timeout_millis = int(timeout * 1000) if self.returncode is None: result = _winapi.WaitForSingleObject(self._handle, timeout_millis) if result == _winapi.WAIT_TIMEOUT: raise TimeoutExpired(self.args, timeout) self.returncode = _winapi.GetExitCodeProcess(self._handle) return self.returncode def _readerthread(self, fh, buffer): buffer.append(fh.read()) fh.close() def _communicate(self, input, endtime, orig_timeout): # Start reader threads feeding into a list hanging off of this # object, unless they've already been started. if self.stdout and not hasattr(self, "_stdout_buff"): self._stdout_buff = [] self.stdout_thread = \ threading.Thread(target=self._readerthread, args=(self.stdout, self._stdout_buff)) self.stdout_thread.daemon = True self.stdout_thread.start() if self.stderr and not hasattr(self, "_stderr_buff"): self._stderr_buff = [] self.stderr_thread = \ threading.Thread(target=self._readerthread, args=(self.stderr, self._stderr_buff)) self.stderr_thread.daemon = True self.stderr_thread.start() if self.stdin: if input is not None: try: self.stdin.write(input) except IOError as e: if e.errno != errno.EPIPE: raise self.stdin.close() # Wait for the reader threads, or time out. If we time out, the # threads remain reading and the fds left open in case the user # calls communicate again. if self.stdout is not None: self.stdout_thread.join(self._remaining_time(endtime)) if self.stdout_thread.is_alive(): raise TimeoutExpired(self.args, orig_timeout) if self.stderr is not None: self.stderr_thread.join(self._remaining_time(endtime)) if self.stderr_thread.is_alive(): raise TimeoutExpired(self.args, orig_timeout) # Collect the output from and close both pipes, now that we know # both have been read successfully. 
stdout = None stderr = None if self.stdout: stdout = self._stdout_buff self.stdout.close() if self.stderr: stderr = self._stderr_buff self.stderr.close() # All data exchanged. Translate lists into strings. if stdout is not None: stdout = stdout[0] if stderr is not None: stderr = stderr[0] return (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ if sig == signal.SIGTERM: self.terminate() elif sig == signal.CTRL_C_EVENT: os.kill(self.pid, signal.CTRL_C_EVENT) elif sig == signal.CTRL_BREAK_EVENT: os.kill(self.pid, signal.CTRL_BREAK_EVENT) else: raise ValueError("Unsupported signal: {}".format(sig)) def terminate(self): """Terminates the process """ try: _winapi.TerminateProcess(self._handle, 1) except PermissionError: # ERROR_ACCESS_DENIED (winerror 5) is received when the # process already died. rc = _winapi.GetExitCodeProcess(self._handle) if rc == _winapi.STILL_ACTIVE: raise self.returncode = rc kill = terminate else: # # POSIX methods # def _get_handles(self, stdin, stdout, stderr): """Construct and return tuple with IO objects: p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite """ p2cread, p2cwrite = -1, -1 c2pread, c2pwrite = -1, -1 errread, errwrite = -1, -1 if stdin is None: pass elif stdin == PIPE: p2cread, p2cwrite = _create_pipe() elif stdin == DEVNULL: p2cread = self._get_devnull() elif isinstance(stdin, int): p2cread = stdin else: # Assuming file-like object p2cread = stdin.fileno() if stdout is None: pass elif stdout == PIPE: c2pread, c2pwrite = _create_pipe() elif stdout == DEVNULL: c2pwrite = self._get_devnull() elif isinstance(stdout, int): c2pwrite = stdout else: # Assuming file-like object c2pwrite = stdout.fileno() if stderr is None: pass elif stderr == PIPE: errread, errwrite = _create_pipe() elif stderr == STDOUT: errwrite = c2pwrite elif stderr == DEVNULL: errwrite = self._get_devnull() elif isinstance(stderr, int): errwrite = stderr else: # Assuming file-like object errwrite = stderr.fileno() return 
(p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) def _close_fds(self, fds_to_keep): start_fd = 3 for fd in sorted(fds_to_keep): if fd >= start_fd: os.closerange(start_fd, fd) start_fd = fd + 1 if start_fd <= MAXFD: os.closerange(start_fd, MAXFD) def _execute_child(self, args, executable, preexec_fn, close_fds, pass_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, restore_signals, start_new_session): """Execute program (POSIX version)""" if isinstance(args, (str, bytes)): args = [args] else: args = list(args) if shell: args = ["/bin/sh", "-c"] + args if executable: args[0] = executable if executable is None: executable = args[0] orig_executable = executable # For transferring possible exec failure from child to parent. # Data format: "exception name:hex errno:description" # Pickle is not used; it is complex and involves memory allocation. errpipe_read, errpipe_write = _create_pipe() try: try: # We must avoid complex work that could involve # malloc or free in the child process to avoid # potential deadlocks, thus we do all this here. # and pass it to fork_exec() if env is not None: env_list = [os.fsencode(k) + b'=' + os.fsencode(v) for k, v in env.items()] else: env_list = None # Use execv instead of execve. executable = os.fsencode(executable) if os.path.dirname(executable): executable_list = (executable,) else: # This matches the behavior of os._execvpe(). executable_list = tuple( os.path.join(os.fsencode(dir), executable) for dir in os.get_exec_path(env)) fds_to_keep = set(pass_fds) fds_to_keep.add(errpipe_write) self.pid = _posixsubprocess.fork_exec( args, executable_list, close_fds, sorted(fds_to_keep), cwd, env_list, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, errpipe_read, errpipe_write, restore_signals, start_new_session, preexec_fn) self._child_created = True finally: # be sure the FD is closed no matter what os.close(errpipe_write) # self._devnull is not always defined. 
devnull_fd = getattr(self, '_devnull', None) if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd: os.close(p2cread) if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd: os.close(c2pwrite) if errwrite != -1 and errread != -1 and errwrite != devnull_fd: os.close(errwrite) if devnull_fd is not None: os.close(devnull_fd) # Prevent a double close of these fds from __init__ on error. self._closed_child_pipe_fds = True # Wait for exec to fail or succeed; possibly raising an # exception (limited in size) errpipe_data = bytearray() while True: part = _eintr_retry_call(os.read, errpipe_read, 50000) errpipe_data += part if not part or len(errpipe_data) > 50000: break finally: # be sure the FD is closed no matter what os.close(errpipe_read) if errpipe_data: try: _eintr_retry_call(os.waitpid, self.pid, 0) except OSError as e: if e.errno != errno.ECHILD: raise try: exception_name, hex_errno, err_msg = ( errpipe_data.split(b':', 2)) except ValueError: exception_name = b'RuntimeError' hex_errno = b'0' err_msg = (b'Bad exception data from child: ' + repr(errpipe_data)) child_exception_type = getattr( builtins, exception_name.decode('ascii'), RuntimeError) err_msg = err_msg.decode(errors="surrogatepass") if issubclass(child_exception_type, OSError) and hex_errno: errno_num = int(hex_errno, 16) child_exec_never_called = (err_msg == "noexec") if child_exec_never_called: err_msg = "" if errno_num != 0: err_msg = os.strerror(errno_num) if errno_num == errno.ENOENT: if child_exec_never_called: # The error must be from chdir(cwd). 
err_msg += ': ' + repr(cwd) else: err_msg += ': ' + repr(orig_executable) raise child_exception_type(errno_num, err_msg) raise child_exception_type(err_msg) def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED, _WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED, _WEXITSTATUS=os.WEXITSTATUS): # This method is called (indirectly) by __del__, so it cannot # refer to anything outside of its local scope.""" if _WIFSIGNALED(sts): self.returncode = -_WTERMSIG(sts) elif _WIFEXITED(sts): self.returncode = _WEXITSTATUS(sts) else: # Should never happen raise RuntimeError("Unknown child exit status!") def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid, _WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD): """Check if child process has terminated. Returns returncode attribute. This method is called by __del__, so it cannot reference anything outside of the local scope (nor can any methods it calls). """ if self.returncode is None: try: pid, sts = _waitpid(self.pid, _WNOHANG) if pid == self.pid: self._handle_exitstatus(sts) except _os_error as e: if _deadstate is not None: self.returncode = _deadstate elif e.errno == _ECHILD: # This happens if SIGCLD is set to be ignored or # waiting for child processes has otherwise been # disabled for our process. This child is dead, we # can't get the status. # http://bugs.python.org/issue15756 self.returncode = 0 return self.returncode def _try_wait(self, wait_flags): try: (pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags) except OSError as e: if e.errno != errno.ECHILD: raise # This happens if SIGCLD is set to be ignored or waiting # for child processes has otherwise been disabled for our # process. This child is dead, we can't get the status. pid = self.pid sts = 0 return (pid, sts) def wait(self, timeout=None, endtime=None): """Wait for child process to terminate. Returns returncode attribute.""" if self.returncode is not None: return self.returncode # endtime is preferred to timeout. 
timeout is only used for # printing. if endtime is not None or timeout is not None: if endtime is None: endtime = _time() + timeout elif timeout is None: timeout = self._remaining_time(endtime) if endtime is not None: # Enter a busy loop if we have a timeout. This busy loop was # cribbed from Lib/threading.py in Thread.wait() at r71065. delay = 0.0005 # 500 us -> initial delay of 1 ms while True: (pid, sts) = self._try_wait(os.WNOHANG) assert pid == self.pid or pid == 0 if pid == self.pid: self._handle_exitstatus(sts) break remaining = self._remaining_time(endtime) if remaining <= 0: raise TimeoutExpired(self.args, timeout) delay = min(delay * 2, remaining, .05) time.sleep(delay) else: while self.returncode is None: (pid, sts) = self._try_wait(0) # Check the pid and loop as waitpid has been known to return # 0 even without WNOHANG in odd situations. issue14396. if pid == self.pid: self._handle_exitstatus(sts) return self.returncode def _communicate(self, input, endtime, orig_timeout): if self.stdin and not self._communication_started: # Flush stdio buffer. This might block, if the user has # been writing to .stdin in an uncontrolled fashion. self.stdin.flush() if not input: self.stdin.close() if _has_poll: stdout, stderr = self._communicate_with_poll(input, endtime, orig_timeout) else: stdout, stderr = self._communicate_with_select(input, endtime, orig_timeout) self.wait(timeout=self._remaining_time(endtime)) # All data exchanged. Translate lists into strings. if stdout is not None: stdout = b''.join(stdout) if stderr is not None: stderr = b''.join(stderr) # Translate newlines, if requested. # This also turns bytes into strings. 
if self.universal_newlines: if stdout is not None: stdout = self._translate_newlines(stdout, self.stdout.encoding) if stderr is not None: stderr = self._translate_newlines(stderr, self.stderr.encoding) return (stdout, stderr) def _save_input(self, input): # This method is called from the _communicate_with_*() methods # so that if we time out while communicating, we can continue # sending input if we retry. if self.stdin and self._input is None: self._input_offset = 0 self._input = input if self.universal_newlines and input is not None: self._input = self._input.encode(self.stdin.encoding) def _communicate_with_poll(self, input, endtime, orig_timeout): stdout = None # Return stderr = None # Return if not self._communication_started: self._fd2file = {} poller = select.poll() def register_and_append(file_obj, eventmask): poller.register(file_obj.fileno(), eventmask) self._fd2file[file_obj.fileno()] = file_obj def close_unregister_and_remove(fd): poller.unregister(fd) self._fd2file[fd].close() self._fd2file.pop(fd) if self.stdin and input: register_and_append(self.stdin, select.POLLOUT) # Only create this mapping if we haven't already. 
if not self._communication_started: self._fd2output = {} if self.stdout: self._fd2output[self.stdout.fileno()] = [] if self.stderr: self._fd2output[self.stderr.fileno()] = [] select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI if self.stdout: register_and_append(self.stdout, select_POLLIN_POLLPRI) stdout = self._fd2output[self.stdout.fileno()] if self.stderr: register_and_append(self.stderr, select_POLLIN_POLLPRI) stderr = self._fd2output[self.stderr.fileno()] self._save_input(input) while self._fd2file: timeout = self._remaining_time(endtime) if timeout is not None and timeout < 0: raise TimeoutExpired(self.args, orig_timeout) try: ready = poller.poll(timeout) except select.error as e: if e.args[0] == errno.EINTR: continue raise self._check_timeout(endtime, orig_timeout) # XXX Rewrite these to use non-blocking I/O on the # file objects; they are no longer using C stdio! for fd, mode in ready: if mode & select.POLLOUT: chunk = self._input[self._input_offset : self._input_offset + _PIPE_BUF] try: self._input_offset += os.write(fd, chunk) except OSError as e: if e.errno == errno.EPIPE: close_unregister_and_remove(fd) else: raise else: if self._input_offset >= len(self._input): close_unregister_and_remove(fd) elif mode & select_POLLIN_POLLPRI: data = os.read(fd, 4096) if not data: close_unregister_and_remove(fd) self._fd2output[fd].append(data) else: # Ignore hang up or errors. 
close_unregister_and_remove(fd) return (stdout, stderr) def _communicate_with_select(self, input, endtime, orig_timeout): if not self._communication_started: self._read_set = [] self._write_set = [] if self.stdin and input: self._write_set.append(self.stdin) if self.stdout: self._read_set.append(self.stdout) if self.stderr: self._read_set.append(self.stderr) self._save_input(input) stdout = None # Return stderr = None # Return if self.stdout: if not self._communication_started: self._stdout_buff = [] stdout = self._stdout_buff if self.stderr: if not self._communication_started: self._stderr_buff = [] stderr = self._stderr_buff while self._read_set or self._write_set: timeout = self._remaining_time(endtime) if timeout is not None and timeout < 0: raise TimeoutExpired(self.args, orig_timeout) try: (rlist, wlist, xlist) = \ select.select(self._read_set, self._write_set, [], timeout) except select.error as e: if e.args[0] == errno.EINTR: continue raise # According to the docs, returning three empty lists indicates # that the timeout expired. if not (rlist or wlist or xlist): raise TimeoutExpired(self.args, orig_timeout) # We also check what time it is ourselves for good measure. self._check_timeout(endtime, orig_timeout) # XXX Rewrite these to use non-blocking I/O on the # file objects; they are no longer using C stdio! 
if self.stdin in wlist: chunk = self._input[self._input_offset : self._input_offset + _PIPE_BUF] try: bytes_written = os.write(self.stdin.fileno(), chunk) except OSError as e: if e.errno == errno.EPIPE: self.stdin.close() self._write_set.remove(self.stdin) else: raise else: self._input_offset += bytes_written if self._input_offset >= len(self._input): self.stdin.close() self._write_set.remove(self.stdin) if self.stdout in rlist: data = os.read(self.stdout.fileno(), 1024) if not data: self.stdout.close() self._read_set.remove(self.stdout) stdout.append(data) if self.stderr in rlist: data = os.read(self.stderr.fileno(), 1024) if not data: self.stderr.close() self._read_set.remove(self.stderr) stderr.append(data) return (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ os.kill(self.pid, sig) def terminate(self): """Terminate the process with SIGTERM """ self.send_signal(signal.SIGTERM) def kill(self): """Kill the process with SIGKILL """ self.send_signal(signal.SIGKILL)
agpl-3.0
jamesliu/mxnet
python/mxnet/callback.py
4
7827
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 """Callback functions that can be used to track various status during epoch.""" from __future__ import absolute_import import logging import math import time from .model import save_checkpoint def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False): """Callback to checkpoint Module to prefix every epoch. Parameters ---------- mod : subclass of BaseModule The module to checkpoint. prefix : str The file prefix for this checkpoint. period : int How many epochs to wait before checkpointing. Defaults to 1. save_optimizer_states : bool Indicates whether or not to save optimizer states for continued training. Returns ------- callback : function The callback function that can be passed as iter_end_callback to fit. """ period = int(max(1, period)) # pylint: disable=unused-argument def _callback(iter_no, sym=None, arg=None, aux=None): """The checkpoint function.""" if (iter_no + 1) % period == 0: mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states) return _callback def do_checkpoint(prefix, period=1): """A callback that saves a model checkpoint every few epochs. 
Each checkpoint is made up of a couple of binary files: a model description file and a parameters (weights and biases) file. The model description file is named `prefix`--symbol.json and the parameters file is named `prefix`-`epoch_number`.params Parameters ---------- prefix : str Prefix for the checkpoint filenames. period : int, optional Interval (number of epochs) between checkpoints. Default `period` is 1. Returns ------- callback : function A callback function that can be passed as `epoch_end_callback` to fit. Example ------- >>> module.fit(iterator, num_epoch=n_epoch, ... epoch_end_callback = mx.callback.do_checkpoint("mymodel", 1)) Start training with [cpu(0)] Epoch[0] Resetting Data Iterator Epoch[0] Time cost=0.100 Saved checkpoint to "mymodel-0001.params" Epoch[1] Resetting Data Iterator Epoch[1] Time cost=0.060 Saved checkpoint to "mymodel-0002.params" """ period = int(max(1, period)) def _callback(iter_no, sym, arg, aux): """The checkpoint function.""" if (iter_no + 1) % period == 0: save_checkpoint(prefix, iter_no + 1, sym, arg, aux) return _callback def log_train_metric(period, auto_reset=False): """Callback to log the training evaluation result every period. Parameters ---------- period : int The number of batch to log the training evaluation metric. auto_reset : bool Reset the metric after each log. Returns ------- callback : function The callback function that can be passed as iter_epoch_callback to fit. """ def _callback(param): """The checkpoint function.""" if param.nbatch % period == 0 and param.eval_metric is not None: name_value = param.eval_metric.get_name_value() for name, value in name_value: logging.info('Iter[%d] Batch[%d] Train-%s=%f', param.epoch, param.nbatch, name, value) if auto_reset: param.eval_metric.reset() return _callback class Speedometer(object): """Logs training speed and evaluation metrics periodically. Parameters ---------- batch_size: int Batch size of data. 
frequent: int Specifies how frequently training speed and evaluation metrics must be logged. Default behavior is to log once every 50 batches. auto_reset : bool Reset the evaluation metrics after each log. Example ------- >>> # Print training speed and evaluation metrics every ten batches. Batch size is one. >>> module.fit(iterator, num_epoch=n_epoch, ... batch_end_callback=mx.callback.Speedometer(1, 10)) Epoch[0] Batch [10] Speed: 1910.41 samples/sec Train-accuracy=0.200000 Epoch[0] Batch [20] Speed: 1764.83 samples/sec Train-accuracy=0.400000 Epoch[0] Batch [30] Speed: 1740.59 samples/sec Train-accuracy=0.500000 """ def __init__(self, batch_size, frequent=50, auto_reset=True): self.batch_size = batch_size self.frequent = frequent self.init = False self.tic = 0 self.last_count = 0 self.auto_reset = auto_reset def __call__(self, param): """Callback to Show speed.""" count = param.nbatch if self.last_count > count: self.init = False self.last_count = count if self.init: if count % self.frequent == 0: # #11504 try: speed = self.frequent * self.batch_size / (time.time() - self.tic) except ZeroDivisionError: speed = float('inf') if param.eval_metric is not None: name_value = param.eval_metric.get_name_value() if self.auto_reset: param.eval_metric.reset() msg = 'Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec' msg += '\t%s=%f'*len(name_value) logging.info(msg, param.epoch, count, speed, *sum(name_value, ())) else: logging.info("Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec", param.epoch, count, speed) self.tic = time.time() else: self.init = True self.tic = time.time() class ProgressBar(object): """Displays a progress bar, indicating the percentage of batches processed within each epoch. 
Parameters ---------- total: int total number of batches per epoch length: int number of chars to define maximum length of progress bar Examples -------- >>> progress_bar = mx.callback.ProgressBar(total=2) >>> mod.fit(data, num_epoch=5, batch_end_callback=progress_bar) [========--------] 50.0% [================] 100.0% """ def __init__(self, total, length=80): self.bar_len = length self.total = total def __call__(self, param): """Callback to Show progress bar.""" count = param.nbatch filled_len = int(round(self.bar_len * count / float(self.total))) percents = math.ceil(100.0 * count / float(self.total)) prog_bar = '=' * filled_len + '-' * (self.bar_len - filled_len) logging.info('[%s] %s%s\r', prog_bar, percents, '%') class LogValidationMetricsCallback(object): """Just logs the eval metrics at the end of an epoch.""" def __call__(self, param): if not param.eval_metric: return name_value = param.eval_metric.get_name_value() for name, value in name_value: logging.info('Epoch[%d] Validation-%s=%f', param.epoch, name, value)
apache-2.0
Comunitea/OCB
openerp/addons/base/res/res_currency.py
35
15615
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import re import time import math from openerp import api, fields as fields2 from openerp import tools from openerp.osv import fields, osv from openerp.tools import float_round, float_is_zero, float_compare from openerp.tools.translate import _ import simplejson as json CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?') class res_currency(osv.osv): def _current_rate(self, cr, uid, ids, name, arg, context=None): return self._get_current_rate(cr, uid, ids, context=context) def _current_rate_silent(self, cr, uid, ids, name, arg, context=None): return self._get_current_rate(cr, uid, ids, raise_on_no_rate=False, context=context) def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None): if context is None: context = {} res = {} date = context.get('date') or fields2.Datetime.now() for id in ids: cr.execute('SELECT rate FROM res_currency_rate ' 'WHERE currency_id = %s ' 'AND name <= %s ' 'ORDER BY name desc LIMIT 1', (id, date)) if cr.rowcount: res[id] = cr.fetchone()[0] elif not raise_on_no_rate: res[id] = 0 else: 
currency = self.browse(cr, uid, id, context=context) raise osv.except_osv(_('Error!'),_("No currency rate associated for currency '%s' for the given period" % (currency.name))) return res _name = "res.currency" _description = "Currency" _columns = { # Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code. 'name': fields.char('Currency', size=3, required=True, help="Currency Code (ISO 4217)"), 'symbol': fields.char('Symbol', size=4, help="Currency sign, to be used when printing amounts."), 'rate': fields.function(_current_rate, string='Current Rate', digits=(12,6), help='The rate of the currency to the currency of rate 1.'), # Do not use for computation ! Same as rate field with silent failing 'rate_silent': fields.function(_current_rate_silent, string='Current Rate', digits=(12,6), help='The rate of the currency to the currency of rate 1 (0 if no rate defined).'), 'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'), 'accuracy': fields.integer('Computational Accuracy'), 'rounding': fields.float('Rounding Factor', digits=(12,6)), 'active': fields.boolean('Active'), 'company_id':fields.many2one('res.company', 'Company'), 'base': fields.boolean('Base'), 'position': fields.selection([('after','After Amount'),('before','Before Amount')], 'Symbol Position', help="Determines where the currency symbol should be placed after or before the amount.") } _defaults = { 'active': 1, 'position' : 'after', 'rounding': 0.01, 'accuracy': 4, 'company_id': False, } _sql_constraints = [ # this constraint does not cover all cases due to SQL NULL handling for company_id, # so it is complemented with a unique index (see below). The constraint and index # share the same prefix so that IntegrityError triggered by the index will be caught # and reported to the user with the constraint's error message. 
('unique_name_company_id', 'unique (name, company_id)', 'The currency code must be unique per company!'), ] _order = "name" def init(self, cr): # CONSTRAINT/UNIQUE INDEX on (name,company_id) # /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92 # only support field names in constraint definitions, and we need a function here: # we need to special-case company_id to treat all NULL company_id as equal, otherwise # we would allow duplicate "global" currencies (all having company_id == NULL) cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'res_currency_unique_name_company_id_idx'""") if not cr.fetchone(): cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx ON res_currency (name, (COALESCE(company_id,-1)))""") date = fields2.Date(compute='compute_date') @api.one @api.depends('rate_ids.name') def compute_date(self): self.date = self.rate_ids[:1].name def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100): if not args: args = [] results = super(res_currency,self)\ .name_search(cr, user, name, args, operator=operator, context=context, limit=limit) if not results: name_match = CURRENCY_DISPLAY_PATTERN.match(name) if name_match: results = super(res_currency,self)\ .name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit) return results def name_get(self, cr, uid, ids, context=None): if not ids: return [] if isinstance(ids, (int, long)): ids = [ids] reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write') return [(x['id'], tools.ustr(x['name'])) for x in reads] @api.v8 def round(self, amount): """ Return `amount` rounded according to currency `self`. """ return float_round(amount, precision_rounding=self.rounding) @api.v7 def round(self, cr, uid, currency, amount): """Return ``amount`` rounded according to ``currency``'s rounding rules. 
:param Record currency: currency for which we are rounding :param float amount: the amount to round :return: rounded float """ return float_round(amount, precision_rounding=currency.rounding) @api.v8 def compare_amounts(self, amount1, amount2): """ Compare `amount1` and `amount2` after rounding them according to `self`'s precision. An amount is considered lower/greater than another amount if their rounded value is different. This is not the same as having a non-zero difference! For example 1.432 and 1.431 are equal at 2 digits precision, so this method would return 0. However 0.006 and 0.002 are considered different (returns 1) because they respectively round to 0.01 and 0.0, even though 0.006-0.002 = 0.004 which would be considered zero at 2 digits precision. """ return float_compare(amount1, amount2, precision_rounding=self.rounding) @api.v7 def compare_amounts(self, cr, uid, currency, amount1, amount2): """Compare ``amount1`` and ``amount2`` after rounding them according to the given currency's precision.. An amount is considered lower/greater than another amount if their rounded value is different. This is not the same as having a non-zero difference! For example 1.432 and 1.431 are equal at 2 digits precision, so this method would return 0. However 0.006 and 0.002 are considered different (returns 1) because they respectively round to 0.01 and 0.0, even though 0.006-0.002 = 0.004 which would be considered zero at 2 digits precision. :param Record currency: currency for which we are rounding :param float amount1: first amount to compare :param float amount2: second amount to compare :return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than, equal to, or greater than ``amount2``, according to ``currency``'s rounding. """ return float_compare(amount1, amount2, precision_rounding=currency.rounding) @api.v8 def is_zero(self, amount): """ Return true if `amount` is small enough to be treated as zero according to currency `self`'s rounding rules. 
Warning: ``is_zero(amount1-amount2)`` is not always equivalent to ``compare_amounts(amount1,amount2) == 0``, as the former will round after computing the difference, while the latter will round before, giving different results, e.g., 0.006 and 0.002 at 2 digits precision. """ return float_is_zero(amount, precision_rounding=self.rounding) @api.v7 def is_zero(self, cr, uid, currency, amount): """Returns true if ``amount`` is small enough to be treated as zero according to ``currency``'s rounding rules. Warning: ``is_zero(amount1-amount2)`` is not always equivalent to ``compare_amounts(amount1,amount2) == 0``, as the former will round after computing the difference, while the latter will round before, giving different results for e.g. 0.006 and 0.002 at 2 digits precision. :param Record currency: currency for which we are rounding :param float amount: amount to compare with currency's zero """ return float_is_zero(amount, precision_rounding=currency.rounding) def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None): if context is None: context = {} ctx = context.copy() from_currency = self.browse(cr, uid, from_currency.id, context=ctx) to_currency = self.browse(cr, uid, to_currency.id, context=ctx) if from_currency.rate == 0 or to_currency.rate == 0: date = context.get('date', time.strftime('%Y-%m-%d')) if from_currency.rate == 0: currency_symbol = from_currency.symbol else: currency_symbol = to_currency.symbol raise osv.except_osv(_('Error'), _('No rate found \n' \ 'for the currency: %s \n' \ 'at the date: %s') % (currency_symbol, date)) return to_currency.rate/from_currency.rate def _compute(self, cr, uid, from_currency, to_currency, from_amount, round=True, context=None): if (to_currency.id == from_currency.id): if round: return self.round(cr, uid, to_currency, from_amount) else: return from_amount else: rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context) if round: return self.round(cr, uid, to_currency, 
from_amount * rate) else: return from_amount * rate @api.v7 def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount, round=True, context=None): context = context or {} if not from_currency_id: from_currency_id = to_currency_id if not to_currency_id: to_currency_id = from_currency_id xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context) from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1] to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1] return self._compute(cr, uid, from_currency, to_currency, from_amount, round, context) @api.v8 def compute(self, from_amount, to_currency, round=True): """ Convert `from_amount` from currency `self` to `to_currency`. """ assert self, "compute from unknown currency" assert to_currency, "compute to unknown currency" # apply conversion rate if self == to_currency: to_amount = from_amount else: to_amount = from_amount * self._get_conversion_rate(self, to_currency) # apply rounding return to_currency.round(to_amount) if round else to_amount def get_format_currencies_js_function(self, cr, uid, context=None): """ Returns a string that can be used to instanciate a javascript function that formats numbers as currencies. That function expects the number as first parameter and the currency id as second parameter. 
In case of failure it returns undefined.""" function = "" for row in self.search_read(cr, uid, domain=[], fields=['id', 'name', 'symbol', 'rounding', 'position'], context=context): digits = int(math.ceil(math.log10(1 / row['rounding']))) symbol = row['symbol'] or row['name'] format_number_str = "openerp.web.format_value(arguments[0], {type: 'float', digits: [69," + str(digits) + "]}, 0.00)" if row['position'] == 'after': return_str = "return " + format_number_str + " + '\\xA0' + " + json.dumps(symbol) + ";" else: return_str = "return " + json.dumps(symbol) + " + '\\xA0' + " + format_number_str + ";" function += "if (arguments[1] === " + str(row['id']) + ") { " + return_str + " }" return function class res_currency_rate(osv.osv): _name = "res.currency.rate" _description = "Currency Rate" _columns = { 'name': fields.datetime('Date', required=True, select=True), 'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'), 'currency_id': fields.many2one('res.currency', 'Currency', readonly=True), } _defaults = { 'name': lambda *a: time.strftime('%Y-%m-%d 00:00:00'), } _order = "name desc" def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80): if operator in ['=', '!=']: try: date_format = '%Y-%m-%d' if context.get('lang'): lang_obj = self.pool['res.lang'] lang_ids = lang_obj.search(cr, user, [('code', '=', context['lang'])], context=context) if lang_ids: date_format = lang_obj.browse(cr, user, lang_ids[0], context=context).date_format name = time.strftime('%Y-%m-%d', time.strptime(name, date_format)) except ValueError: try: args.append(('rate', operator, float(name))) except ValueError: return [] name = '' operator = 'ilike' return super(res_currency_rate, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
abhishekgahlot/youtube-dl
youtube_dl/extractor/nfl.py
76
6197
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, int_or_none, remove_end, ) class NFLIE(InfoExtractor): IE_NAME = 'nfl.com' _VALID_URL = r'''(?x)https?:// (?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/ (?:.+?/)* (?P<id>(?:[a-z0-9]{16}|\w{8}\-(?:\w{4}\-){3}\w{12}))''' _TESTS = [ { 'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights', 'md5': '394ef771ddcd1354f665b471d78ec4c6', 'info_dict': { 'id': '0ap3000000398478', 'ext': 'mp4', 'title': 'Week 3: Redskins vs. Eagles highlights', 'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478', 'upload_date': '20140921', 'timestamp': 1411337580, 'thumbnail': 're:^https?://.*\.jpg$', } }, { 'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266', 'md5': 'cf85bdb4bc49f6e9d3816d130c78279c', 'info_dict': { 'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266', 'ext': 'mp4', 'title': 'LIVE: Post Game vs. 
Browns', 'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8', 'upload_date': '20131229', 'timestamp': 1388354455, 'thumbnail': 're:^https?://.*\.jpg$', } }, { 'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish', 'info_dict': { 'id': '0ap3000000467607', 'ext': 'mp4', 'title': 'Frustrations flare on the field', 'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.', 'timestamp': 1422850320, 'upload_date': '20150202', }, }, { 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood', 'only_matching': True, } ] @staticmethod def prepend_host(host, url): if not url.startswith('http'): if not url.startswith('/'): url = '/%s' % url url = 'http://{0:}{1:}'.format(host, url) return url @staticmethod def format_from_stream(stream, protocol, host, path_prefix='', preference=0, note=None): url = '{protocol:}://{host:}/{prefix:}{path:}'.format( protocol=protocol, host=host, prefix=path_prefix, path=stream.get('path'), ) return { 'url': url, 'vbr': int_or_none(stream.get('rate', 0), 1000), 'preference': preference, 'format_note': note, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, host = mobj.group('id'), mobj.group('host') webpage = self._download_webpage(url, video_id) config_url = NFLIE.prepend_host(host, self._search_regex( r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL', default='static/content/static/config/video/config.json')) # For articles, the id in the url is not the video id video_id = self._search_regex( r'contentId\s*:\s*"([^"]+)"', webpage, 'video id', default=video_id) config = self._download_json(config_url, video_id, note='Downloading player config') url_template = NFLIE.prepend_host( host, '{contentURLTemplate:}'.format(**config)) video_data = self._download_json( url_template.format(id=video_id), video_id) formats = [] cdn_data = 
video_data.get('cdnData', {}) streams = cdn_data.get('bitrateInfo', []) if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM': parts = compat_urllib_parse_urlparse(cdn_data.get('uri')) protocol, host = parts.scheme, parts.netloc for stream in streams: formats.append( NFLIE.format_from_stream(stream, protocol, host)) else: cdns = config.get('cdns') if not cdns: raise ExtractorError('Failed to get CDN data', expected=True) for name, cdn in cdns.items(): # LimeLight streams don't seem to work if cdn.get('name') == 'LIMELIGHT': continue protocol = cdn.get('protocol') host = remove_end(cdn.get('host', ''), '/') if not (protocol and host): continue prefix = cdn.get('pathprefix', '') if prefix and not prefix.endswith('/'): prefix = '%s/' % prefix preference = 0 if protocol == 'rtmp': preference = -2 elif 'prog' in name.lower(): preference = 1 for stream in streams: formats.append( NFLIE.format_from_stream(stream, protocol, host, prefix, preference, name)) self._sort_formats(formats) thumbnail = None for q in ('xl', 'l', 'm', 's', 'xs'): thumbnail = video_data.get('imagePaths', {}).get(q) if thumbnail: break return { 'id': video_id, 'title': video_data.get('headline'), 'formats': formats, 'description': video_data.get('caption'), 'duration': video_data.get('duration'), 'thumbnail': thumbnail, 'timestamp': int_or_none(video_data.get('posted'), 1000), }
unlicense
dagbldr/dagbldr
dagbldr/nodes/tests/test_stochastic_nodes.py
2
2939
import numpy as np import theano from theano import tensor from nose.tools import assert_raises from numpy.testing import assert_almost_equal from dagbldr.datasets import load_digits from dagbldr import del_shared from dagbldr.utils import convert_to_one_hot from dagbldr.nodes import linear from dagbldr.nodes import softplus from dagbldr.nodes import gaussian_sample from dagbldr.nodes import gaussian_log_sample # Common between tests digits = load_digits() X = digits["data"].astype("float32") y = digits["target"] n_classes = len(set(y)) y = convert_to_one_hot(y, n_classes).astype("float32") X_sym = tensor.fmatrix() y_sym = tensor.fmatrix() def test_gaussian_sample(): del_shared() random_state = np.random.RandomState(1999) mu = linear([X_sym], [X.shape[1]], proj_dim=100, name='mu', random_state=random_state) sigma = softplus([X_sym], [X.shape[1]], proj_dim=100, name='sigma', random_state=random_state) random_state = np.random.RandomState(1999) r1 = gaussian_sample([mu], [sigma], name="samp1", random_state=random_state) random_state = np.random.RandomState(1999) r2 = gaussian_sample([mu], [sigma], name="samp2", random_state=random_state) random_state = np.random.RandomState(42) r3 = gaussian_sample([mu], [sigma], name="samp3", random_state=random_state) sample_function = theano.function([X_sym], [r1, r2, r3], mode="FAST_COMPILE") s_r1, s_r2, s_r3 = sample_function(X[:100]) assert_almost_equal(s_r1, s_r2) assert_raises(AssertionError, assert_almost_equal, s_r1, s_r3) ss_r1, ss_r2, ss_r3 = sample_function(X[:100]) assert_raises(AssertionError, assert_almost_equal, s_r1, ss_r1) def test_gaussian_log_sample(): del_shared() random_state = np.random.RandomState(1999) mu = linear([X_sym], [X.shape[1]], proj_dim=100, name='mu', random_state=random_state) sigma = linear([X_sym], [X.shape[1]], proj_dim=100, name='sigma', random_state=random_state) random_state = np.random.RandomState(1999) r1 = gaussian_log_sample([mu], [sigma], name="samp1", random_state=random_state) 
random_state = np.random.RandomState(1999) r2 = gaussian_log_sample([mu], [sigma], name="samp2", random_state=random_state) random_state = np.random.RandomState(42) r3 = gaussian_log_sample([mu], [sigma], name="samp3", random_state=random_state) sample_function = theano.function([X_sym], [r1, r2, r3], mode="FAST_COMPILE") s_r1, s_r2, s_r3 = sample_function(X[:100]) assert_almost_equal(s_r1, s_r2) assert_raises(AssertionError, assert_almost_equal, s_r1, s_r3) ss_r1, ss_r2, ss_r3 = sample_function(X[:100]) assert_raises(AssertionError, assert_almost_equal, s_r1, ss_r1)
bsd-3-clause
aledbf/ingress-nginx
hack/add-namespace.py
3
1420
#!/usr/bin/env python3 # Copyright 2020 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from ruamel.yaml import YAML yaml=YAML() yaml.indent(mapping=2, sequence=4, offset=2) for manifest in yaml.load_all(sys.stdin.read()): if manifest: # helm template does not have support for namespace declaration if ('metadata' in manifest and 'namespace' not in manifest['metadata'] and manifest['kind'] != 'Namespace' and manifest['kind'] != 'ClusterRole' and manifest['kind'] != 'ClusterRoleBinding' and manifest['kind'] != 'ValidatingWebhookConfiguration'): manifest['metadata']['namespace'] = sys.argv[1] # respect existing replicas definition if 'spec' in manifest and 'replicas' in manifest['spec']: del manifest['spec']['replicas'] print('---') yaml.dump(manifest, sys.stdout)
apache-2.0
pepeantena4040/MiSitioWeb
lib/toaster/bldcollector/views.py
9
2744
# # BitBake Toaster Implementation # # Copyright (C) 2014 Intel Corporation # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from django.views.decorators.cache import cache_control from django.core.urlresolvers import reverse from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.http import HttpResponseBadRequest, HttpResponse from django.utils import timezone from django.utils.html import escape from datetime import timedelta from django.utils import formats from toastergui.templatetags.projecttags import json as jsonfilter import json import os import tempfile import subprocess import toastermain from django.views.decorators.csrf import csrf_exempt @csrf_exempt def eventfile(request): """ Receives a file by POST, and runs toaster-eventreply on this file """ if request.method != "POST": return HttpResponseBadRequest("This API only accepts POST requests. 
Post a file with:\n\ncurl -F eventlog=@bitbake_eventlog.json %s\n" % request.build_absolute_uri(reverse('eventfile')), content_type="text/plain;utf8") # write temporary file (handle, abstemppath) = tempfile.mkstemp(dir="/tmp/") with os.fdopen(handle, "w") as tmpfile: for chunk in request.FILES['eventlog'].chunks(): tmpfile.write(chunk) tmpfile.close() # compute the path to "bitbake/bin/toaster-eventreplay" from os.path import dirname as DN import_script = os.path.join(DN(DN(DN(DN(os.path.abspath(__file__))))), "bin/toaster-eventreplay") if not os.path.exists(import_script): raise Exception("script missing %s" % import_script) scriptenv = os.environ.copy() scriptenv["DATABASE_URL"] = toastermain.settings.getDATABASE_URL() # run the data loading process and return the results importer = subprocess.Popen([import_script, abstemppath], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=scriptenv) (out, err) = importer.communicate() if importer.returncode == 0: os.remove(abstemppath) return HttpResponse("== Retval %d\n== STDOUT\n%s\n\n== STDERR\n%s" % (importer.returncode, out, err), content_type="text/plain;utf8")
gpl-2.0
CollabQ/CollabQ
.google_appengine/google/appengine/api/namespace_manager/__init__.py
1
2505
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Control the namespacing system used by various APIs. Each API call can specify an alternate namespace, but the functions here can be used to change the default namespace. The default is set before user code begins executing. """ import os ENV_DEFAULT_NAMESPACE = 'HTTP_X_APPENGINE_DEFAULT_NAMESPACE' ENV_CURRENT_NAMESPACE = '__INTERNAL_CURRENT_NAMESPACE' def set_namespace(namespace): """Set the default namespace to use for future calls, for this request only. Args: namespace: A string naming the new namespace to use. The empty string specifies the root namespace for this app. """ os.environ[ENV_CURRENT_NAMESPACE] = namespace def set_request_namespace(namespace): """Deprecated. Use set_namespace(namespace).""" return set_namespace(namespace) def get_namespace(): """Get the name of the current default namespace. The empty string indicates that the root namespace is the default. """ return os.getenv(ENV_CURRENT_NAMESPACE, '') def get_request_namespace(): """Deprecated. Use get_namespace().""" return get_namespace() def _enable_request_namespace(): """Automatically enable namespace to default for domain. Calling this function will automatically default the namespace to the chosen Google Apps domain for the current request. 
""" if ENV_CURRENT_NAMESPACE not in os.environ: if ENV_DEFAULT_NAMESPACE in os.environ: os.environ[ENV_CURRENT_NAMESPACE] = os.environ[ENV_DEFAULT_NAMESPACE] else: os.environ[ENV_CURRENT_NAMESPACE] = '' def _add_name_space(request, namespace=None): """Add a name_space field to a request. Args: request: A protocol buffer supporting the set_name_space() operation. namespace: The name of the namespace part. If None, use the default namespace. """ if namespace is None: request.set_name_space(get_namespace()) else: request.set_name_space(namespace)
apache-2.0
leleobhz/phonetica
praatwrapper/praatwrapper.py
1
1499
# -*- coding: utf-8 -*- # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import os import sys import re import pexpect import psyco psyco.full() class PraatError(Exception): def __init__(self,erro): self.erro = erro def __str___(self): # return repr(self.erro, Exception) return repr(self.erro) class praat(): def __init__(self): self._praat = pexpect.spawn ('praat -') self._praat.expect ('Praat > ') def __del__(self): self._praat.close() def query (self, command): self._praat.sendline (command) errorlevel = self._praat.expect ([command, 'Error:']) if errorlevel == 1: try: raise PraatError, str(self._praat.readline()) except PraatError, e: print 'PraatError: \n\t', e.erro self._praat.expect ('\r\n') self._praat.expect ('Praat > ') return re.sub('\r\n$','',self._praat.before) # END OF CODE
gpl-2.0
daterrell2/pint_journal_project
pints_user/views.py
2
3554
from django.shortcuts import render, redirect from django.core.urlresolvers import reverse from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from pints_user.forms import UserForm from pints_user.models import UserProfile def register(request): registered = False if request.method == 'POST': form = UserForm(data=request.POST) #profile_form = UserProfileForm(data=request.POST) if form.is_valid(): new_user = User() new_user.username = form.cleaned_data['username'] new_user.email = form.cleaned_data['email'] new_user.set_password(form.cleaned_data['password1']) new_user.save() if 'picture' in request.FILES: new_user_profile = UserProfile(user=new_user) new_user_profile.picture = request.FILES['picture'] new_user_profile.save() registered = True # authenticate and log in new user logged_in = authenticate(username=new_user.username, password=request.POST.get('password1')) try: login(request, logged_in) return redirect('pints_main.views.index') except: return redirect('user_login') else: print form.errors else: form = UserForm() return render( request, 'pints_user/register.html', {'form' : form, 'registered' : registered} ) def user_login(request): #redirect if user is already logged in if request.user.is_authenticated(): return redirect('pints_main.views.index') username, error_message = '', '' if request.method == 'POST': username = request.POST.get('username') password = request.POST.get('password') next_page = request.POST.get('next') # built-in django user authentication user = authenticate(username=username, password=password) if user: #valid user and active account: log in and redirect to referer if user.is_active: login(request, user) if next_page: return redirect(next_page) else: return redirect('pints_main.views.main_page') # inactive account else: redirect('pints_main.welcome') # invalid credentials else: print "Invalid login details: {0}, 
{1}".format(username, password) error_message = 'Invalid Login' return render( request, 'pints_user/login.html', { 'username' : username, 'error_message' : error_message, "next" : next_page } ) else: # initial GET request. 'next' will be hidden input on login form next_page = request.GET.get('next') if not next_page or next_page == reverse(register): next_page = '/' return render( request, 'pints_user/login.html', { 'username' : username, 'error_message' : error_message, 'next' : next_page } ) @login_required def user_logout(request): logout(request) return redirect('pints_main.views.welcome')
mit
afloren/nipype
nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py
2
2188
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.semtools.diffusion.diffusion import DWIConvert def test_DWIConvert_inputs(): input_map = dict(args=dict(argstr='%s', ), conversionMode=dict(argstr='--conversionMode %s', ), environ=dict(nohash=True, usedefault=True, ), fMRI=dict(argstr='--fMRI ', ), fslNIFTIFile=dict(argstr='--fslNIFTIFile %s', ), gradientVectorFile=dict(argstr='--gradientVectorFile %s', hash_files=False, ), ignore_exception=dict(nohash=True, usedefault=True, ), inputBValues=dict(argstr='--inputBValues %s', ), inputBVectors=dict(argstr='--inputBVectors %s', ), inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', ), inputVolume=dict(argstr='--inputVolume %s', ), outputBValues=dict(argstr='--outputBValues %s', hash_files=False, ), outputBVectors=dict(argstr='--outputBVectors %s', hash_files=False, ), outputDirectory=dict(argstr='--outputDirectory %s', hash_files=False, ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), terminal_output=dict(nohash=True, ), useBMatrixGradientDirections=dict(argstr='--useBMatrixGradientDirections ', ), useIdentityMeaseurementFrame=dict(argstr='--useIdentityMeaseurementFrame ', ), writeProtocolGradientsFile=dict(argstr='--writeProtocolGradientsFile ', ), ) inputs = DWIConvert.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_DWIConvert_outputs(): output_map = dict(gradientVectorFile=dict(), outputBValues=dict(), outputBVectors=dict(), outputDirectory=dict(), outputVolume=dict(), ) outputs = DWIConvert.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
CryCLL/tmoHacko
requests/__init__.py
69
3507
# -*- coding: utf-8 -*- # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) # / """ Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> 'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('http://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key2": "value2", "key1": "value1" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at <http://python-requests.org>. :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ import urllib3 import chardet import warnings from .exceptions import RequestsDependencyWarning def check_compatibility(urllib3_version, chardet_version): urllib3_version = urllib3_version.split('.') assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: urllib3_version.append('0') # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) # urllib3 >= 1.21.1, <= 1.22 assert major == 1 assert minor >= 21 assert minor <= 22 # Check chardet for compatibility. major, minor, patch = chardet_version.split('.')[:3] major, minor, patch = int(major), int(minor), int(patch) # chardet >= 3.0.2, < 3.1.0 assert major == 3 assert minor < 1 assert patch >= 2 # Check imported dependencies for compatibility. 
try: check_compatibility(urllib3.__version__, chardet.__version__) except (AssertionError, ValueError): warnings.warn("urllib3 ({0}) or chardet ({1}) doesn't match a supported " "version!".format(urllib3.__version__, chardet.__version__), RequestsDependencyWarning) # Attempt to enable urllib3's SNI support, if possible try: from urllib3.contrib import pyopenssl pyopenssl.inject_into_urllib3() except ImportError: pass # urllib3's DependencyWarnings should be silenced. from urllib3.exceptions import DependencyWarning warnings.simplefilter('ignore', DependencyWarning) from .__version__ import __title__, __description__, __url__, __version__ from .__version__ import __build__, __author__, __author_email__, __license__ from .__version__ import __copyright__, __cake__ from . import utils from . import packages from .models import Request, Response, PreparedRequest from .api import request, get, head, post, patch, put, delete, options from .sessions import session, Session from .status_codes import codes from .exceptions import ( RequestException, Timeout, URLRequired, TooManyRedirects, HTTPError, ConnectionError, FileModeWarning, ConnectTimeout, ReadTimeout ) # Set default logging handler to avoid "No handler found" warnings. import logging try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. warnings.simplefilter('default', FileModeWarning, append=True)
mit
outsmartit/foundout6
node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
896
91092
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Notes: # # This is all roughly based on the Makefile system used by the Linux # kernel, but is a non-recursive make -- we put the entire dependency # graph in front of make and let it figure it out. # # The code below generates a separate .mk file for each target, but # all are sourced by the top-level Makefile. This means that all # variables in .mk-files clobber one another. Be careful to use := # where appropriate for immediate evaluation, and similarly to watch # that you're not relying on a variable value to last beween different # .mk files. # # TODOs: # # Global settings and utility functions are currently stuffed in the # toplevel Makefile. It may make sense to generate some .mk files on # the side to keep the the files readable. import os import re import sys import subprocess import gyp import gyp.common import gyp.xcode_emulation from gyp.common import GetEnvironFallback from gyp.common import GypError generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'SHARED_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni', 'SHARED_INTERMEDIATE_DIR': '$(obj)/gen', 'PRODUCT_DIR': '$(builddir)', 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. 'RULE_INPUT_PATH': '$(abspath $<)', 'RULE_INPUT_EXT': '$(suffix $<)', 'RULE_INPUT_NAME': '$(notdir $<)', 'CONFIGURATION_NAME': '$(BUILDTYPE)', } # Make supports multiple toolsets generator_supports_multiple_toolsets = True # Request sorted dependencies in the order from dependents to dependencies. generator_wants_sorted_dependencies = False # Placates pylint. 
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None


def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Mutates default_variables in place (via setdefault, so caller-provided
  values win) and, on mac, rebinds several module-level generator_* lists
  from the Xcode generator.
  """
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])

    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    global generator_additional_path_sections
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
    # Objective-C(++) sources are only compilable on mac; COMPILABLE_EXTENSIONS
    # is the module-level dict defined further down in this file.
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')


def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp).

  Side effects: may set generator_wants_sorted_dependencies, and always
  rebinds the module-level generator_filelist_paths dict.
  """
  generator_flags = params.get('generator_flags', {})
  android_ndk_version = generator_flags.get('android_ndk_version', None)
  # Android NDK requires a strict link order.
  if android_ndk_version:
    global generator_wants_sorted_dependencies
    generator_wants_sorted_dependencies = True

  output_dir = params['options'].generator_output or \
               params['options'].toplevel_dir
  builddir_name = generator_flags.get('output_dir', 'out')
  qualified_out_dir = os.path.normpath(os.path.join(
    output_dir, builddir_name, 'gypfiles'))

  global generator_filelist_paths
  generator_filelist_paths = {
    'toplevel': params['options'].toplevel_dir,
    'qualified_out_dir': qualified_out_dir,
  }


# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
#   Chromium\ Framework.framework/foo
# is for example
#   out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'


# Per-platform Makefile fragments defining the archive/link commands.
# NOTE(review): presumably spliced into SHARED_HEADER via its
# %(link_commands)s placeholder at format time -- confirm at the format site
# (outside this chunk).
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)

quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)

# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)

# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)

# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
#   the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)

quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""

LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)

quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)

quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)

quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""

# Android adds separate *_host variants: host-side links skip the
# --start-group/--end-group wrapping used for target links.
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)

quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)

# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)

# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
#   the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)

quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)

quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""

# AIX's ar needs -X32_64 to accept both 32- and 64-bit objects; no thin
# archives, so alink_thin falls back to a plain archive.
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)

quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)

quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)

quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)

quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""


# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\ # We borrow heavily from the kernel build setup, though we are simpler since # we don't have Kconfig tweaking settings on us. # The implicit make rules have it looking for RCS files, among other things. # We instead explicitly write all the rules we care about. # It's even quicker (saves ~200ms) to pass -r on the command line. MAKEFLAGS=-r # The source directory tree. srcdir := %(srcdir)s abs_srcdir := $(abspath $(srcdir)) # The name of the builddir. builddir_name ?= %(builddir)s # The V=1 flag on command line makes us verbosely print command lines. ifdef V quiet= else quiet=quiet_ endif # Specify BUILDTYPE=Release on the command line for a release build. BUILDTYPE ?= %(default_configuration)s # Directory all our build output goes into. # Note that this must be two directories beneath src/ for unit tests to pass, # as they reach into the src/ directory for data with relative paths. builddir ?= $(builddir_name)/$(BUILDTYPE) abs_builddir := $(abspath $(builddir)) depsdir := $(builddir)/.deps # Object output directory. obj := $(builddir)/obj abs_obj := $(abspath $(obj)) # We build up a list of every single one of the targets so we can slurp in the # generated dependency rule Makefiles in one pass. all_deps := %(make_global_settings)s CC.target ?= %(CC.target)s CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS) CXX.target ?= %(CXX.target)s CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS) LINK.target ?= %(LINK.target)s LDFLAGS.target ?= $(LDFLAGS) AR.target ?= $(AR) # C++ apps need to be linked with g++. LINK ?= $(CXX.target) # TODO(evan): move all cross-compilation logic to gyp-time so we don't need # to replicate this environment fallback in make as well. CC.host ?= %(CC.host)s CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host) CXX.host ?= %(CXX.host)s CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host) LINK.host ?= %(LINK.host)s LDFLAGS.host ?= AR.host ?= %(AR.host)s # Define a dir function that can handle spaces. 
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions # "leading spaces cannot appear in the text of the first argument as written. # These characters can be put into the argument value by variable substitution." empty := space := $(empty) $(empty) # http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1) unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1) dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1))) # Flags to make gcc output dependency info. Note that you need to be # careful here to use the flags that ccache and distcc can understand. # We write to a dep file on the side first and then rename at the end # so we can't end up with a broken dep file. depfile = $(depsdir)/$(call replace_spaces,$@).d DEPFLAGS = -MMD -MF $(depfile).raw # We have to fixup the deps output in a few ways. # (1) the file output should mention the proper .o file. # ccache or distcc lose the path to the target, so we convert a rule of # the form: # foobar.o: DEP1 DEP2 # into # path/to/foobar.o: DEP1 DEP2 # (2) we want missing files not to cause us to fail to build. # We want to rewrite # foobar.o: DEP1 DEP2 \\ # DEP3 # to # DEP1: # DEP2: # DEP3: # so if the files are missing, they're just considered phony rules. # We have to do some pretty insane escaping to get those backslashes # and dollar signs past make, the shell, and sed at the same time. # Doesn't work with spaces, but that's fine: .d files have spaces in # their names replaced with other characters.""" r""" define fixup_dep # The depfile may not exist if the input file didn't have any #includes. touch $(depfile).raw # Fixup path as in (1). sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile) # Add extra rules as in (2). # We remove slashes and replace spaces with new lines; # remove blank lines; # delete the first line and append a colon to the remaining lines. 
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\ grep -v '^$$' |\ sed -e 1d -e 's|$$|:|' \ >> $(depfile) rm $(depfile).raw endef """ """ # Command definitions: # - cmd_foo is the actual command to run; # - quiet_cmd_foo is the brief-output summary of the command. quiet_cmd_cc = CC($(TOOLSET)) $@ cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $< quiet_cmd_cxx = CXX($(TOOLSET)) $@ cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< %(extra_commands)s quiet_cmd_touch = TOUCH $@ cmd_touch = touch $@ quiet_cmd_copy = COPY $@ # send stderr to /dev/null to ignore messages when linking directories. cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@" %(link_commands)s """ r""" # Define an escape_quotes function to escape single quotes. # This allows us to handle quotes properly as long as we always use # use single quotes and escape_quotes. escape_quotes = $(subst ','\'',$(1)) # This comment is here just to include a ' to unconfuse syntax highlighting. # Define an escape_vars function to escape '$' variable syntax. # This allows us to read/write command lines with shell variables (e.g. # $LD_LIBRARY_PATH), without triggering make substitution. escape_vars = $(subst $$,$$$$,$(1)) # Helper that expands to a shell command to echo a string exactly as it is in # make. This uses printf instead of echo because printf's behaviour with respect # to escape sequences is more portable than echo's across different shells # (e.g., dash, bash). exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))' """ """ # Helper to compare the command we're about to run against the command # we logged the last time we ran the command. Produces an empty # string (false) when the commands match. # Tricky point: Make has no string-equality test function. # The kernel uses the following, but it seems like it would have false # positives, where one string reordered its arguments. 
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\ # $(filter-out $(cmd_$@), $(cmd_$(1)))) # We instead substitute each for the empty string into the other, and # say they're equal if both substitutions produce the empty string. # .d files contain """ + SPACE_REPLACEMENT + \ """ instead of spaces, take that into account. command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\ $(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1)))) # Helper that is non-empty when a prerequisite changes. # Normally make does this implicitly, but we force rules to always run # so we can check their command lines. # $? -- new prerequisites # $| -- order-only dependencies prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?)) # Helper that executes all postbuilds until one fails. define do_postbuilds @E=0;\\ for p in $(POSTBUILDS); do\\ eval $$p;\\ E=$$?;\\ if [ $$E -ne 0 ]; then\\ break;\\ fi;\\ done;\\ if [ $$E -ne 0 ]; then\\ rm -rf "$@";\\ exit $$E;\\ fi endef # do_cmd: run a command via the above cmd_foo names, if necessary. # Should always run for a given target to handle command-line changes. # Second argument, if non-zero, makes it do asm/C/C++ dependency munging. # Third argument, if non-zero, makes it do POSTBUILDS processing. # Note: We intentionally do NOT call dirx for depfile, since it contains """ + \ SPACE_REPLACEMENT + """ for # spaces already and dirx strips the """ + SPACE_REPLACEMENT + \ """ characters. 
define do_cmd $(if $(or $(command_changed),$(prereq_changed)), @$(call exact_echo, $($(quiet)cmd_$(1))) @mkdir -p "$(call dirx,$@)" "$(dir $(depfile))" $(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))), @$(cmd_$(1)) @echo " $(quiet_cmd_$(1)): Finished", @$(cmd_$(1)) ) @$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile) @$(if $(2),$(fixup_dep)) $(if $(and $(3), $(POSTBUILDS)), $(call do_postbuilds) ) ) endef # Declare the "%(default_target)s" target first so it is the default, # even though we don't have the deps yet. .PHONY: %(default_target)s %(default_target)s: # make looks for ways to re-generate included makefiles, but in our case, we # don't have a direct way. Explicitly telling make that it has nothing to do # for them makes it go faster. %%.d: ; # Use FORCE_DO_CMD to force a target to run. Should be coupled with # do_cmd. .PHONY: FORCE_DO_CMD FORCE_DO_CMD: """) SHARED_HEADER_MAC_COMMANDS = """ quiet_cmd_objc = CXX($(TOOLSET)) $@ cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $< quiet_cmd_objcxx = CXX($(TOOLSET)) $@ cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $< # Commands for precompiled header files. quiet_cmd_pch_c = CXX($(TOOLSET)) $@ cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< quiet_cmd_pch_cc = CXX($(TOOLSET)) $@ cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< quiet_cmd_pch_m = CXX($(TOOLSET)) $@ cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $< quiet_cmd_pch_mm = CXX($(TOOLSET)) $@ cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $< # gyp-mac-tool is written next to the root Makefile by gyp. # Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd # already. 
quiet_cmd_mac_tool = MACTOOL $(4) $< cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@" quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@ cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4) quiet_cmd_infoplist = INFOPLIST $@ cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@" """ def WriteRootHeaderSuffixRules(writer): extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower) writer.write('# Suffix rules, putting all outputs into $(obj).\n') for ext in extensions: writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext) writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext]) writer.write('\n# Try building from generated source, too.\n') for ext in extensions: writer.write( '$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext) writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext]) writer.write('\n') for ext in extensions: writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext) writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext]) writer.write('\n') SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\ # Suffix rules, putting all outputs into $(obj). """) SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\ # Try building from generated source, too. """) SHARED_FOOTER = """\ # "all" is a concatenation of the "all" targets from all the included # sub-makefiles. This is just here to clarify. all: # Add in dependency-tracking rules. $(all_deps) is the list of every single # target in our tree. Only consider the ones with .d (dependency) info: d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d)) ifneq ($(d_files),) include $(d_files) endif """ header = """\ # This file is generated by gyp; do not edit. """ # Maps every compilable file extension to the do_cmd that compiles it. 
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}

def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  # str.endswith accepts a tuple of suffixes, which replaces the former
  # manual generator loop with one call.  The tuple is rebuilt per call on
  # purpose: CalculateVariables may add '.m'/'.mm' to COMPILABLE_EXTENSIONS
  # after import time (on mac), and that update must be honored here.
  return filename.endswith(tuple(COMPILABLE_EXTENSIONS))

def Linkable(filename):
  """Return true if the file is linkable (should be on the link line)."""
  return filename.endswith('.o')

def Target(filename):
  """Translate a compilable filename to its .o target."""
  return os.path.splitext(filename)[0] + '.o'

def EscapeShellArgument(s):
  """Quotes an argument so that it will be interpreted literally by a POSIX
  shell. Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  # Wrap in single quotes; an embedded single quote becomes '\'' (close
  # quote, escaped quote, reopen quote).
  return "'" + s.replace("'", "'\\''") + "'"

def EscapeMakeVariableExpansion(s):
  """Make has its own variable expansion syntax using $. We must escape it for
  string to be interpreted literally."""
  return s.replace('$', '$$')

def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  s = EscapeShellArgument(s)
  s = EscapeMakeVariableExpansion(s)
  # '#' characters must be escaped even embedded in a string, else Make will
  # treat it as the start of a comment.
  return s.replace('#', r'\#')

def QuoteIfNecessary(string):
  """TODO: Should this ideally be replaced with one or more of the above
  functions?"""
  # Double-quote only when the string itself contains a double quote.
  if '"' in string:
    string = '"' + string.replace('"', '\\"') + '"'
  return string

# Compiled once at import time: StringToMakefileVariable runs for every
# target, action and rule name, so hoisting the pattern avoids a per-call
# re-cache lookup.  Same pattern as the former inline re.sub argument.
_MAKEFILE_VARIABLE_RE = re.compile('[^a-zA-Z0-9_]')

def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  return _MAKEFILE_VARIABLE_RE.sub('_', string)

srcdir_prefix = ''
def Sourceify(path):
  """Convert a path to its source directory form.

  Paths containing make-variable expansions ('$(') or absolute paths are
  returned unchanged; other (relative) paths get the module-level
  srcdir_prefix prepended.
  """
  if '$(' in path:
    return path
  if os.path.isabs(path):
    return path
  return srcdir_prefix + path

def QuoteSpaces(s, quote=r'\ '):
  """Escape spaces in s (backslash-escape by default) for use in make."""
  return s.replace(' ', quote)

# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources): """Makes sure if duplicate basenames are not specified in the source list. Arguments: spec: The target dictionary containing the properties of the target. """ if spec.get('type', None) != 'static_library': return basenames = {} for source in all_sources: name, ext = os.path.splitext(source) is_compiled_file = ext in [ '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'] if not is_compiled_file: continue basename = os.path.basename(name) # Don't include extension. basenames.setdefault(basename, []).append(source) error = '' for basename, files in basenames.iteritems(): if len(files) > 1: error += ' %s: %s\n' % (basename, ' '.join(files)) if error: print('static library %s has several files with the same basename:\n' % spec['target_name'] + error + 'libtool on OS X will generate' + ' warnings for them.') raise GypError('Duplicate basenames in sources section, see list above') # Map from qualified target to path to output. target_outputs = {} # Map from qualified target to any linkable output. A subset # of target_outputs. E.g. when mybinary depends on liba, we want to # include liba in the linker line; when otherbinary depends on # mybinary, we just want to build mybinary first. target_link_deps = {} class MakefileWriter(object): """MakefileWriter packages up the writing of one target-specific foobar.mk. Its only real entry point is Write(), and is mostly used for namespacing. """ def __init__(self, generator_flags, flavor): self.generator_flags = generator_flags self.flavor = flavor self.suffix_rules_srcdir = {} self.suffix_rules_objdir1 = {} self.suffix_rules_objdir2 = {} # Generate suffix rules for all compilable extensions. for ext in COMPILABLE_EXTENSIONS.keys(): # Suffix rules for source folder. self.suffix_rules_srcdir.update({ext: ("""\ $(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD @$(call do_cmd,%s,1) """ % (ext, COMPILABLE_EXTENSIONS[ext]))}) # Suffix rules for generated source files. 
self.suffix_rules_objdir1.update({ext: ("""\ $(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD @$(call do_cmd,%s,1) """ % (ext, COMPILABLE_EXTENSIONS[ext]))}) self.suffix_rules_objdir2.update({ext: ("""\ $(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD @$(call do_cmd,%s,1) """ % (ext, COMPILABLE_EXTENSIONS[ext]))}) def Write(self, qualified_target, base_path, output_filename, spec, configs, part_of_all): """The main entry point: writes a .mk file for a single target. Arguments: qualified_target: target we're generating base_path: path relative to source root we're building in, used to resolve target-relative paths output_filename: output .mk file name to write spec, configs: gyp info part_of_all: flag indicating this target is part of 'all' """ gyp.common.EnsureDirExists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) self.qualified_target = qualified_target self.path = base_path self.target = spec['target_name'] self.type = spec['type'] self.toolset = spec['toolset'] self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec) if self.flavor == 'mac': self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) else: self.xcode_settings = None deps, link_deps = self.ComputeDeps(spec) # Some of the generation below can add extra output, sources, or # link dependencies. All of the out params of the functions that # follow use names like extra_foo. 
extra_outputs = [] extra_sources = [] extra_link_deps = [] extra_mac_bundle_resources = [] mac_bundle_deps = [] if self.is_mac_bundle: self.output = self.ComputeMacBundleOutput(spec) self.output_binary = self.ComputeMacBundleBinaryOutput(spec) else: self.output = self.output_binary = self.ComputeOutput(spec) self.is_standalone_static_library = bool( spec.get('standalone_static_library', 0)) self._INSTALLABLE_TARGETS = ('executable', 'loadable_module', 'shared_library') if (self.is_standalone_static_library or self.type in self._INSTALLABLE_TARGETS): self.alias = os.path.basename(self.output) install_path = self._InstallableTargetInstallPath() else: self.alias = self.output install_path = self.output self.WriteLn("TOOLSET := " + self.toolset) self.WriteLn("TARGET := " + self.target) # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: self.WriteActions(spec['actions'], extra_sources, extra_outputs, extra_mac_bundle_resources, part_of_all) # Rules must be early like actions. if 'rules' in spec: self.WriteRules(spec['rules'], extra_sources, extra_outputs, extra_mac_bundle_resources, part_of_all) if 'copies' in spec: self.WriteCopies(spec['copies'], extra_outputs, part_of_all) # Bundle resources. if self.is_mac_bundle: all_mac_bundle_resources = ( spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources) self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps) self.WriteMacInfoPlist(mac_bundle_deps) # Sources. all_sources = spec.get('sources', []) + extra_sources if all_sources: if self.flavor == 'mac': # libtool on OS X generates warnings for duplicate basenames in the same # target. 
_ValidateSourcesForOSX(spec, all_sources) self.WriteSources( configs, deps, all_sources, extra_outputs, extra_link_deps, part_of_all, gyp.xcode_emulation.MacPrefixHeader( self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)), self.Pchify)) sources = filter(Compilable, all_sources) if sources: self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1) extensions = set([os.path.splitext(s)[1] for s in sources]) for ext in extensions: if ext in self.suffix_rules_srcdir: self.WriteLn(self.suffix_rules_srcdir[ext]) self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2) for ext in extensions: if ext in self.suffix_rules_objdir1: self.WriteLn(self.suffix_rules_objdir1[ext]) for ext in extensions: if ext in self.suffix_rules_objdir2: self.WriteLn(self.suffix_rules_objdir2[ext]) self.WriteLn('# End of this set of suffix rules') # Add dependency from bundle to bundle binary. if self.is_mac_bundle: mac_bundle_deps.append(self.output_binary) self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps, mac_bundle_deps, extra_outputs, part_of_all) # Update global list of target outputs, used in dependency tracking. target_outputs[qualified_target] = install_path # Update global list of link dependencies. if self.type in ('static_library', 'shared_library'): target_link_deps[qualified_target] = self.output_binary # Currently any versions have the same effect, but in future the behavior # could be different. if self.generator_flags.get('android_ndk_version', None): self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps) self.fp.close() def WriteSubMake(self, output_filename, makefile_path, targets, build_dir): """Write a "sub-project" Makefile. This is a small, wrapper Makefile that calls the top-level Makefile to build the targets from a single gyp file (i.e. a sub-project). 
Arguments: output_filename: sub-project Makefile name to write makefile_path: path to the top-level Makefile targets: list of "all" targets for this sub-project build_dir: build output directory, relative to the sub-project """ gyp.common.EnsureDirExists(output_filename) self.fp = open(output_filename, 'w') self.fp.write(header) # For consistency with other builders, put sub-project build output in the # sub-project dir (see test/subdirectory/gyptest-subdir-all.py). self.WriteLn('export builddir_name ?= %s' % os.path.join(os.path.dirname(output_filename), build_dir)) self.WriteLn('.PHONY: all') self.WriteLn('all:') if makefile_path: makefile_path = ' -C ' + makefile_path self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets))) self.fp.close() def WriteActions(self, actions, extra_sources, extra_outputs, extra_mac_bundle_resources, part_of_all): """Write Makefile code for any 'actions' from the gyp input. extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these actions (used to make other pieces dependent on these actions) part_of_all: flag indicating this target is part of 'all' """ env = self.GetSortedXcodeEnv() for action in actions: name = StringToMakefileVariable('%s_%s' % (self.qualified_target, action['action_name'])) self.WriteLn('### Rules for action "%s":' % action['action_name']) inputs = action['inputs'] outputs = action['outputs'] # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set() for out in outputs: dir = os.path.split(out)[0] if dir: dirs.add(dir) if int(action.get('process_outputs_as_sources', False)): extra_sources += outputs if int(action.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += outputs # Write the actual command. 
action_commands = action['action'] if self.flavor == 'mac': action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env) for command in action_commands] command = gyp.common.EncodePOSIXShellList(action_commands) if 'message' in action: self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message'])) else: self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name)) if len(dirs) > 0: command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command cd_action = 'cd %s; ' % Sourceify(self.path or '.') # command and cd_action get written to a toplevel variable called # cmd_foo. Toplevel variables can't handle things that change per # makefile like $(TARGET), so hardcode the target. command = command.replace('$(TARGET)', self.target) cd_action = cd_action.replace('$(TARGET)', self.target) # Set LD_LIBRARY_PATH in case the action runs an executable from this # build which links to shared libs from this build. # actions run on the host, so they should in theory only use host # libraries, but until everything is made cross-compile safe, also use # target libraries. # TODO(piman): when everything is cross-compile safe, remove lib.target self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:' '$(builddir)/lib.target:$$LD_LIBRARY_PATH; ' 'export LD_LIBRARY_PATH; ' '%s%s' % (name, cd_action, command)) self.WriteLn() outputs = map(self.Absolutify, outputs) # The makefile rules are all relative to the top dir, but the gyp actions # are defined relative to their containing dir. This replaces the obj # variable for the action rule with an absolute version so that the output # goes in the right place. # Only write the 'obj' and 'builddir' rules for the "primary" output (:1); # it's superfluous for the "extra outputs", and this avoids accidentally # writing duplicate dummy rules for those outputs. # Same for environment. 
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0])) self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0])) self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv()) for input in inputs: assert ' ' not in input, ( "Spaces in action input filenames not supported (%s)" % input) for output in outputs: assert ' ' not in output, ( "Spaces in action output filenames not supported (%s)" % output) # See the comment in WriteCopies about expanding env vars. outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs] inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs] self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)), part_of_all=part_of_all, command=name) # Stuff the outputs in a variable so we can refer to them later. outputs_variable = 'action_%s_outputs' % name self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs))) extra_outputs.append('$(%s)' % outputs_variable) self.WriteLn() self.WriteLn() def WriteRules(self, rules, extra_sources, extra_outputs, extra_mac_bundle_resources, part_of_all): """Write Makefile code for any 'rules' from the gyp input. 
extra_sources: a list that will be filled in with newly generated source files, if any extra_outputs: a list that will be filled in with any outputs of these rules (used to make other pieces dependent on these rules) part_of_all: flag indicating this target is part of 'all' """ env = self.GetSortedXcodeEnv() for rule in rules: name = StringToMakefileVariable('%s_%s' % (self.qualified_target, rule['rule_name'])) count = 0 self.WriteLn('### Generated for rule %s:' % name) all_outputs = [] for rule_source in rule.get('rule_sources', []): dirs = set() (rule_source_dirname, rule_source_basename) = os.path.split(rule_source) (rule_source_root, rule_source_ext) = \ os.path.splitext(rule_source_basename) outputs = [self.ExpandInputRoot(out, rule_source_root, rule_source_dirname) for out in rule['outputs']] for out in outputs: dir = os.path.dirname(out) if dir: dirs.add(dir) if int(rule.get('process_outputs_as_sources', False)): extra_sources += outputs if int(rule.get('process_outputs_as_mac_bundle_resources', False)): extra_mac_bundle_resources += outputs inputs = map(Sourceify, map(self.Absolutify, [rule_source] + rule.get('inputs', []))) actions = ['$(call do_cmd,%s_%d)' % (name, count)] if name == 'resources_grit': # HACK: This is ugly. Grit intentionally doesn't touch the # timestamp of its output file when the file doesn't change, # which is fine in hash-based dependency systems like scons # and forge, but not kosher in the make world. After some # discussion, hacking around it here seems like the least # amount of pain. actions += ['@touch --no-create $@'] # See the comment in WriteCopies about expanding env vars. 
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs] inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs] outputs = map(self.Absolutify, outputs) all_outputs += outputs # Only write the 'obj' and 'builddir' rules for the "primary" output # (:1); it's superfluous for the "extra outputs", and this avoids # accidentally writing duplicate dummy rules for those outputs. self.WriteLn('%s: obj := $(abs_obj)' % outputs[0]) self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0]) self.WriteMakeRule(outputs, inputs, actions, command="%s_%d" % (name, count)) # Spaces in rule filenames are not supported, but rule variables have # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)'). # The spaces within the variables are valid, so remove the variables # before checking. variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)') for output in outputs: output = re.sub(variables_with_spaces, '', output) assert ' ' not in output, ( "Spaces in rule filenames not yet supported (%s)" % output) self.WriteLn('all_deps += %s' % ' '.join(outputs)) action = [self.ExpandInputRoot(ac, rule_source_root, rule_source_dirname) for ac in rule['action']] mkdirs = '' if len(dirs) > 0: mkdirs = 'mkdir -p %s; ' % ' '.join(dirs) cd_action = 'cd %s; ' % Sourceify(self.path or '.') # action, cd_action, and mkdirs get written to a toplevel variable # called cmd_foo. Toplevel variables can't handle things that change # per makefile like $(TARGET), so hardcode the target. if self.flavor == 'mac': action = [gyp.xcode_emulation.ExpandEnvVars(command, env) for command in action] action = gyp.common.EncodePOSIXShellList(action) action = action.replace('$(TARGET)', self.target) cd_action = cd_action.replace('$(TARGET)', self.target) mkdirs = mkdirs.replace('$(TARGET)', self.target) # Set LD_LIBRARY_PATH in case the rule runs an executable from this # build which links to shared libs from this build. 
# rules run on the host, so they should in theory only use host # libraries, but until everything is made cross-compile safe, also use # target libraries. # TODO(piman): when everything is cross-compile safe, remove lib.target self.WriteLn( "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH=" "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; " "export LD_LIBRARY_PATH; " "%(cd_action)s%(mkdirs)s%(action)s" % { 'action': action, 'cd_action': cd_action, 'count': count, 'mkdirs': mkdirs, 'name': name, }) self.WriteLn( 'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % { 'count': count, 'name': name, }) self.WriteLn() count += 1 outputs_variable = 'rule_%s_outputs' % name self.WriteList(all_outputs, outputs_variable) extra_outputs.append('$(%s)' % outputs_variable) self.WriteLn('### Finished generating for rule: %s' % name) self.WriteLn() self.WriteLn('### Finished generating for all rules') self.WriteLn('') def WriteCopies(self, copies, extra_outputs, part_of_all): """Write Makefile code for any 'copies' from the gyp input. extra_outputs: a list that will be filled in with any outputs of this action (used to make other pieces dependent on this action) part_of_all: flag indicating this target is part of 'all' """ self.WriteLn('### Generated for copy rule.') variable = StringToMakefileVariable(self.qualified_target + '_copies') outputs = [] for copy in copies: for path in copy['files']: # Absolutify() may call normpath, and will strip trailing slashes. path = Sourceify(self.Absolutify(path)) filename = os.path.split(path)[1] output = Sourceify(self.Absolutify(os.path.join(copy['destination'], filename))) # If the output path has variables in it, which happens in practice for # 'copies', writing the environment as target-local doesn't work, # because the variables are already needed for the target name. 
# Copying the environment variables into global make variables doesn't # work either, because then the .d files will potentially contain spaces # after variable expansion, and .d file handling cannot handle spaces. # As a workaround, manually expand variables at gyp time. Since 'copies' # can't run scripts, there's no need to write the env then. # WriteDoCmd() will escape spaces for .d files. env = self.GetSortedXcodeEnv() output = gyp.xcode_emulation.ExpandEnvVars(output, env) path = gyp.xcode_emulation.ExpandEnvVars(path, env) self.WriteDoCmd([output], [path], 'copy', part_of_all) outputs.append(output) self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs)))) extra_outputs.append('$(%s)' % variable) self.WriteLn() def WriteMacBundleResources(self, resources, bundle_deps): """Writes Makefile code for 'mac_bundle_resources'.""" self.WriteLn('### Generated for mac_bundle_resources') for output, res in gyp.xcode_emulation.GetMacBundleResources( generator_default_variables['PRODUCT_DIR'], self.xcode_settings, map(Sourceify, map(self.Absolutify, resources))): _, ext = os.path.splitext(output) if ext != '.xcassets': # Make does not supports '.xcassets' emulation. self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource', part_of_all=True) bundle_deps.append(output) def WriteMacInfoPlist(self, bundle_deps): """Write Makefile code for bundle Info.plist files.""" info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist( generator_default_variables['PRODUCT_DIR'], self.xcode_settings, lambda p: Sourceify(self.Absolutify(p))) if not info_plist: return if defines: # Create an intermediate file to store preprocessed results. 
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' + os.path.basename(info_plist)) self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D', quoter=EscapeCppDefine) self.WriteMakeRule([intermediate_plist], [info_plist], ['$(call do_cmd,infoplist)', # "Convert" the plist so that any weird whitespace changes from the # preprocessor do not affect the XML parser in mac_tool. '@plutil -convert xml1 $@ $@']) info_plist = intermediate_plist # plists can contain envvars and substitute them into the file. self.WriteSortedXcodeEnv( out, self.GetSortedXcodeEnv(additional_settings=extra_env)) self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist', part_of_all=True) bundle_deps.append(out) def WriteSources(self, configs, deps, sources, extra_outputs, extra_link_deps, part_of_all, precompiled_header): """Write Makefile code for any 'sources' from the gyp input. These are source files necessary to build the current target. configs, deps, sources: input from gyp. extra_outputs: a list of extra outputs this action should be dependent on; used to serialize action/rules before compilation extra_link_deps: a list that will be filled in with any outputs of compilation (to be used in link lines) part_of_all: flag indicating this target is part of 'all' """ # Write configuration-specific variables for CFLAGS, etc. 
for configname in sorted(configs.keys()): config = configs[configname] self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D', quoter=EscapeCppDefine) if self.flavor == 'mac': cflags = self.xcode_settings.GetCflags(configname) cflags_c = self.xcode_settings.GetCflagsC(configname) cflags_cc = self.xcode_settings.GetCflagsCC(configname) cflags_objc = self.xcode_settings.GetCflagsObjC(configname) cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname) else: cflags = config.get('cflags') cflags_c = config.get('cflags_c') cflags_cc = config.get('cflags_cc') self.WriteLn("# Flags passed to all source files."); self.WriteList(cflags, 'CFLAGS_%s' % configname) self.WriteLn("# Flags passed to only C files."); self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname) self.WriteLn("# Flags passed to only C++ files."); self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname) if self.flavor == 'mac': self.WriteLn("# Flags passed to only ObjC files."); self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname) self.WriteLn("# Flags passed to only ObjC++ files."); self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname) includes = config.get('include_dirs') if includes: includes = map(Sourceify, map(self.Absolutify, includes)) self.WriteList(includes, 'INCS_%s' % configname, prefix='-I') compilable = filter(Compilable, sources) objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable))) self.WriteList(objs, 'OBJS') for obj in objs: assert ' ' not in obj, ( "Spaces in object filenames not supported (%s)" % obj) self.WriteLn('# Add to the list of files we specially track ' 'dependencies for.') self.WriteLn('all_deps += $(OBJS)') self.WriteLn() # Make sure our dependencies are built first. if deps: self.WriteMakeRule(['$(OBJS)'], deps, comment = 'Make sure our dependencies are built ' 'before any of us.', order_only = True) # Make sure the actions and rules run first. 
# If they generate any extra headers etc., the per-.o file dep tracking # will catch the proper rebuilds, so order only is still ok here. if extra_outputs: self.WriteMakeRule(['$(OBJS)'], extra_outputs, comment = 'Make sure our actions/rules run ' 'before any of us.', order_only = True) pchdeps = precompiled_header.GetObjDependencies(compilable, objs ) if pchdeps: self.WriteLn('# Dependencies from obj files to their precompiled headers') for source, obj, gch in pchdeps: self.WriteLn('%s: %s' % (obj, gch)) self.WriteLn('# End precompiled header dependencies') if objs: extra_link_deps.append('$(OBJS)') self.WriteLn("""\ # CFLAGS et al overrides must be target-local. # See "Target-specific Variable Values" in the GNU Make manual.""") self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)") self.WriteLn("$(OBJS): GYP_CFLAGS := " "$(DEFS_$(BUILDTYPE)) " "$(INCS_$(BUILDTYPE)) " "%s " % precompiled_header.GetInclude('c') + "$(CFLAGS_$(BUILDTYPE)) " "$(CFLAGS_C_$(BUILDTYPE))") self.WriteLn("$(OBJS): GYP_CXXFLAGS := " "$(DEFS_$(BUILDTYPE)) " "$(INCS_$(BUILDTYPE)) " "%s " % precompiled_header.GetInclude('cc') + "$(CFLAGS_$(BUILDTYPE)) " "$(CFLAGS_CC_$(BUILDTYPE))") if self.flavor == 'mac': self.WriteLn("$(OBJS): GYP_OBJCFLAGS := " "$(DEFS_$(BUILDTYPE)) " "$(INCS_$(BUILDTYPE)) " "%s " % precompiled_header.GetInclude('m') + "$(CFLAGS_$(BUILDTYPE)) " "$(CFLAGS_C_$(BUILDTYPE)) " "$(CFLAGS_OBJC_$(BUILDTYPE))") self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := " "$(DEFS_$(BUILDTYPE)) " "$(INCS_$(BUILDTYPE)) " "%s " % precompiled_header.GetInclude('mm') + "$(CFLAGS_$(BUILDTYPE)) " "$(CFLAGS_CC_$(BUILDTYPE)) " "$(CFLAGS_OBJCC_$(BUILDTYPE))") self.WritePchTargets(precompiled_header.GetPchBuildCommands()) # If there are any object files in our input file list, link them into our # output. 
    # Object files from 'sources' that are already linkable (e.g. .o/.a inputs)
    # are passed straight through to the link step.
    extra_link_deps += filter(Linkable, sources)

    self.WriteLn()

  def WritePchTargets(self, pch_commands):
    """Writes make rules to compile prefix headers.

    pch_commands: list of (gch, lang_flag, lang, input) tuples as produced
      by the precompiled-header helper; lang is one of 'c', 'cc', 'm', 'mm'.
    """
    if not pch_commands:
      return

    for gch, lang_flag, lang, input in pch_commands:
      # Per-language extra flags appended after the common flags.
      extra_flags = {
        'c': '$(CFLAGS_C_$(BUILDTYPE))',
        'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
        'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
        'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
      }[lang]
      # Target-local make variable the pch compile command reads.
      var_name = {
        'c': 'GYP_PCH_CFLAGS',
        'cc': 'GYP_PCH_CXXFLAGS',
        'm': 'GYP_PCH_OBJCFLAGS',
        'mm': 'GYP_PCH_OBJCXXFLAGS',
      }[lang]
      self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
                   "$(DEFS_$(BUILDTYPE)) " +
                   "$(INCS_$(BUILDTYPE)) " +
                   "$(CFLAGS_$(BUILDTYPE)) " +
                   extra_flags)

      self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
      self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
      self.WriteLn('')
      # Spaces would break the all_deps/depfile machinery.
      assert ' ' not in gch, (
          "Spaces in gch filenames not supported (%s)" % gch)
      self.WriteLn('all_deps += %s' % gch)
      self.WriteLn('')

  def ComputeOutputBasename(self, spec):
    """Return the 'output basename' of a gyp spec.
    E.g., the loadable module 'foobar' in directory 'baz' will produce
      'libfoobar.so'
    """
    assert not self.is_mac_bundle

    if self.flavor == 'mac' and self.type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
      # On mac, xcode_settings knows the product naming rules.
      return self.xcode_settings.GetExecutablePath()

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      # Avoid 'liblibfoo.a' when the target name already starts with 'lib'.
      if target[:3] == 'lib':
        target = target[3:]
      target_prefix = 'lib'
      target_ext = '.a'
    elif self.type in ('loadable_module', 'shared_library'):
      if target[:3] == 'lib':
        target = target[3:]
      target_prefix = 'lib'
      target_ext = '.so'
    elif self.type == 'none':
      # 'none' targets produce only a stamp file.
      target = '%s.stamp' % target
    elif self.type != 'executable':
      # NOTE(review): Python 2 print statement with parenthesized args —
      # this emits a tuple repr; presumably acceptable for a diagnostic,
      # but confirm this is intentional.
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    # 'product_prefix' / 'product_name' / 'product_extension' in the gyp
    # spec override the computed defaults.
    target_prefix = spec.get('product_prefix', target_prefix)
    target = spec.get('product_name', target)
    product_ext = spec.get('product_extension')
    if product_ext:
      target_ext = '.' + product_ext

    return target_prefix + target + target_ext

  def _InstallImmediately(self):
    # True when the product should be written directly to its install
    # location (mac 'target' toolset binaries/libraries).
    return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
          'static_library', 'executable', 'shared_library', 'loadable_module')

  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
      '$(obj)/baz/libfoobar.so'
    """
    assert not self.is_mac_bundle

    path = os.path.join('$(obj).'
+ self.toolset, self.path) if self.type == 'executable' or self._InstallImmediately(): path = '$(builddir)' path = spec.get('product_dir', path) return os.path.join(path, self.ComputeOutputBasename(spec)) def ComputeMacBundleOutput(self, spec): """Return the 'output' (full output path) to a bundle output directory.""" assert self.is_mac_bundle path = generator_default_variables['PRODUCT_DIR'] return os.path.join(path, self.xcode_settings.GetWrapperName()) def ComputeMacBundleBinaryOutput(self, spec): """Return the 'output' (full output path) to the binary in a bundle.""" path = generator_default_variables['PRODUCT_DIR'] return os.path.join(path, self.xcode_settings.GetExecutablePath()) def ComputeDeps(self, spec): """Compute the dependencies of a gyp spec. Returns a tuple (deps, link_deps), where each is a list of filenames that will need to be put in front of make for either building (deps) or linking (link_deps). """ deps = [] link_deps = [] if 'dependencies' in spec: deps.extend([target_outputs[dep] for dep in spec['dependencies'] if target_outputs[dep]]) for dep in spec['dependencies']: if dep in target_link_deps: link_deps.append(target_link_deps[dep]) deps.extend(link_deps) # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)? # This hack makes it work: # link_deps.extend(spec.get('libraries', [])) return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) def WriteDependencyOnExtraOutputs(self, target, extra_outputs): self.WriteMakeRule([self.output_binary], extra_outputs, comment = 'Build our special outputs first.', order_only = True) def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps, extra_outputs, part_of_all): """Write Makefile code to produce the final target of the gyp spec. spec, configs: input from gyp. 
deps, link_deps: dependency lists; see ComputeDeps() extra_outputs: any extra outputs that our target should depend on part_of_all: flag indicating this target is part of 'all' """ self.WriteLn('### Rules for final target.') if extra_outputs: self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs) self.WriteMakeRule(extra_outputs, deps, comment=('Preserve order dependency of ' 'special output on deps.'), order_only = True) target_postbuilds = {} if self.type != 'none': for configname in sorted(configs.keys()): config = configs[configname] if self.flavor == 'mac': ldflags = self.xcode_settings.GetLdflags(configname, generator_default_variables['PRODUCT_DIR'], lambda p: Sourceify(self.Absolutify(p))) # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on. gyp_to_build = gyp.common.InvertRelativePath(self.path) target_postbuild = self.xcode_settings.AddImplicitPostbuilds( configname, QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, self.output))), QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, self.output_binary)))) if target_postbuild: target_postbuilds[configname] = target_postbuild else: ldflags = config.get('ldflags', []) # Compute an rpath for this output if needed. if any(dep.endswith('.so') or '.so.' in dep for dep in deps): # We want to get the literal string "$ORIGIN" into the link command, # so we need lots of escaping. 
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset) ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' % self.toolset) library_dirs = config.get('library_dirs', []) ldflags += [('-L%s' % library_dir) for library_dir in library_dirs] self.WriteList(ldflags, 'LDFLAGS_%s' % configname) if self.flavor == 'mac': self.WriteList(self.xcode_settings.GetLibtoolflags(configname), 'LIBTOOLFLAGS_%s' % configname) libraries = spec.get('libraries') if libraries: # Remove duplicate entries libraries = gyp.common.uniquer(libraries) if self.flavor == 'mac': libraries = self.xcode_settings.AdjustLibraries(libraries) self.WriteList(libraries, 'LIBS') self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' % QuoteSpaces(self.output_binary)) self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary)) if self.flavor == 'mac': self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' % QuoteSpaces(self.output_binary)) # Postbuild actions. Like actions, but implicitly depend on the target's # output. postbuilds = [] if self.flavor == 'mac': if target_postbuilds: postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))') postbuilds.extend( gyp.xcode_emulation.GetSpecPostbuildCommands(spec)) if postbuilds: # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE), # so we must output its definition first, since we declare variables # using ":=". self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv()) for configname in target_postbuilds: self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' % (QuoteSpaces(self.output), configname, gyp.common.EncodePOSIXShellList(target_postbuilds[configname]))) # Postbuilds expect to be run in the gyp file's directory, so insert an # implicit postbuild to cd to there. 
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path])) for i in xrange(len(postbuilds)): if not postbuilds[i].startswith('$'): postbuilds[i] = EscapeShellArgument(postbuilds[i]) self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output)) self.WriteLn('%s: POSTBUILDS := %s' % ( QuoteSpaces(self.output), ' '.join(postbuilds))) # A bundle directory depends on its dependencies such as bundle resources # and bundle binary. When all dependencies have been built, the bundle # needs to be packaged. if self.is_mac_bundle: # If the framework doesn't contain a binary, then nothing depends # on the actions -- make the framework depend on them directly too. self.WriteDependencyOnExtraOutputs(self.output, extra_outputs) # Bundle dependencies. Note that the code below adds actions to this # target, so if you move these two lines, move the lines below as well. self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS') self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output)) # After the framework is built, package it. Needs to happen before # postbuilds, since postbuilds depend on this. if self.type in ('shared_library', 'loadable_module'): self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' % self.xcode_settings.GetFrameworkVersion()) # Bundle postbuilds can depend on the whole bundle, so run them after # the bundle is packaged, not already after the bundle binary is done. if postbuilds: self.WriteLn('\t@$(call do_postbuilds)') postbuilds = [] # Don't write postbuilds for target's output. # Needed by test/mac/gyptest-rebuild.py. self.WriteLn('\t@true # No-op, used by tests') # Since this target depends on binary and resources which are in # nested subfolders, the framework directory will be older than # its dependencies usually. To prevent this rule from executing # on every build (expensive, especially with postbuilds), expliclity # update the time on the framework directory. 
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output)) if postbuilds: assert not self.is_mac_bundle, ('Postbuilds for bundles should be done ' 'on the bundle, not the binary (target \'%s\')' % self.target) assert 'product_dir' not in spec, ('Postbuilds do not work with ' 'custom product_dir') if self.type == 'executable': self.WriteLn('%s: LD_INPUTS := %s' % ( QuoteSpaces(self.output_binary), ' '.join(map(QuoteSpaces, link_deps)))) if self.toolset == 'host' and self.flavor == 'android': self.WriteDoCmd([self.output_binary], link_deps, 'link_host', part_of_all, postbuilds=postbuilds) else: self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all, postbuilds=postbuilds) elif self.type == 'static_library': for link_dep in link_deps: assert ' ' not in link_dep, ( "Spaces in alink input filenames not supported (%s)" % link_dep) if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not self.is_standalone_static_library): self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin', part_of_all, postbuilds=postbuilds) else: self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all, postbuilds=postbuilds) elif self.type == 'shared_library': self.WriteLn('%s: LD_INPUTS := %s' % ( QuoteSpaces(self.output_binary), ' '.join(map(QuoteSpaces, link_deps)))) self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all, postbuilds=postbuilds) elif self.type == 'loadable_module': for link_dep in link_deps: assert ' ' not in link_dep, ( "Spaces in module input filenames not supported (%s)" % link_dep) if self.toolset == 'host' and self.flavor == 'android': self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host', part_of_all, postbuilds=postbuilds) else: self.WriteDoCmd( [self.output_binary], link_deps, 'solink_module', part_of_all, postbuilds=postbuilds) elif self.type == 'none': # Write a stamp line. 
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all, postbuilds=postbuilds) else: print "WARNING: no output for", self.type, target # Add an alias for each target (if there are any outputs). # Installable target aliases are created below. if ((self.output and self.output != self.target) and (self.type not in self._INSTALLABLE_TARGETS)): self.WriteMakeRule([self.target], [self.output], comment='Add target alias', phony = True) if part_of_all: self.WriteMakeRule(['all'], [self.target], comment = 'Add target alias to "all" target.', phony = True) # Add special-case rules for our installable targets. # 1) They need to install to the build dir or "product" dir. # 2) They get shortcuts for building (e.g. "make chrome"). # 3) They are part of "make all". if (self.type in self._INSTALLABLE_TARGETS or self.is_standalone_static_library): if self.type == 'shared_library': file_desc = 'shared library' elif self.type == 'static_library': file_desc = 'static library' else: file_desc = 'executable' install_path = self._InstallableTargetInstallPath() installable_deps = [self.output] if (self.flavor == 'mac' and not 'product_dir' in spec and self.toolset == 'target'): # On mac, products are created in install_path immediately. assert install_path == self.output, '%s != %s' % ( install_path, self.output) # Point the target alias to the final binary output. self.WriteMakeRule([self.target], [install_path], comment='Add target alias', phony = True) if install_path != self.output: assert not self.is_mac_bundle # See comment a few lines above. self.WriteDoCmd([install_path], [self.output], 'copy', comment = 'Copy this to the %s output path.' % file_desc, part_of_all=part_of_all) installable_deps.append(install_path) if self.output != self.alias and self.alias != self.target: self.WriteMakeRule([self.alias], installable_deps, comment = 'Short alias for building this %s.' 
% file_desc, phony = True) if part_of_all: self.WriteMakeRule(['all'], [install_path], comment = 'Add %s to "all" target.' % file_desc, phony = True) def WriteList(self, value_list, variable=None, prefix='', quoter=QuoteIfNecessary): """Write a variable definition that is a list of values. E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out foo = blaha blahb but in a pretty-printed style. """ values = '' if value_list: value_list = [quoter(prefix + l) for l in value_list] values = ' \\\n\t' + ' \\\n\t'.join(value_list) self.fp.write('%s :=%s\n\n' % (variable, values)) def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None, postbuilds=False): """Write a Makefile rule that uses do_cmd. This makes the outputs dependent on the command line that was run, as well as support the V= make command line flag. """ suffix = '' if postbuilds: assert ',' not in command suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS self.WriteMakeRule(outputs, inputs, actions = ['$(call do_cmd,%s%s)' % (command, suffix)], comment = comment, command = command, force = True) # Add our outputs to the list of targets we read depfiles from. # all_deps is only used for deps file reading, and for deps files we replace # spaces with ? because escaping doesn't work with make's $(sort) and # other functions. outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs] self.WriteLn('all_deps += %s' % ' '.join(outputs)) def WriteMakeRule(self, outputs, inputs, actions=None, comment=None, order_only=False, force=False, phony=False, command=None): """Write a Makefile rule, with some extra tricks. 
outputs: a list of outputs for the rule (note: this is not directly supported by make; see comments below) inputs: a list of inputs for the rule actions: a list of shell commands to run for the rule comment: a comment to put in the Makefile above the rule (also useful for making this Python script's code self-documenting) order_only: if true, makes the dependency order-only force: if true, include FORCE_DO_CMD as an order-only dep phony: if true, the rule does not actually generate the named output, the output is just a name to run the rule command: (optional) command name to generate unambiguous labels """ outputs = map(QuoteSpaces, outputs) inputs = map(QuoteSpaces, inputs) if comment: self.WriteLn('# ' + comment) if phony: self.WriteLn('.PHONY: ' + ' '.join(outputs)) if actions: self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0]) force_append = ' FORCE_DO_CMD' if force else '' if order_only: # Order only rule: Just write a simple rule. # TODO(evanm): just make order_only a list of deps instead of this hack. self.WriteLn('%s: | %s%s' % (' '.join(outputs), ' '.join(inputs), force_append)) elif len(outputs) == 1: # Regular rule, one output: Just write a simple rule. self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append)) else: # Regular rule, more than one output: Multiple outputs are tricky in # make. We will write three rules: # - All outputs depend on an intermediate file. # - Make .INTERMEDIATE depend on the intermediate. # - The intermediate file depends on the inputs and executes the # actual command. # - The intermediate recipe will 'touch' the intermediate file. # - The multi-output rule will have an do-nothing recipe. 
intermediate = "%s.intermediate" % (command if command else self.target) self.WriteLn('%s: %s' % (' '.join(outputs), intermediate)) self.WriteLn('\t%s' % '@:'); self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate)) self.WriteLn('%s: %s%s' % (intermediate, ' '.join(inputs), force_append)) actions.insert(0, '$(call do_cmd,touch)') if actions: for action in actions: self.WriteLn('\t%s' % action) self.WriteLn() def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps): """Write a set of LOCAL_XXX definitions for Android NDK. These variable definitions will be used by Android NDK but do nothing for non-Android applications. Arguments: module_name: Android NDK module name, which must be unique among all module names. all_sources: A list of source files (will be filtered by Compilable). link_deps: A list of link dependencies, which must be sorted in the order from dependencies to dependents. """ if self.type not in ('executable', 'shared_library', 'static_library'): return self.WriteLn('# Variable definitions for Android applications') self.WriteLn('include $(CLEAR_VARS)') self.WriteLn('LOCAL_MODULE := ' + module_name) self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) ' '$(DEFS_$(BUILDTYPE)) ' # LOCAL_CFLAGS is applied to both of C and C++. There is # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C # sources. '$(CFLAGS_C_$(BUILDTYPE)) ' # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while # LOCAL_C_INCLUDES does not expect it. So put it in # LOCAL_CFLAGS. '$(INCS_$(BUILDTYPE))') # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred. self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))') self.WriteLn('LOCAL_C_INCLUDES :=') self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)') # Detect the C++ extension. 
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0} default_cpp_ext = '.cpp' for filename in all_sources: ext = os.path.splitext(filename)[1] if ext in cpp_ext: cpp_ext[ext] += 1 if cpp_ext[ext] > cpp_ext[default_cpp_ext]: default_cpp_ext = ext self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext) self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)), 'LOCAL_SRC_FILES') # Filter out those which do not match prefix and suffix and produce # the resulting list without prefix and suffix. def DepsToModules(deps, prefix, suffix): modules = [] for filepath in deps: filename = os.path.basename(filepath) if filename.startswith(prefix) and filename.endswith(suffix): modules.append(filename[len(prefix):-len(suffix)]) return modules # Retrieve the default value of 'SHARED_LIB_SUFFIX' params = {'flavor': 'linux'} default_variables = {} CalculateVariables(default_variables, params) self.WriteList( DepsToModules(link_deps, generator_default_variables['SHARED_LIB_PREFIX'], default_variables['SHARED_LIB_SUFFIX']), 'LOCAL_SHARED_LIBRARIES') self.WriteList( DepsToModules(link_deps, generator_default_variables['STATIC_LIB_PREFIX'], generator_default_variables['STATIC_LIB_SUFFIX']), 'LOCAL_STATIC_LIBRARIES') if self.type == 'executable': self.WriteLn('include $(BUILD_EXECUTABLE)') elif self.type == 'shared_library': self.WriteLn('include $(BUILD_SHARED_LIBRARY)') elif self.type == 'static_library': self.WriteLn('include $(BUILD_STATIC_LIBRARY)') self.WriteLn() def WriteLn(self, text=''): self.fp.write(text + '\n') def GetSortedXcodeEnv(self, additional_settings=None): return gyp.xcode_emulation.GetSortedXcodeEnv( self.xcode_settings, "$(abs_builddir)", os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)", additional_settings) def GetSortedXcodePostbuildEnv(self): # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack. # TODO(thakis): It would be nice to have some general mechanism instead. 
strip_save_file = self.xcode_settings.GetPerTargetSetting( 'CHROMIUM_STRIP_SAVE_FILE', '') # Even if strip_save_file is empty, explicitly write it. Else a postbuild # might pick up an export from an earlier target. return self.GetSortedXcodeEnv( additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file}) def WriteSortedXcodeEnv(self, target, env): for k, v in env: # For # foo := a\ b # the escaped space does the right thing. For # export foo := a\ b # it does not -- the backslash is written to the env as literal character. # So don't escape spaces in |env[k]|. self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v)) def Objectify(self, path): """Convert a path to its output directory form.""" if '$(' in path: path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset) if not '$(obj)' in path: path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path) return path def Pchify(self, path, lang): """Convert a prefix header path to its output directory form.""" path = self.Absolutify(path) if '$(' in path: path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' % (self.toolset, lang)) return path return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path) def Absolutify(self, path): """Convert a subdirectory-relative path into a base-relative path. Skips over paths that contain variables.""" if '$(' in path: # Don't call normpath in this case, as it might collapse the # path too aggressively if it features '..'. However it's still # important to strip trailing slashes. 
return path.rstrip('/') return os.path.normpath(os.path.join(self.path, path)) def ExpandInputRoot(self, template, expansion, dirname): if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template: return template path = template % { 'INPUT_ROOT': expansion, 'INPUT_DIRNAME': dirname, } return path def _InstallableTargetInstallPath(self): """Returns the location of the final output for an installable target.""" # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files # rely on this. Emulate this behavior for mac. # XXX(TooTallNate): disabling this code since we don't want this behavior... #if (self.type == 'shared_library' and # (self.flavor != 'mac' or self.toolset != 'target')): # # Install all shared libs into a common directory (per toolset) for # # convenient access with LD_LIBRARY_PATH. # return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias) return '$(builddir)/' + self.alias def WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files): """Write the target to regenerate the Makefile.""" options = params['options'] build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir) for filename in params['build_files_arg']] gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'], options.toplevel_dir) if not gyp_binary.startswith(os.sep): gyp_binary = os.path.join('.', gyp_binary) root_makefile.write( "quiet_cmd_regen_makefile = ACTION Regenerating $@\n" "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n" "%(makefile_name)s: %(deps)s\n" "\t$(call do_cmd,regen_makefile)\n\n" % { 'makefile_name': makefile_name, 'deps': ' '.join(map(Sourceify, build_files)), 'cmd': gyp.common.EncodePOSIXShellList( [gyp_binary, '-fmake'] + gyp.RegenerateFlags(options) + build_files_args)}) def PerformBuild(data, configurations, params): options = params['options'] for config in configurations: arguments = ['make'] if options.toplevel_dir and options.toplevel_dir != '.': arguments += '-C', options.toplevel_dir 
arguments.append('BUILDTYPE=' + config) print 'Building [%s]: %s' % (config, arguments) subprocess.check_call(arguments) def GenerateOutput(target_list, target_dicts, data, params): options = params['options'] flavor = gyp.common.GetFlavor(params) generator_flags = params.get('generator_flags', {}) builddir_name = generator_flags.get('output_dir', 'out') android_ndk_version = generator_flags.get('android_ndk_version', None) default_target = generator_flags.get('default_target', 'all') def CalculateMakefilePath(build_file, base_name): """Determine where to write a Makefile for a given gyp file.""" # Paths in gyp files are relative to the .gyp file, but we want # paths relative to the source root for the master makefile. Grab # the path of the .gyp file as the base to relativize against. # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp". base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.depth) # We write the file in the base_path directory. output_file = os.path.join(options.depth, base_path, base_name) if options.generator_output: output_file = os.path.join( options.depth, options.generator_output, base_path, base_name) base_path = gyp.common.RelativePath(os.path.dirname(build_file), options.toplevel_dir) return base_path, output_file # TODO: search for the first non-'Default' target. This can go # away when we add verification that all targets have the # necessary configurations. default_configuration = None toolsets = set([target_dicts[target]['toolset'] for target in target_list]) for target in target_list: spec = target_dicts[target] if spec['default_configuration'] != 'Default': default_configuration = spec['default_configuration'] break if not default_configuration: default_configuration = 'Default' srcdir = '.' 
makefile_name = 'Makefile' + options.suffix makefile_path = os.path.join(options.toplevel_dir, makefile_name) if options.generator_output: global srcdir_prefix makefile_path = os.path.join( options.toplevel_dir, options.generator_output, makefile_name) srcdir = gyp.common.RelativePath(srcdir, options.generator_output) srcdir_prefix = '$(srcdir)/' flock_command= 'flock' copy_archive_arguments = '-af' header_params = { 'default_target': default_target, 'builddir': builddir_name, 'default_configuration': default_configuration, 'flock': flock_command, 'flock_index': 1, 'link_commands': LINK_COMMANDS_LINUX, 'extra_commands': '', 'srcdir': srcdir, 'copy_archive_args': copy_archive_arguments, } if flavor == 'mac': flock_command = './gyp-mac-tool flock' header_params.update({ 'flock': flock_command, 'flock_index': 2, 'link_commands': LINK_COMMANDS_MAC, 'extra_commands': SHARED_HEADER_MAC_COMMANDS, }) elif flavor == 'android': header_params.update({ 'link_commands': LINK_COMMANDS_ANDROID, }) elif flavor == 'solaris': header_params.update({ 'flock': './gyp-flock-tool flock', 'flock_index': 2, }) elif flavor == 'freebsd': # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific. 
header_params.update({ 'flock': 'lockf', }) elif flavor == 'openbsd': copy_archive_arguments = '-pPRf' header_params.update({ 'copy_archive_args': copy_archive_arguments, }) elif flavor == 'aix': copy_archive_arguments = '-pPRf' header_params.update({ 'copy_archive_args': copy_archive_arguments, 'link_commands': LINK_COMMANDS_AIX, 'flock': './gyp-flock-tool flock', 'flock_index': 2, }) header_params.update({ 'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'), 'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'), 'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'), 'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'), 'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'), 'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'), 'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'), 'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'), }) build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) make_global_settings_array = data[build_file].get('make_global_settings', []) wrappers = {} for key, value in make_global_settings_array: if key.endswith('_wrapper'): wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value make_global_settings = '' for key, value in make_global_settings_array: if re.match('.*_wrapper', key): continue if value[0] != '$': value = '$(abspath %s)' % value wrapper = wrappers.get(key) if wrapper: value = '%s %s' % (wrapper, value) del wrappers[key] if key in ('CC', 'CC.host', 'CXX', 'CXX.host'): make_global_settings += ( 'ifneq (,$(filter $(origin %s), undefined default))\n' % key) # Let gyp-time envvars win over global settings. env_key = key.replace('.', '_') # CC.host -> CC_host if env_key in os.environ: value = os.environ[env_key] make_global_settings += ' %s = %s\n' % (key, value) make_global_settings += 'endif\n' else: make_global_settings += '%s ?= %s\n' % (key, value) # TODO(ukai): define cmd when only wrapper is specified in # make_global_settings. 
header_params['make_global_settings'] = make_global_settings gyp.common.EnsureDirExists(makefile_path) root_makefile = open(makefile_path, 'w') root_makefile.write(SHARED_HEADER % header_params) # Currently any versions have the same effect, but in future the behavior # could be different. if android_ndk_version: root_makefile.write( '# Define LOCAL_PATH for build of Android applications.\n' 'LOCAL_PATH := $(call my-dir)\n' '\n') for toolset in toolsets: root_makefile.write('TOOLSET := %s\n' % toolset) WriteRootHeaderSuffixRules(root_makefile) # Put build-time support tools next to the root Makefile. dest_path = os.path.dirname(makefile_path) gyp.common.CopyTool(flavor, dest_path) # Find the list of targets that derive from the gyp file(s) being built. needed_targets = set() for build_file in params['build_files']: for target in gyp.common.AllTargets(target_list, target_dicts, build_file): needed_targets.add(target) build_files = set() include_list = set() for qualified_target in target_list: build_file, target, toolset = gyp.common.ParseQualifiedTarget( qualified_target) this_make_global_settings = data[build_file].get('make_global_settings', []) assert make_global_settings_array == this_make_global_settings, ( "make_global_settings needs to be the same for all targets. %s vs. %s" % (this_make_global_settings, make_global_settings)) build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir)) included_files = data[build_file]['included_files'] for included_file in included_files: # The included_files entries are relative to the dir of the build file # that included them, so we have to undo that and then make them relative # to the root dir. relative_include_file = gyp.common.RelativePath( gyp.common.UnrelativePath(included_file, build_file), options.toplevel_dir) abs_include_file = os.path.abspath(relative_include_file) # If the include file is from the ~/.gyp dir, we should use absolute path # so that relocating the src dir doesn't break the path. 
if (params['home_dot_gyp'] and abs_include_file.startswith(params['home_dot_gyp'])): build_files.add(abs_include_file) else: build_files.add(relative_include_file) base_path, output_file = CalculateMakefilePath(build_file, target + '.' + toolset + options.suffix + '.mk') spec = target_dicts[qualified_target] configs = spec['configurations'] if flavor == 'mac': gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec) writer = MakefileWriter(generator_flags, flavor) writer.Write(qualified_target, base_path, output_file, spec, configs, part_of_all=qualified_target in needed_targets) # Our root_makefile lives at the source root. Compute the relative path # from there to the output_file for including. mkfile_rel_path = gyp.common.RelativePath(output_file, os.path.dirname(makefile_path)) include_list.add(mkfile_rel_path) # Write out per-gyp (sub-project) Makefiles. depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd()) for build_file in build_files: # The paths in build_files were relativized above, so undo that before # testing against the non-relativized items in target_list and before # calculating the Makefile path. build_file = os.path.join(depth_rel_path, build_file) gyp_targets = [target_dicts[target]['target_name'] for target in target_list if target.startswith(build_file) and target in needed_targets] # Only generate Makefiles for gyp files with targets. if not gyp_targets: continue base_path, output_file = CalculateMakefilePath(build_file, os.path.splitext(os.path.basename(build_file))[0] + '.Makefile') makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path), os.path.dirname(output_file)) writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets, builddir_name) # Write out the sorted list of includes. root_makefile.write('\n') for include_file in sorted(include_list): # We wrap each .mk include in an if statement so users can tell make to # not load a file by setting NO_LOAD. 
The below make code says, only # load the .mk file if the .mk filename doesn't start with a token in # NO_LOAD. root_makefile.write( "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n" " $(findstring $(join ^,$(prefix)),\\\n" " $(join ^," + include_file + ")))),)\n") root_makefile.write(" include " + include_file + "\n") root_makefile.write("endif\n") root_makefile.write('\n') if (not generator_flags.get('standalone') and generator_flags.get('auto_regeneration', True)): WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files) root_makefile.write(SHARED_FOOTER) root_makefile.close()
gpl-3.0
voxlol/scikit-learn
sklearn/dummy.py
208
17370
# Author: Mathieu Blondel <mathieu@mblondel.org> # Arnaud Joly <a.joly@ulg.ac.be> # Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk> # License: BSD 3 clause from __future__ import division import warnings import numpy as np import scipy.sparse as sp from .base import BaseEstimator, ClassifierMixin, RegressorMixin from .utils import check_random_state from .utils.validation import check_array from .utils.validation import check_consistent_length from .utils.random import random_choice_csc from .utils.stats import _weighted_percentile from .utils.multiclass import class_distribution class DummyClassifier(BaseEstimator, ClassifierMixin): """ DummyClassifier is a classifier that makes predictions using simple rules. This classifier is useful as a simple baseline to compare with other (real) classifiers. Do not use it for real problems. Read more in the :ref:`User Guide <dummy_estimators>`. Parameters ---------- strategy : str Strategy to use to generate predictions. * "stratified": generates predictions by respecting the training set's class distribution. * "most_frequent": always predicts the most frequent label in the training set. * "prior": always predicts the class that maximizes the class prior (like "most_frequent") and ``predict_proba`` returns the class prior. * "uniform": generates predictions uniformly at random. * "constant": always predicts a constant label that is provided by the user. This is useful for metrics that evaluate a non-majority class random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use. constant : int or str or array of shape = [n_outputs] The explicit constant as predicted by the "constant" strategy. This parameter is useful only for the "constant" strategy. Attributes ---------- classes_ : array or list of array of shape = [n_classes] Class labels for each output. n_classes_ : array or list of array of shape = [n_classes] Number of label for each output. 
class_prior_ : array or list of array of shape = [n_classes] Probability of each class for each output. n_outputs_ : int, Number of outputs. outputs_2d_ : bool, True if the output at fit is 2d, else false. sparse_output_ : bool, True if the array returned from predict is to be in sparse CSC format. Is automatically set to True if the input y is passed in sparse format. """ def __init__(self, strategy="stratified", random_state=None, constant=None): self.strategy = strategy self.random_state = random_state self.constant = constant def fit(self, X, y, sample_weight=None): """Fit the random classifier. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- self : object Returns self. """ if self.strategy not in ("most_frequent", "stratified", "uniform", "constant", "prior"): raise ValueError("Unknown strategy type.") if self.strategy == "uniform" and sp.issparse(y): y = y.toarray() warnings.warn('A local copy of the target data has been converted ' 'to a numpy array. Predicting on sparse target data ' 'with the uniform strategy would not save memory ' 'and would be slower.', UserWarning) self.sparse_output_ = sp.issparse(y) if not self.sparse_output_: y = np.atleast_1d(y) self.output_2d_ = y.ndim == 2 if y.ndim == 1: y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] if self.strategy == "constant": if self.constant is None: raise ValueError("Constant target value has to be specified " "when the constant strategy is used.") else: constant = np.reshape(np.atleast_1d(self.constant), (-1, 1)) if constant.shape[0] != self.n_outputs_: raise ValueError("Constant target value should have " "shape (%d, 1)." 
% self.n_outputs_) (self.classes_, self.n_classes_, self.class_prior_) = class_distribution(y, sample_weight) if (self.strategy == "constant" and any(constant[k] not in self.classes_[k] for k in range(self.n_outputs_))): # Checking in case of constant strategy if the constant # provided by the user is in y. raise ValueError("The constant target value must be " "present in training data") if self.n_outputs_ == 1 and not self.output_2d_: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] self.class_prior_ = self.class_prior_[0] return self def predict(self, X): """Perform classification on test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- y : array, shape = [n_samples] or [n_samples, n_outputs] Predicted target values for X. """ if not hasattr(self, "classes_"): raise ValueError("DummyClassifier not fitted.") X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) # numpy random_state expects Python int and not long as size argument # under Windows n_samples = int(X.shape[0]) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1: # Get same type even for self.n_outputs_ == 1 n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] # Compute probability only once if self.strategy == "stratified": proba = self.predict_proba(X) if self.n_outputs_ == 1: proba = [proba] if self.sparse_output_: class_prob = None if self.strategy in ("most_frequent", "prior"): classes_ = [np.array([cp.argmax()]) for cp in class_prior_] elif self.strategy == "stratified": class_prob = class_prior_ elif self.strategy == "uniform": raise ValueError("Sparse target prediction is not " "supported with the uniform strategy") elif self.strategy == 
"constant": classes_ = [np.array([c]) for c in constant] y = random_choice_csc(n_samples, classes_, class_prob, self.random_state) else: if self.strategy in ("most_frequent", "prior"): y = np.tile([classes_[k][class_prior_[k].argmax()] for k in range(self.n_outputs_)], [n_samples, 1]) elif self.strategy == "stratified": y = np.vstack(classes_[k][proba[k].argmax(axis=1)] for k in range(self.n_outputs_)).T elif self.strategy == "uniform": ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)] for k in range(self.n_outputs_)] y = np.vstack(ret).T elif self.strategy == "constant": y = np.tile(self.constant, (n_samples, 1)) if self.n_outputs_ == 1 and not self.output_2d_: y = np.ravel(y) return y def predict_proba(self, X): """ Return probability estimates for the test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- P : array-like or list of array-lke of shape = [n_samples, n_classes] Returns the probability of the sample for each class in the model, where classes are ordered arithmetically, for each output. 
""" if not hasattr(self, "classes_"): raise ValueError("DummyClassifier not fitted.") X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) # numpy random_state expects Python int and not long as size argument # under Windows n_samples = int(X.shape[0]) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1 and not self.output_2d_: # Get same type even for self.n_outputs_ == 1 n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] P = [] for k in range(self.n_outputs_): if self.strategy == "most_frequent": ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax() out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 elif self.strategy == "prior": out = np.ones((n_samples, 1)) * class_prior_[k] elif self.strategy == "stratified": out = rs.multinomial(1, class_prior_[k], size=n_samples) elif self.strategy == "uniform": out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) out /= n_classes_[k] elif self.strategy == "constant": ind = np.where(classes_[k] == constant[k]) out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 P.append(out) if self.n_outputs_ == 1 and not self.output_2d_: P = P[0] return P def predict_log_proba(self, X): """ Return log probability estimates for the test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- P : array-like or list of array-like of shape = [n_samples, n_classes] Returns the log probability of the sample for each class in the model, where classes are ordered arithmetically for each output. 
""" proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: return [np.log(p) for p in proba] class DummyRegressor(BaseEstimator, RegressorMixin): """ DummyRegressor is a regressor that makes predictions using simple rules. This regressor is useful as a simple baseline to compare with other (real) regressors. Do not use it for real problems. Read more in the :ref:`User Guide <dummy_estimators>`. Parameters ---------- strategy : str Strategy to use to generate predictions. * "mean": always predicts the mean of the training set * "median": always predicts the median of the training set * "quantile": always predicts a specified quantile of the training set, provided with the quantile parameter. * "constant": always predicts a constant value that is provided by the user. constant : int or float or array of shape = [n_outputs] The explicit constant as predicted by the "constant" strategy. This parameter is useful only for the "constant" strategy. quantile : float in [0.0, 1.0] The quantile to predict using the "quantile" strategy. A quantile of 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the maximum. Attributes ---------- constant_ : float or array of shape [n_outputs] Mean or median or quantile of the training targets or constant value given by the user. n_outputs_ : int, Number of outputs. outputs_2d_ : bool, True if the output at fit is 2d, else false. """ def __init__(self, strategy="mean", constant=None, quantile=None): self.strategy = strategy self.constant = constant self.quantile = quantile def fit(self, X, y, sample_weight=None): """Fit the random regressor. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] or [n_samples, n_outputs] Target values. sample_weight : array-like of shape = [n_samples], optional Sample weights. 
Returns ------- self : object Returns self. """ if self.strategy not in ("mean", "median", "quantile", "constant"): raise ValueError("Unknown strategy type: %s, expected " "'mean', 'median', 'quantile' or 'constant'" % self.strategy) y = check_array(y, ensure_2d=False) if len(y) == 0: raise ValueError("y must not be empty.") self.output_2d_ = y.ndim == 2 if y.ndim == 1: y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] check_consistent_length(X, y, sample_weight) if self.strategy == "mean": self.constant_ = np.average(y, axis=0, weights=sample_weight) elif self.strategy == "median": if sample_weight is None: self.constant_ = np.median(y, axis=0) else: self.constant_ = [_weighted_percentile(y[:, k], sample_weight, percentile=50.) for k in range(self.n_outputs_)] elif self.strategy == "quantile": if self.quantile is None or not np.isscalar(self.quantile): raise ValueError("Quantile must be a scalar in the range " "[0.0, 1.0], but got %s." % self.quantile) percentile = self.quantile * 100.0 if sample_weight is None: self.constant_ = np.percentile(y, axis=0, q=percentile) else: self.constant_ = [_weighted_percentile(y[:, k], sample_weight, percentile=percentile) for k in range(self.n_outputs_)] elif self.strategy == "constant": if self.constant is None: raise TypeError("Constant target value has to be specified " "when the constant strategy is used.") self.constant = check_array(self.constant, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False, ensure_min_samples=0) if self.output_2d_ and self.constant.shape[0] != y.shape[1]: raise ValueError( "Constant target value should have " "shape (%d, 1)." % y.shape[1]) self.constant_ = self.constant self.constant_ = np.reshape(self.constant_, (1, -1)) return self def predict(self, X): """ Perform classification on test vectors X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. 
Returns ------- y : array, shape = [n_samples] or [n_samples, n_outputs] Predicted target values for X. """ if not hasattr(self, "constant_"): raise ValueError("DummyRegressor not fitted.") X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) n_samples = X.shape[0] y = np.ones((n_samples, 1)) * self.constant_ if self.n_outputs_ == 1 and not self.output_2d_: y = np.ravel(y) return y
bsd-3-clause
andreparrish/python-for-android
python3-alpha/python3-src/Lib/test/test_audioop.py
51
7803
import audioop import unittest from test.support import run_unittest def gendata1(): return b'\0\1\2' def gendata2(): if audioop.getsample(b'\0\1', 2, 0) == 1: return b'\0\0\0\1\0\2' else: return b'\0\0\1\0\2\0' def gendata4(): if audioop.getsample(b'\0\0\0\1', 4, 0) == 1: return b'\0\0\0\0\0\0\0\1\0\0\0\2' else: return b'\0\0\0\0\1\0\0\0\2\0\0\0' data = [gendata1(), gendata2(), gendata4()] INVALID_DATA = [ ('abc', 0), ('abc', 2), ('abc', 4), ] class TestAudioop(unittest.TestCase): def test_max(self): self.assertEqual(audioop.max(data[0], 1), 2) self.assertEqual(audioop.max(data[1], 2), 2) self.assertEqual(audioop.max(data[2], 4), 2) def test_minmax(self): self.assertEqual(audioop.minmax(data[0], 1), (0, 2)) self.assertEqual(audioop.minmax(data[1], 2), (0, 2)) self.assertEqual(audioop.minmax(data[2], 4), (0, 2)) def test_maxpp(self): self.assertEqual(audioop.maxpp(data[0], 1), 0) self.assertEqual(audioop.maxpp(data[1], 2), 0) self.assertEqual(audioop.maxpp(data[2], 4), 0) def test_avg(self): self.assertEqual(audioop.avg(data[0], 1), 1) self.assertEqual(audioop.avg(data[1], 2), 1) self.assertEqual(audioop.avg(data[2], 4), 1) def test_avgpp(self): self.assertEqual(audioop.avgpp(data[0], 1), 0) self.assertEqual(audioop.avgpp(data[1], 2), 0) self.assertEqual(audioop.avgpp(data[2], 4), 0) def test_rms(self): self.assertEqual(audioop.rms(data[0], 1), 1) self.assertEqual(audioop.rms(data[1], 2), 1) self.assertEqual(audioop.rms(data[2], 4), 1) def test_cross(self): self.assertEqual(audioop.cross(data[0], 1), 0) self.assertEqual(audioop.cross(data[1], 2), 0) self.assertEqual(audioop.cross(data[2], 4), 0) def test_add(self): data2 = [] for d in data: str = bytearray(len(d)) for i,b in enumerate(d): str[i] = 2*b data2.append(str) self.assertEqual(audioop.add(data[0], data[0], 1), data2[0]) self.assertEqual(audioop.add(data[1], data[1], 2), data2[1]) self.assertEqual(audioop.add(data[2], data[2], 4), data2[2]) def test_bias(self): # Note: this test assumes that avg() works d1 
= audioop.bias(data[0], 1, 100) d2 = audioop.bias(data[1], 2, 100) d4 = audioop.bias(data[2], 4, 100) self.assertEqual(audioop.avg(d1, 1), 101) self.assertEqual(audioop.avg(d2, 2), 101) self.assertEqual(audioop.avg(d4, 4), 101) def test_lin2lin(self): # too simple: we test only the size for d1 in data: for d2 in data: got = len(d1)//3 wtd = len(d2)//3 self.assertEqual(len(audioop.lin2lin(d1, got, wtd)), len(d2)) def test_adpcm2lin(self): # Very cursory test self.assertEqual(audioop.adpcm2lin(b'\0\0', 1, None), (b'\0\0\0\0', (0,0))) def test_lin2adpcm(self): # Very cursory test self.assertEqual(audioop.lin2adpcm(b'\0\0\0\0', 1, None), (b'\0\0', (0,0))) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(data[0], 1), b'\xd5\xc5\xf5') self.assertEqual(audioop.lin2alaw(data[1], 2), b'\xd5\xd5\xd5') self.assertEqual(audioop.lin2alaw(data[2], 4), b'\xd5\xd5\xd5') def test_alaw2lin(self): # Cursory d = audioop.lin2alaw(data[0], 1) self.assertEqual(audioop.alaw2lin(d, 1), data[0]) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(data[0], 1), b'\xff\xe7\xdb') self.assertEqual(audioop.lin2ulaw(data[1], 2), b'\xff\xff\xff') self.assertEqual(audioop.lin2ulaw(data[2], 4), b'\xff\xff\xff') def test_ulaw2lin(self): # Cursory d = audioop.lin2ulaw(data[0], 1) self.assertEqual(audioop.ulaw2lin(d, 1), data[0]) def test_mul(self): data2 = [] for d in data: str = bytearray(len(d)) for i,b in enumerate(d): str[i] = 2*b data2.append(str) self.assertEqual(audioop.mul(data[0], 1, 2), data2[0]) self.assertEqual(audioop.mul(data[1],2, 2), data2[1]) self.assertEqual(audioop.mul(data[2], 4, 2), data2[2]) def test_ratecv(self): state = None d1, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state) d2, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state) self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002') def test_reverse(self): self.assertEqual(audioop.reverse(data[0], 1), b'\2\1\0') def test_tomono(self): data2 = bytearray() for d in data[0]: 
data2.append(d) data2.append(d) self.assertEqual(audioop.tomono(data2, 1, 0.5, 0.5), data[0]) def test_tostereo(self): data2 = bytearray() for d in data[0]: data2.append(d) data2.append(d) self.assertEqual(audioop.tostereo(data[0], 1, 1, 1), data2) def test_findfactor(self): self.assertEqual(audioop.findfactor(data[1], data[1]), 1.0) def test_findfit(self): self.assertEqual(audioop.findfit(data[1], data[1]), (0, 1.0)) def test_findmax(self): self.assertEqual(audioop.findmax(data[1], 1), 2) def test_getsample(self): for i in range(3): self.assertEqual(audioop.getsample(data[0], 1, i), i) self.assertEqual(audioop.getsample(data[1], 2, i), i) self.assertEqual(audioop.getsample(data[2], 4, i), i) def test_negativelen(self): # from issue 3306, previously it segfaulted self.assertRaises(audioop.error, audioop.findmax, ''.join(chr(x) for x in range(256)), -2392392) def test_issue7673(self): state = None for data, size in INVALID_DATA: size2 = size self.assertRaises(audioop.error, audioop.getsample, data, size, 0) self.assertRaises(audioop.error, audioop.max, data, size) self.assertRaises(audioop.error, audioop.minmax, data, size) self.assertRaises(audioop.error, audioop.avg, data, size) self.assertRaises(audioop.error, audioop.rms, data, size) self.assertRaises(audioop.error, audioop.avgpp, data, size) self.assertRaises(audioop.error, audioop.maxpp, data, size) self.assertRaises(audioop.error, audioop.cross, data, size) self.assertRaises(audioop.error, audioop.mul, data, size, 1.0) self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5) self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5) self.assertRaises(audioop.error, audioop.add, data, data, size) self.assertRaises(audioop.error, audioop.bias, data, size, 0) self.assertRaises(audioop.error, audioop.reverse, data, size) self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2) self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state) 
self.assertRaises(audioop.error, audioop.lin2ulaw, data, size) self.assertRaises(audioop.error, audioop.ulaw2lin, data, size) self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.alaw2lin, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state) def test_main(): run_unittest(TestAudioop) if __name__ == '__main__': test_main()
apache-2.0
cpennington/edx-platform
openedx/core/lib/gating/api.py
1
18895
""" API for the gating djangoapp """ import json import logging from django.contrib.auth.models import User from django.urls import reverse from django.utils.translation import ugettext as _ from completion.models import BlockCompletion from lms.djangoapps.courseware.access import _has_access_to_course from lms.djangoapps.course_blocks.api import get_course_blocks from lms.djangoapps.grades.api import SubsectionGradeFactory from milestones import api as milestones_api from opaque_keys.edx.keys import UsageKey from openedx.core.lib.gating.exceptions import GatingValidationError from util import milestones_helpers from xblock.completable import XBlockCompletionMode as CompletionMode from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError log = logging.getLogger(__name__) # This is used to namespace gating-specific milestones GATING_NAMESPACE_QUALIFIER = '.gating' def _get_prerequisite_milestone(prereq_content_key): """ Get gating milestone associated with the given content usage key. Arguments: prereq_content_key (str|UsageKey): The content usage key Returns: dict: Milestone dict """ milestones = milestones_api.get_milestones("{usage_key}{qualifier}".format( usage_key=prereq_content_key, qualifier=GATING_NAMESPACE_QUALIFIER )) if not milestones: log.warning(u"Could not find gating milestone for prereq UsageKey %s", prereq_content_key) return None if len(milestones) > 1: # We should only ever have one gating milestone per UsageKey # Log a warning here and pick the first one log.warning(u"Multiple gating milestones found for prereq UsageKey %s", prereq_content_key) return milestones[0] def _validate_min_score(min_score): """ Validates the minimum score entered by the Studio user. 
Arguments: min_score (str|int): The minimum score to validate Returns: None Raises: GatingValidationError: If the minimum score is not valid """ if min_score: message = _(u"%(min_score)s is not a valid grade percentage") % {'min_score': min_score} try: min_score = int(min_score) except ValueError: raise GatingValidationError(message) if min_score < 0 or min_score > 100: raise GatingValidationError(message) def gating_enabled(default=None): """ Decorator that checks the enable_subsection_gating course flag to see if the subsection gating feature is active for a given course. If not, calls to the decorated function return the specified default value. Arguments: default (ANY): The value to return if the enable_subsection_gating course flag is False Returns: ANY: The specified default value if the gating feature is off, otherwise the result of the decorated function """ def wrap(f): # pylint: disable=missing-docstring def function_wrapper(course, *args): if not course.enable_subsection_gating: return default return f(course, *args) return function_wrapper return wrap def find_gating_milestones(course_key, content_key=None, relationship=None, user=None): """ Finds gating milestone dicts related to the given supplied parameters. Arguments: course_key (str|CourseKey): The course key content_key (str|UsageKey): The content usage key relationship (str): The relationship type (e.g. 'requires') user (dict): The user dict (e.g. {'id': 4}) Returns: list: A list of milestone dicts """ return [ m for m in milestones_api.get_course_content_milestones(course_key, content_key, relationship, user) if GATING_NAMESPACE_QUALIFIER in m.get('namespace') ] def get_gating_milestone(course_key, content_key, relationship): """ Gets a single gating milestone dict related to the given supplied parameters. Arguments: course_key (str|CourseKey): The course key content_key (str|UsageKey): The content usage key relationship (str): The relationship type (e.g. 
'requires') Returns: dict or None: The gating milestone dict or None """ try: return find_gating_milestones(course_key, content_key, relationship)[0] except IndexError: return None def get_prerequisites(course_key): """ Find all the gating milestones associated with a course and the XBlock info associated with those gating milestones. Arguments: course_key (str|CourseKey): The course key Returns: list: A list of dicts containing the milestone and associated XBlock info """ course_content_milestones = find_gating_milestones(course_key) milestones_by_block_id = {} block_ids = [] for milestone in course_content_milestones: prereq_content_key = _get_gating_block_id(milestone) block_id = UsageKey.from_string(prereq_content_key).block_id block_ids.append(block_id) milestones_by_block_id[block_id] = milestone result = [] for block in modulestore().get_items(course_key, qualifiers={'name': block_ids}): milestone = milestones_by_block_id.get(block.location.block_id) if milestone: milestone['block_display_name'] = block.display_name milestone['block_usage_key'] = str(block.location) result.append(milestone) return result def add_prerequisite(course_key, prereq_content_key): """ Creates a new Milestone and CourseContentMilestone indicating that the given course content fulfills a prerequisite for gating Arguments: course_key (str|CourseKey): The course key prereq_content_key (str|UsageKey): The prerequisite content usage key Returns: None """ milestone = milestones_api.add_milestone( { 'name': _(u'Gating milestone for {usage_key}').format(usage_key=str(prereq_content_key)), 'namespace': "{usage_key}{qualifier}".format( usage_key=prereq_content_key, qualifier=GATING_NAMESPACE_QUALIFIER ), 'description': _('System defined milestone'), }, propagate=False ) milestones_api.add_course_content_milestone(course_key, prereq_content_key, 'fulfills', milestone) def remove_prerequisite(prereq_content_key): """ Removes the Milestone and CourseContentMilestones related to the gating 
prerequisite which the given course content fulfills Arguments: prereq_content_key (str|UsageKey): The prerequisite content usage key Returns: None """ milestones = milestones_api.get_milestones("{usage_key}{qualifier}".format( usage_key=prereq_content_key, qualifier=GATING_NAMESPACE_QUALIFIER )) for milestone in milestones: milestones_api.remove_milestone(milestone.get('id')) def is_prerequisite(course_key, prereq_content_key): """ Returns True if there is at least one CourseContentMilestone which the given course content fulfills Arguments: course_key (str|CourseKey): The course key prereq_content_key (str|UsageKey): The prerequisite content usage key Returns: bool: True if the course content fulfills a CourseContentMilestone, otherwise False """ return get_gating_milestone( course_key, prereq_content_key, 'fulfills' ) is not None def set_required_content(course_key, gated_content_key, prereq_content_key, min_score='', min_completion=''): """ Adds a `requires` milestone relationship for the given gated_content_key if a prerequisite prereq_content_key is provided. If prereq_content_key is None, removes the `requires` milestone relationship. 
Arguments: course_key (str|CourseKey): The course key gated_content_key (str|UsageKey): The gated content usage key prereq_content_key (str|UsageKey): The prerequisite content usage key min_score (str|int): The minimum score min_completion (str|int): The minimum completion percentage Returns: None """ milestone = None for gating_milestone in find_gating_milestones(course_key, gated_content_key, 'requires'): if not prereq_content_key or prereq_content_key not in gating_milestone.get('namespace'): milestones_api.remove_course_content_milestone(course_key, gated_content_key, gating_milestone) else: milestone = gating_milestone if prereq_content_key: _validate_min_score(min_score) requirements = {'min_score': min_score, 'min_completion': min_completion} if not milestone: milestone = _get_prerequisite_milestone(prereq_content_key) milestones_api.add_course_content_milestone(course_key, gated_content_key, 'requires', milestone, requirements) def get_required_content(course_key, gated_content_key): """ Returns the prerequisite content usage key, minimum score and minimum completion percentage needed for fulfillment of that prerequisite for the given gated_content_key. Args: course_key (str|CourseKey): The course key gated_content_key (str|UsageKey): The gated content usage key Returns: tuple: The prerequisite content usage key, minimum score and minimum completion percentage, (None, None, None) if the content is not gated """ milestone = get_gating_milestone(course_key, gated_content_key, 'requires') if milestone: return ( _get_gating_block_id(milestone), milestone.get('requirements', {}).get('min_score', None), milestone.get('requirements', {}).get('min_completion', None), ) else: return None, None, None @gating_enabled(default=[]) def get_gated_content(course, user): """ Returns the unfulfilled gated content usage keys in the given course. 
Arguments: course (CourseDescriptor): The course user (User): The user Returns: list: The list of gated content usage keys for the given course """ if _has_access_to_course(user, 'staff', course.id): return [] else: # Get the unfulfilled gating milestones for this course, for this user return [ m['content_id'] for m in find_gating_milestones( course.id, None, 'requires', {'id': user.id} ) ] def is_gate_fulfilled(course_key, gating_content_key, user_id): """ Determines if a prerequisite section specified by gating_content_key has any unfulfilled milestones. Arguments: course_key (CourseUsageLocator): Course locator gating_content_key (BlockUsageLocator): The locator for the section content user_id: The id of the user Returns: Returns True if section has no unfufilled milestones or is not a prerequisite. Returns False otherwise """ gating_milestone = get_gating_milestone(course_key, gating_content_key, "fulfills") if not gating_milestone: return True unfulfilled_milestones = [ m['content_id'] for m in find_gating_milestones( course_key, None, 'requires', {'id': user_id} ) if m['namespace'] == gating_milestone['namespace'] ] return not unfulfilled_milestones def compute_is_prereq_met(content_id, user_id, recalc_on_unmet=False): """ Returns true if the prequiste has been met for a given milestone. 
Will recalculate the subsection grade if specified and prereq unmet Arguments: content_id (BlockUsageLocator): BlockUsageLocator for the content user_id: The id of the user recalc_on_unmet: Recalculate the grade if prereq has not yet been met Returns: tuple: True|False, prereq_meta_info = { 'url': prereq_url|None, 'display_name': prereq_name|None} """ course_key = content_id.course_key # if unfullfilled milestones exist it means prereq has not been met unfulfilled_milestones = milestones_helpers.get_course_content_milestones( course_key, content_id, 'requires', user_id ) prereq_met = not unfulfilled_milestones prereq_meta_info = {'url': None, 'display_name': None} if prereq_met or not recalc_on_unmet: return prereq_met, prereq_meta_info milestone = unfulfilled_milestones[0] student = User.objects.get(id=user_id) store = modulestore() with store.bulk_operations(course_key): subsection_usage_key = UsageKey.from_string(_get_gating_block_id(milestone)) subsection = store.get_item(subsection_usage_key) prereq_meta_info = { 'url': reverse('jump_to', kwargs={'course_id': course_key, 'location': subsection_usage_key}), 'display_name': subsection.display_name, 'id': str(subsection_usage_key) } prereq_met = update_milestone(milestone, subsection_usage_key, milestone, student) return prereq_met, prereq_meta_info def update_milestone(milestone, usage_key, prereq_milestone, user, grade_percentage=None, completion_percentage=None): """ Updates the milestone record based on evaluation of prerequisite met. 
Arguments: milestone: The gated milestone being evaluated usage_key: Usage key of the prerequisite subsection prereq_milestone: The gating milestone user: The user who has fulfilled milestone grade_percentage: Grade percentage of prerequisite subsection completion_percentage: Completion percentage of prerequisite subsection Returns: True if prerequisite has been met, False if not """ min_score, min_completion = _get_minimum_required_percentage(milestone) if not grade_percentage: grade_percentage = get_subsection_grade_percentage(usage_key, user) if min_score > 0 else 0 if not completion_percentage: completion_percentage = get_subsection_completion_percentage(usage_key, user) if min_completion > 0 else 0 if grade_percentage >= min_score and completion_percentage >= min_completion: milestones_helpers.add_user_milestone({'id': user.id}, prereq_milestone) return True else: milestones_helpers.remove_user_milestone({'id': user.id}, prereq_milestone) return False def _get_gating_block_id(milestone): """ Return the block id of the gating milestone """ return milestone.get('namespace', '').replace(GATING_NAMESPACE_QUALIFIER, '') def get_subsection_grade_percentage(subsection_usage_key, user): """ Computes grade percentage for a subsection in a given course for a user Arguments: subsection_usage_key: key of subsection user: The user whose grade needs to be computed Returns: User's grade percentage for given subsection """ try: subsection_structure = get_course_blocks(user, subsection_usage_key) if any(subsection_structure): subsection_grade_factory = SubsectionGradeFactory(user, course_structure=subsection_structure) if subsection_usage_key in subsection_structure: subsection_grade = subsection_grade_factory.update(subsection_structure[subsection_usage_key]) return _get_subsection_percentage(subsection_grade) except ItemNotFoundError as err: log.warning(u"Could not find course_block for subsection=%s error=%s", subsection_usage_key, err) return 0.0 def 
get_subsection_completion_percentage(subsection_usage_key, user): """ Computes completion percentage for a subsection in a given course for a user Arguments: subsection_usage_key: key of subsection user: The user whose completion percentage needs to be computed Returns: User's completion percentage for given subsection """ subsection_completion_percentage = 0.0 try: subsection_structure = get_course_blocks(user, subsection_usage_key) if any(subsection_structure): completable_blocks = [] for block in subsection_structure: completion_mode = subsection_structure.get_xblock_field( block, 'completion_mode' ) # always exclude html blocks (in addition to EXCLUDED blocks) for gating calculations # See https://openedx.atlassian.net/browse/WL-1798 if completion_mode not in (CompletionMode.AGGREGATOR, CompletionMode.EXCLUDED) \ and not block.block_type == 'html': completable_blocks.append(block) if not completable_blocks: return 100 subsection_completion_total = 0 course_key = subsection_usage_key.course_key course_block_completions = BlockCompletion.get_learning_context_completions(user, course_key) for block in completable_blocks: if course_block_completions.get(block): subsection_completion_total += course_block_completions.get(block) subsection_completion_percentage = min( 100 * (subsection_completion_total / float(len(completable_blocks))), 100 ) except ItemNotFoundError as err: log.warning(u"Could not find course_block for subsection=%s error=%s", subsection_usage_key, err) return subsection_completion_percentage def _get_minimum_required_percentage(milestone): """ Returns the minimum score and minimum completion percentage requirement for the given milestone. 
""" # Default minimum score and minimum completion percentage to 100 min_score = 100 min_completion = 100 requirements = milestone.get('requirements') if requirements: try: min_score = int(requirements.get('min_score')) except (ValueError, TypeError): log.warning( u'Gating: Failed to find minimum score for gating milestone %s, defaulting to 100', json.dumps(milestone) ) try: min_completion = int(requirements.get('min_completion', 0)) except (ValueError, TypeError): log.warning( u'Gating: Failed to find minimum completion percentage for gating milestone %s, defaulting to 100', json.dumps(milestone) ) return min_score, min_completion def _get_subsection_percentage(subsection_grade): """ Returns the percentage value of the given subsection_grade. """ return subsection_grade.percent_graded * 100.0
agpl-3.0
Mozta/pagina-diagnostijuego
venv/lib/python2.7/site-packages/django/db/models/manager.py
108
7232
import copy import inspect from importlib import import_module from django.db import router from django.db.models.query import QuerySet from django.utils import six from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class BaseManager(object): # Tracks each time a Manager instance is created. Used to retain order. creation_counter = 0 # Set to True for the 'objects' managers that are automatically created. auto_created = False #: If set to True the manager will be serialized into migrations and will #: thus be available in e.g. RunPython operations use_in_migrations = False def __new__(cls, *args, **kwargs): # We capture the arguments to make returning them trivial obj = super(BaseManager, cls).__new__(cls) obj._constructor_args = (args, kwargs) return obj def __init__(self): super(BaseManager, self).__init__() self._set_creation_counter() self.model = None self.name = None self._db = None self._hints = {} def __str__(self): """ Return "app_label.model_label.manager_name". """ return '%s.%s' % (self.model._meta.label, self.name) def deconstruct(self): """ Returns a 5-tuple of the form (as_manager (True), manager_class, queryset_class, args, kwargs). Raises a ValueError if the manager is dynamically generated. """ qs_class = self._queryset_class if getattr(self, '_built_with_as_manager', False): # using MyQuerySet.as_manager() return ( True, # as_manager None, # manager_class '%s.%s' % (qs_class.__module__, qs_class.__name__), # qs_class None, # args None, # kwargs ) else: module_name = self.__module__ name = self.__class__.__name__ # Make sure it's actually there and not an inner class module = import_module(module_name) if not hasattr(module, name): raise ValueError( "Could not find manager %s in %s.\n" "Please note that you need to inherit from managers you " "dynamically generated with 'from_queryset()'." 
% (name, module_name) ) return ( False, # as_manager '%s.%s' % (module_name, name), # manager_class None, # qs_class self._constructor_args[0], # args self._constructor_args[1], # kwargs ) def check(self, **kwargs): return [] @classmethod def _get_queryset_methods(cls, queryset_class): def create_method(name, method): def manager_method(self, *args, **kwargs): return getattr(self.get_queryset(), name)(*args, **kwargs) manager_method.__name__ = method.__name__ manager_method.__doc__ = method.__doc__ return manager_method new_methods = {} # Refs http://bugs.python.org/issue1785. predicate = inspect.isfunction if six.PY3 else inspect.ismethod for name, method in inspect.getmembers(queryset_class, predicate=predicate): # Only copy missing methods. if hasattr(cls, name): continue # Only copy public methods or methods with the attribute `queryset_only=False`. queryset_only = getattr(method, 'queryset_only', None) if queryset_only or (queryset_only is None and name.startswith('_')): continue # Copy the method onto the manager. new_methods[name] = create_method(name, method) return new_methods @classmethod def from_queryset(cls, queryset_class, class_name=None): if class_name is None: class_name = '%sFrom%s' % (cls.__name__, queryset_class.__name__) class_dict = { '_queryset_class': queryset_class, } class_dict.update(cls._get_queryset_methods(queryset_class)) return type(class_name, (cls,), class_dict) def contribute_to_class(self, model, name): if not self.name: self.name = name self.model = model setattr(model, name, ManagerDescriptor(self)) model._meta.add_manager(self) def _set_creation_counter(self): """ Sets the creation counter value for this instance and increments the class-level copy. 
""" self.creation_counter = BaseManager.creation_counter BaseManager.creation_counter += 1 def db_manager(self, using=None, hints=None): obj = copy.copy(self) obj._db = using or self._db obj._hints = hints or self._hints return obj @property def db(self): return self._db or router.db_for_read(self.model, **self._hints) ####################### # PROXIES TO QUERYSET # ####################### def get_queryset(self): """ Returns a new QuerySet object. Subclasses can override this method to easily customize the behavior of the Manager. """ return self._queryset_class(model=self.model, using=self._db, hints=self._hints) def all(self): # We can't proxy this method through the `QuerySet` like we do for the # rest of the `QuerySet` methods. This is because `QuerySet.all()` # works by creating a "copy" of the current queryset and in making said # copy, all the cached `prefetch_related` lookups are lost. See the # implementation of `RelatedManager.get_queryset()` for a better # understanding of how this comes into play. 
return self.get_queryset() def __eq__(self, other): return ( isinstance(other, self.__class__) and self._constructor_args == other._constructor_args ) def __ne__(self, other): return not (self == other) def __hash__(self): return id(self) class Manager(BaseManager.from_queryset(QuerySet)): pass class ManagerDescriptor(object): def __init__(self, manager): self.manager = manager def __get__(self, instance, cls=None): if instance is not None: raise AttributeError("Manager isn't accessible via %s instances" % cls.__name__) if cls._meta.abstract: raise AttributeError("Manager isn't available; %s is abstract" % ( cls._meta.object_name, )) if cls._meta.swapped: raise AttributeError( "Manager isn't available; '%s.%s' has been swapped for '%s'" % ( cls._meta.app_label, cls._meta.object_name, cls._meta.swapped, ) ) return cls._meta.managers_map[self.manager.name] class EmptyManager(Manager): def __init__(self, model): super(EmptyManager, self).__init__() self.model = model def get_queryset(self): return super(EmptyManager, self).get_queryset().none()
gpl-3.0
sotdjin/glibglab
venv/lib/python2.7/site-packages/six.py
2715
30098
"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2015 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. 
This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", 
"cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + 
".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", 
"urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", 
"urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes 
= [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except 
AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return 
d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, 
"exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): if from_value is None: raise value raise value from from_value """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): raise value from from_value """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. 
class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. 
Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
mit
TeamTwisted/external_chromium_org
remoting/tools/runclient.py
182
1717
#!/usr/bin/env python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Gets the chromoting host info from an input arg and then tries to find the authentication info in the .chromotingAuthToken file so that the host authentication arguments can be automatically set. """ import os import platform import sys def main(): auth_filepath = os.path.join(os.path.expanduser('~'), '.chromotingAuthToken') script_path = os.path.dirname(__file__) if platform.system() == "Windows": # TODO(garykac): Make this work on Windows. print 'Not yet supported on Windows.' return 1 elif platform.system() == "Darwin": # Darwin == MacOSX client_path = '../../xcodebuild/Debug/chromoting_simple_client' else: client_path = '../../out/Debug/chromoting_x11_client' client_path = os.path.join(script_path, client_path) # Read username and auth token from token file. auth = open(auth_filepath) authinfo = auth.readlines() username = authinfo[0].rstrip() authtoken = authinfo[1].rstrip() # Request final 8 characters of Host JID from user. # This assumes that the host is published under the same username as the # client attempting to connect. print 'Host JID:', username + '/chromoting', hostjid_suffix = raw_input() hostjid = username + '/chromoting' + hostjid_suffix.upper() command = [] command.append(client_path) command.append('--host_jid ' + hostjid) command.append('--jid ' + username) command.append('--token ' + authtoken) # Launch the client os.system(' '.join(command)) return 0 if __name__ == '__main__': sys.exit(main())
bsd-3-clause
mrbox/django
tests/aggregation/tests.py
17
45691
from __future__ import unicode_literals import datetime import re from decimal import Decimal from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( F, Avg, Count, DecimalField, DurationField, FloatField, Func, IntegerField, Max, Min, Sum, Value, ) from django.test import TestCase from django.test.utils import Approximate, CaptureQueriesContext from django.utils import six, timezone from .models import Author, Book, Publisher, Store class AggregateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1)) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2)) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, 
pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_empty_aggregate(self): 
self.assertEqual(Author.objects.all().aggregate(), {}) def test_aggregate_in_order_by(self): msg = ( 'Using an aggregate in order_by() without also including it in ' 'annotate() is not allowed: Avg(F(book__rating)' ) with self.assertRaisesMessage(FieldError, msg): Author.objects.values('age').order_by(Avg('book__rating')) def test_single_aggregate(self): vals = Author.objects.aggregate(Avg("age")) self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)}) def test_multiple_aggregates(self): vals = Author.objects.aggregate(Sum("age"), Avg("age")) self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}) def test_filter_aggregate(self): vals = Author.objects.filter(age__gt=29).aggregate(Sum("age")) self.assertEqual(len(vals), 1) self.assertEqual(vals["age__sum"], 254) def test_related_aggregate(self): vals = Author.objects.aggregate(Avg("friends__age")) self.assertEqual(len(vals), 1) self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2) vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age")) self.assertEqual(len(vals), 1) self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2) vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating")) self.assertEqual(len(vals), 1) self.assertEqual(vals["book__rating__avg"], 4.0) vals = Book.objects.aggregate(Sum("publisher__num_awards")) self.assertEqual(len(vals), 1) self.assertEqual(vals["publisher__num_awards__sum"], 30) vals = Publisher.objects.aggregate(Sum("book__price")) self.assertEqual(len(vals), 1) self.assertEqual(vals["book__price__sum"], Decimal("270.27")) def test_aggregate_multi_join(self): vals = Store.objects.aggregate(Max("books__authors__age")) self.assertEqual(len(vals), 1) self.assertEqual(vals["books__authors__age__max"], 57) vals = Author.objects.aggregate(Min("book__publisher__num_awards")) self.assertEqual(len(vals), 1) self.assertEqual(vals["book__publisher__num_awards__min"], 1) def 
test_aggregate_alias(self): vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating")) self.assertEqual(len(vals), 1) self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2) def test_annotate_basic(self): self.assertQuerysetEqual( Book.objects.annotate().order_by('pk'), [ "The Definitive Guide to Django: Web Development Done Right", "Sams Teach Yourself Django in 24 Hours", "Practical Django Projects", "Python Web Development with Django", "Artificial Intelligence: A Modern Approach", "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp" ], lambda b: b.name ) books = Book.objects.annotate(mean_age=Avg("authors__age")) b = books.get(pk=self.b1.pk) self.assertEqual( b.name, 'The Definitive Guide to Django: Web Development Done Right' ) self.assertEqual(b.mean_age, 34.5) def test_annotate_defer(self): qs = Book.objects.annotate( page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name) ) def test_annotate_defer_select_related(self): qs = Book.objects.select_related('contact').annotate( page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (1, "159059725", 447, "Adrian Holovaty", "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name) ) def test_annotate_m2m(self): books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 51.5), ('Practical Django Projects', 29.0), ('Python Web Development with Django', Approximate(30.3, places=1)), ('Sams Teach Yourself Django in 24 Hours', 45.0) ], lambda b: (b.name, b.authors__age__avg), ) books = 
Book.objects.annotate(num_authors=Count("authors")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2) ], lambda b: (b.name, b.num_authors) ) def test_backwards_m2m_annotate(self): authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 4.5), ('Brad Dayley', 3.0), ('Jacob Kaplan-Moss', 4.5), ('James Bennett', 4.0), ('Paul Bissex', 4.0), ('Stuart Russell', 4.0) ], lambda a: (a.name, a.book__rating__avg) ) authors = Author.objects.annotate(num_books=Count("book")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 1), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 1), ('Peter Norvig', 2), ('Stuart Russell', 1), ('Wesley J. 
Chun', 1) ], lambda a: (a.name, a.num_books) ) def test_reverse_fkey_annotate(self): books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 7), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9), ('Practical Django Projects', 3), ('Python Web Development with Django', 7), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 3) ], lambda b: (b.name, b.publisher__num_awards__sum) ) publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name") self.assertQuerysetEqual( publishers, [ ('Apress', Decimal("59.69")), ("Jonno's House of Books", None), ('Morgan Kaufmann', Decimal("75.00")), ('Prentice Hall', Decimal("112.49")), ('Sams', Decimal("23.09")) ], lambda p: (p.name, p.book__price__sum) ) def test_annotate_values(self): books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values()) self.assertEqual( books, [ { "contact_id": 1, "id": 1, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": 1, "rating": 4.5, } ] ) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg('authors__age')) .values('pk', 'isbn', 'mean_age') ) self.assertEqual( list(books), [ { "pk": 1, "isbn": "159059725", "mean_age": 34.5, } ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name") self.assertEqual( list(books), [ { "name": "The Definitive Guide to Django: Web Development Done Right" } ] ) books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age')) self.assertEqual( list(books), [ { "contact_id": 1, "id": 1, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development 
Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": 1, "rating": 4.5, } ] ) books = ( Book.objects .values("rating") .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")) .order_by("rating") ) self.assertEqual( list(books), [ { "rating": 3.0, "n_authors": 1, "mean_age": 45.0, }, { "rating": 4.0, "n_authors": 6, "mean_age": Approximate(37.16, places=1) }, { "rating": 4.5, "n_authors": 2, "mean_age": 34.5, }, { "rating": 5.0, "n_authors": 1, "mean_age": 57.0, } ] ) authors = Author.objects.annotate(Avg("friends__age")).order_by("name") self.assertEqual(len(authors), 9) self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 32.0), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 29.5), ('James Bennett', 34.0), ('Jeffrey Forcier', 27.0), ('Paul Bissex', 31.0), ('Peter Norvig', 46.0), ('Stuart Russell', 57.0), ('Wesley J. Chun', Approximate(33.66, places=1)) ], lambda a: (a.name, a.friends__age__avg) ) def test_count(self): vals = Book.objects.aggregate(Count("rating")) self.assertEqual(vals, {"rating__count": 6}) vals = Book.objects.aggregate(Count("rating", distinct=True)) self.assertEqual(vals, {"rating__count": 4}) def test_count_star(self): with self.assertNumQueries(1) as ctx: Book.objects.aggregate(n=Count("*")) sql = ctx.captured_queries[0]['sql'] self.assertIn('SELECT COUNT(*) ', sql) def test_non_grouped_annotation_not_in_group_by(self): """ An annotation not included in values() before an aggregate should be excluded from the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual( list(qs), [ {'rating': 4.0, 'count': 2}, ] ) def test_grouped_annotation_in_group_by(self): """ An annotation included in values() before an aggregate should be included in the group by clause. 
""" qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual( list(qs), [ {'rating': 4.0, 'count': 1}, {'rating': 4.0, 'count': 2}, ] ) def test_fkey_aggregate(self): explicit = list(Author.objects.annotate(Count('book__id'))) implicit = list(Author.objects.annotate(Count('book'))) self.assertEqual(explicit, implicit) def test_annotate_ordering(self): books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating') self.assertEqual( list(books), [ { "rating": 4.5, "oldest": 35, }, { "rating": 3.0, "oldest": 45 }, { "rating": 4.0, "oldest": 57, }, { "rating": 5.0, "oldest": 57, } ] ) books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating") self.assertEqual( list(books), [ { "rating": 5.0, "oldest": 57, }, { "rating": 4.0, "oldest": 57, }, { "rating": 3.0, "oldest": 45, }, { "rating": 4.5, "oldest": 35, } ] ) def test_aggregate_annotation(self): vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors")) self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)}) def test_avg_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Avg('duration', output_field=DurationField())), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) def test_sum_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Sum('duration', output_field=DurationField())), {'duration__sum': datetime.timedelta(days=3)} ) def test_sum_distinct_aggregate(self): """ Sum on a distinct() QuerySet should aggregate only the distinct items. 
""" authors = Author.objects.filter(book__in=[5, 6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum('age')) self.assertEqual(age_sum['age__sum'], 103) def test_filtering(self): p = Publisher.objects.create(name='Expensive Publisher', num_awards=0) Book.objects.create( name='ExpensiveBook1', pages=1, isbn='111', rating=3.5, price=Decimal("1000"), publisher=p, contact_id=1, pubdate=datetime.date(2008, 12, 1) ) Book.objects.create( name='ExpensiveBook2', pages=1, isbn='222', rating=4.0, price=Decimal("1000"), publisher=p, contact_id=1, pubdate=datetime.date(2008, 12, 2) ) Book.objects.create( name='ExpensiveBook3', pages=1, isbn='333', rating=4.5, price=Decimal("35"), publisher=p, contact_id=1, pubdate=datetime.date(2008, 12, 3) ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name, ) publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Apress", "Sams", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name ) publishers = ( Publisher.objects .annotate(num_books=Count("book__id")) .filter(num_books__gt=1, book__price__lt=Decimal("40.0")) .order_by("pk") ) self.assertQuerysetEqual( publishers, [ "Apress", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name, ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual( publishers, [ "Apress", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice 
Hall", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice Hall", "Morgan Kaufmann", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Sams", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name, ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True) self.assertEqual(len(publishers), 0) def test_annotation(self): vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id")) self.assertEqual(vals, {"friends__id__count": 2}) books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk") self.assertQuerysetEqual( books, [ "The Definitive Guide to Django: Web Development Done Right", "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) authors = ( Author.objects .annotate(num_friends=Count("friends__id", distinct=True)) .filter(num_friends=0) .order_by("pk") ) self.assertQuerysetEqual( authors, [ "Brad Dayley", ], lambda a: a.name ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Prentice Hall", ], lambda p: p.name ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) ) self.assertQuerysetEqual( publishers, [ "Apress", ], lambda p: p.name ) books = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) ) self.assertQuerysetEqual( books, [ "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) def test_more_aggregation(self): a = 
Author.objects.get(name__contains='Norvig') b = Book.objects.get(name__contains='Done Right') b.authors.add(a) b.save() vals = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) .aggregate(Avg("rating")) ) self.assertEqual(vals, {"rating__avg": 4.25}) def test_even_more_aggregate(self): publishers = Publisher.objects.annotate( earliest_book=Min("book__pubdate"), ).exclude(earliest_book=None).order_by("earliest_book").values( 'earliest_book', 'num_awards', 'id', 'name', ) self.assertEqual( list(publishers), [ { 'earliest_book': datetime.date(1991, 10, 15), 'num_awards': 9, 'id': 4, 'name': 'Morgan Kaufmann' }, { 'earliest_book': datetime.date(1995, 1, 15), 'num_awards': 7, 'id': 3, 'name': 'Prentice Hall' }, { 'earliest_book': datetime.date(2007, 12, 6), 'num_awards': 3, 'id': 1, 'name': 'Apress' }, { 'earliest_book': datetime.date(2008, 3, 3), 'num_awards': 1, 'id': 2, 'name': 'Sams' } ] ) vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening")) self.assertEqual( vals, { "friday_night_closing__max": datetime.time(23, 59, 59), "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14), } ) def test_annotate_values_list(self): books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("pk", "isbn", "mean_age") ) self.assertEqual( list(books), [ (1, "159059725", 34.5), ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn") self.assertEqual( list(books), [ ('159059725',) ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age") self.assertEqual( list(books), [ (34.5,) ] ) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("mean_age", flat=True) ) self.assertEqual(list(books), [34.5]) books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", 
"price") self.assertEqual( list(books), [ (Decimal("29.69"), 2), (Decimal('23.09'), 1), (Decimal('30'), 1), (Decimal('75'), 1), (Decimal('82.8'), 1), ] ) def test_dates_with_aggregation(self): """ Test that .dates() returns a distinct set of dates when applied to a QuerySet with aggregation. Refs #18056. Previously, .dates() would return distinct (date_kind, aggregation) sets, in this case (year, num_authors), so 2008 would be returned twice because there are books from 2008 with a different number of authors. """ dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year') self.assertQuerysetEqual( dates, [ "datetime.date(1991, 1, 1)", "datetime.date(1995, 1, 1)", "datetime.date(2007, 1, 1)", "datetime.date(2008, 1, 1)" ] ) def test_values_aggregation(self): # Refs #20782 max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating')) self.assertEqual(max_rating['max_rating'], 5) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3}) def test_ticket17424(self): """ Check that doing exclude() on a foreign model after annotate() doesn't crash. """ all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk')) annotated_books = Book.objects.order_by('pk').annotate(one=Count("id")) # The value doesn't matter, we just need any negative # constraint on a related model that's a noop. excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__") # Try to generate query tree str(excluded_books.query) self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk) # Check internal state self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type) self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type) def test_ticket12886(self): """ Check that aggregation over sliced queryset works correctly. 
""" qs = Book.objects.all().order_by('-rating')[0:3] vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating'] self.assertAlmostEqual(vals, 4.5, places=2) def test_ticket11881(self): """ Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or select_related() stuff. """ qs = Book.objects.all().select_for_update().order_by( 'pk').select_related('publisher').annotate(max_pk=Max('pk')) with CaptureQueriesContext(connection) as captured_queries: qs.aggregate(avg_pk=Avg('max_pk')) self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'].lower() self.assertNotIn('for update', qstr) forced_ordering = connection.ops.force_no_ordering() if forced_ordering: # If the backend needs to force an ordering we make sure it's # the only "ORDER BY" clause present in the query. self.assertEqual( re.findall(r'order by (\w+)', qstr), [', '.join(f[1][0] for f in forced_ordering).lower()] ) else: self.assertNotIn('order by', qstr) self.assertEqual(qstr.count(' join '), 0) def test_decimal_max_digits_has_no_effect(self): Book.objects.all().delete() a1 = Author.objects.first() p1 = Publisher.objects.first() thedate = timezone.now() for i in range(10): Book.objects.create( isbn="abcde{}".format(i), name="none", pages=10, rating=4.0, price=9999.98, contact=a1, publisher=p1, pubdate=thedate) book = Book.objects.aggregate(price_sum=Sum('price')) self.assertEqual(book['price_sum'], Decimal("99999.80")) def test_nonaggregate_aggregation_throws(self): with six.assertRaisesRegex(self, TypeError, 'fail is not an aggregate expression'): Book.objects.aggregate(fail=F('price')) def test_nonfield_annotation(self): book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first() 
self.assertEqual(book.val, 2) def test_missing_output_field_raises_error(self): with six.assertRaisesRegex(self, FieldError, 'Cannot resolve expression type, unknown output_field'): Book.objects.annotate(val=Max(2)).first() def test_annotation_expressions(self): authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name') authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name') for qs in (authors, authors2): self.assertEqual(len(qs), 9) self.assertQuerysetEqual( qs, [ ('Adrian Holovaty', 132), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 129), ('James Bennett', 63), ('Jeffrey Forcier', 128), ('Paul Bissex', 120), ('Peter Norvig', 103), ('Stuart Russell', 103), ('Wesley J. Chun', 176) ], lambda a: (a.name, a.combined_ages) ) def test_aggregation_expressions(self): a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*')) a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age')) a3 = Author.objects.aggregate(av_age=Avg('age')) self.assertEqual(a1, {'av_age': 37}) self.assertEqual(a2, {'av_age': 37}) self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)}) def test_avg_decimal_field(self): v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price'] self.assertIsInstance(v, float) self.assertEqual(v, Approximate(47.39, places=2)) def test_order_of_precedence(self): p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3) self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)}) p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3) self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)}) def test_combine_different_types(self): with six.assertRaisesRegex(self, FieldError, 'Expression contains mixed types. 
You must set output_field'): Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk) b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=IntegerField())).get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=FloatField())).get(pk=self.b4.pk) self.assertEqual(b2.sums, 383.69) b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=DecimalField())).get(pk=self.b4.pk) self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2)) def test_complex_aggregations_require_kwarg(self): with six.assertRaisesRegex(self, TypeError, 'Complex annotations require an alias'): Author.objects.annotate(Sum(F('age') + F('friends__age'))) with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum('age') / Count('age')) with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum(1)) def test_aggregate_over_complex_annotation(self): qs = Author.objects.annotate( combined_ages=Sum(F('age') + F('friends__age'))) age = qs.aggregate(max_combined_age=Max('combined_ages')) self.assertEqual(age['max_combined_age'], 176) age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age=Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age'], 954) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages')) 
self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age_doubled'], 954 * 2) def test_values_annotation_with_expression(self): # ensure the F() is promoted to the group by clause qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['another_age'], 68) qs = qs.annotate(friend_count=Count('friends')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['friend_count'], 2) qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter( name="Adrian Holovaty").order_by('-combined_age') self.assertEqual( list(qs), [ { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 69 }, { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 63 } ] ) vals = qs.values('name', 'combined_age') self.assertEqual( list(vals), [ { "name": 'Adrian Holovaty', "combined_age": 69 }, { "name": 'Adrian Holovaty', "combined_age": 63 } ] ) def test_annotate_values_aggregate(self): alias_age = Author.objects.annotate( age_alias=F('age') ).values( 'age_alias', ).aggregate(sum_age=Sum('age_alias')) age = Author.objects.values('age').aggregate(sum_age=Sum('age')) self.assertEqual(alias_age['sum_age'], age['sum_age']) def test_annotate_over_annotate(self): author = Author.objects.annotate( age_alias=F('age') ).annotate( sum_age=Sum('age_alias') ).get(name="Adrian Holovaty") other_author = Author.objects.annotate( sum_age=Sum('age') ).get(name="Adrian Holovaty") self.assertEqual(author.sum_age, other_author.sum_age) def test_annotated_aggregate_over_annotated_aggregate(self): with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(Sum('id__max')) class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super(MyMax, self).as_sql(compiler, connection) with 
self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price')) def test_multi_arg_aggregate(self): class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super(MyMax, self).as_sql(compiler, connection) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Book.objects.aggregate(MyMax('pages', 'price')) with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Book.objects.annotate(MyMax('pages', 'price')) Book.objects.aggregate(max_field=MyMax('pages', 'price')) def test_add_implementation(self): class MySum(Sum): pass # test completely changing how the output is rendered def lower_case_function_override(self, compiler, connection): sql, params = compiler.compile(self.source_expressions[0]) substitutions = dict(function=self.function.lower(), expressions=sql) substitutions.update(self.extra) return self.template % substitutions, params setattr(MySum, 'as_' + connection.vendor, lower_case_function_override) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test changing the dict and delegating def lower_case_function_super(self, compiler, connection): self.extra['function'] = self.function.lower() return super(MySum, self).as_sql(compiler, connection) setattr(MySum, 'as_' + connection.vendor, lower_case_function_super) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test overriding all parts of the template def be_evil(self, compiler, connection): substitutions = dict(function='MAX', expressions='2') 
substitutions.update(self.extra) return self.template % substitutions, () setattr(MySum, 'as_' + connection.vendor, be_evil) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('MAX('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 2) def test_complex_values_aggregation(self): max_rating = Book.objects.values('rating').aggregate( double_max_rating=Max('rating') + Max('rating')) self.assertEqual(max_rating['double_max_rating'], 5 * 2) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') + 5 ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3 + 5}) def test_expression_on_aggregation(self): # Create a plain expression class Greatest(Func): function = 'GREATEST' def as_sqlite(self, compiler, connection): return super(Greatest, self).as_sql(compiler, connection, function='MAX') qs = Publisher.objects.annotate( price_or_median=Greatest(Avg('book__rating'), Avg('book__price')) ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs, [1, 3, 7, 9], lambda v: v.num_awards) qs2 = Publisher.objects.annotate( rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'), output_field=FloatField()) ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs2, [1, 3], lambda v: v.num_awards)
bsd-3-clause
AOSPU/external_chromium_org
tools/perf/page_sets/gmail_expand_collapse_conversation.py
9
2149
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module


class GmailExpandCollapseConversationPage(page_module.Page):

  """ Why: Expand and Collapse a long conversation. """
  # TODO(edmundyan): Find a long conversation rather than hardcode url

  def __init__(self, page_set):
    super(GmailExpandCollapseConversationPage, self).__init__(
        url='https://mail.google.com/mail/u/0/#inbox/13c6a141fa95ffe0',
        page_set=page_set,
        name='gmail_expand_collapse_conversation')
    self.credentials_path = 'data/credentials.json'
    self.credentials = 'google'
    self.user_agent_type = 'desktop'
    self.archive_data_file = 'data/gmail_expand_collapse_conversation.json'

  def _Toggle(self, action_runner, alt_text, wait_seconds):
    # Wait for the expand/collapse control to become visible, click it,
    # then pause so the UI has time to settle before the next action.
    selector = 'img[alt="%s"]' % alt_text
    action_runner.WaitForElement(selector)
    action_runner.ClickElement(selector)
    action_runner.Wait(wait_seconds)

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # The first expansion is given extra time because the conversation is
    # long and the initial render is the slowest.
    self._Toggle(action_runner, 'Expand all', 5)
    self._Toggle(action_runner, 'Collapse all', 1)

  def RunEndure(self, action_runner):
    self._Toggle(action_runner, 'Expand all', 1)
    self._Toggle(action_runner, 'Collapse all', 1)


class GmailExpandCollapseConversationPageSet(page_set_module.PageSet):

  """ Description: Chrome Endure test for GMail. """

  def __init__(self):
    super(GmailExpandCollapseConversationPageSet, self).__init__(
        credentials_path='data/credentials.json',
        user_agent_type='desktop',
        archive_data_file='data/gmail_expand_collapse_conversation.json',
        bucket=page_set_module.PUBLIC_BUCKET)
    self.AddPage(GmailExpandCollapseConversationPage(self))
bsd-3-clause
ayarshabeer/wagtail
wagtail/wagtailsearch/backends/__init__.py
21
2746
# Backend loading
# Based on the Django cache framework
# https://github.com/django/django/blob/5d263dee304fdaf95e18d2f0619d6925984a7f02/django/core/cache/__init__.py

import sys

import six

from importlib import import_module

from django.utils.module_loading import import_string

from django.core.exceptions import ImproperlyConfigured

from django.conf import settings


class InvalidSearchBackendError(ImproperlyConfigured):
    pass


def import_backend(dotted_path):
    """
    Import a search backend class from ``dotted_path``.

    Two formats are supported:
      * new: a path to a module exposing a ``SearchBackend`` attribute, e.g.
        ``wagtail.wagtailsearch.backends.elasticsearch``
      * old: a path directly to the backend class, e.g.
        ``wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch``

    The new format is tried first; if it fails, the old format is tried, and
    if that also fails the original (new-format) ImportError is re-raised.
    """
    try:
        # New format: the module itself names the backend class.
        return import_module(dotted_path).SearchBackend
    except ImportError as new_style_error:
        try:
            # Old format: the path points straight at the class.
            return import_string(dotted_path)
        except ImportError:
            # Re-raise the new-format error with the current traceback.
            six.reraise(ImportError, new_style_error, sys.exc_info()[2])


def get_search_backend(backend='default', **kwargs):
    """
    Instantiate and return the search backend named (or dotted-path'd) by
    ``backend``, with ``kwargs`` overriding any configured parameters.
    """
    # Fall back to the database backend when the project defines nothing.
    default_conf = {
        'default': {
            'BACKEND': 'wagtail.wagtailsearch.backends.db',
        },
    }
    backends_conf = getattr(settings, 'WAGTAILSEARCH_BACKENDS', default_conf)

    if backend in backends_conf:
        # ``backend`` names a configuration entry: merge its parameters with
        # the caller's overrides, then pull out the import path.
        params = backends_conf[backend].copy()
        params.update(kwargs)
        backend = params.pop('BACKEND')
    else:
        # ``backend`` may itself be a dotted import path; validate it now so
        # an unknown name is reported against the original argument.
        try:
            import_backend(backend)
        except ImportError as error:
            raise InvalidSearchBackendError("Could not find backend '%s': %s" % (
                backend, error))
        params = kwargs

    try:
        backend_cls = import_backend(backend)
    except ImportError as error:
        raise InvalidSearchBackendError("Could not find backend '%s': %s" % (
            backend, error))

    return backend_cls(params)


def get_search_backends(with_auto_update=False):
    """
    Yield every configured search backend.

    When ``with_auto_update`` is True, backends that explicitly set
    ``AUTO_UPDATE: False`` are skipped (the flag defaults to True).
    """
    if not hasattr(settings, 'WAGTAILSEARCH_BACKENDS'):
        yield get_search_backend('default')
        return
    for name, params in settings.WAGTAILSEARCH_BACKENDS.items():
        if with_auto_update and params.get('AUTO_UPDATE', True) is False:
            continue
        yield get_search_backend(name)
bsd-3-clause
tejasapatil/spark
examples/src/main/python/ml/feature_hasher_example.py
67
1560
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.feature import FeatureHasher
# $example off$

if __name__ == "__main__":
    spark = SparkSession.builder.appName("FeatureHasherExample").getOrCreate()

    # $example on$
    # A small mixed-type frame: one real-valued, one boolean and two string
    # columns, all hashed together into a single feature vector.
    rows = [
        (2.2, True, "1", "foo"),
        (3.3, False, "2", "bar"),
        (4.4, False, "3", "baz"),
        (5.5, False, "4", "foo"),
    ]
    columns = ["real", "bool", "stringNum", "string"]
    dataset = spark.createDataFrame(rows, columns)

    hasher = FeatureHasher(inputCols=columns, outputCol="features")

    featurized = hasher.transform(dataset)
    featurized.show(truncate=False)
    # $example off$

    spark.stop()
apache-2.0
JazzeYoung/VeryDeepAutoEncoder
theano/sandbox/cuda/blas.py
1
107221
from __future__ import absolute_import, print_function, division import os import logging from six import integer_types from six.moves import StringIO, reduce import theano from theano import Apply from theano import tensor from theano.sandbox.cuda.type import CudaNdarrayType from theano.sandbox.cuda import GpuOp from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable, gpu_contiguous) from theano.tensor import as_tensor_variable _logger = logging.getLogger(__name__) class GpuBatchedDot(GpuOp): __props__ = ("stream_threshold",) def __init__(self, stream_threshold=650): self.stream_threshold = stream_threshold def make_node(self, inp1, inp2): inp1 = gpu_contiguous(as_cuda_ndarray_variable(inp1)) inp2 = gpu_contiguous(as_cuda_ndarray_variable(inp2)) assert inp1.dtype == "float32" assert inp2.dtype == "float32" assert inp1.ndim == 3 # (batch, a, b) assert inp2.ndim == 3 return theano.Apply(self, [inp1, inp2], [self.output_type(inp1, inp2)()]) def output_type(self, inp1, inp2): return CudaNdarrayType( (inp1.type.broadcastable[0] or inp2.type.broadcastable[0], inp1.type.broadcastable[1], inp2.type.broadcastable[2])) def c_code(self, node, name, input_names, output_names, sub): bx, by = input_names bz, = output_names fail = sub['fail'] threshold = self.stream_threshold return (""" float alpha = 1.0, beta = 0.0; const int* Nx = CudaNdarray_HOST_DIMS(%(bx)s); const int* Ny = CudaNdarray_HOST_DIMS(%(by)s); int Nz[3] = {0}; // use parallel cublasSgemm calls rather than cublasSgemmBatched for large products // (compute products in double because they can be large and we don't need to be exact) bool use_cublas_sgemm_batched = ( double(Nx[1]) * double(Nx[2]) * double(Ny[2]) < double(%(threshold)s) * double(%(threshold)s) * double(%(threshold)s)); if (Nx[0] != Ny[0]) { PyErr_Format(PyExc_RuntimeError, "The batchsizes (%%d, %%d) don't match.\\n", Nx[0], Ny[0]); %(fail)s; } if (Nx[2] != Ny[1]) { PyErr_Format(PyExc_RuntimeError, "Shape mismatch. 
(%%d, %%d, %%d) (%%d, %%d, %%d)\\n", Nx[0], Nx[1], Nx[2], Ny[0], Ny[1], Ny[2]); %(fail)s; } Nz[0] = Nx[0]; Nz[1] = Nx[1]; Nz[2] = Ny[2]; if ( !(%(bz)s && %(bz)s->nd==3 && CudaNdarray_is_c_contiguous(%(bz)s) && CudaNdarray_HOST_DIMS(%(bz)s)[0] == Nz[0] && CudaNdarray_HOST_DIMS(%(bz)s)[1] == Nz[1] && CudaNdarray_HOST_DIMS(%(bz)s)[2] == Nz[2])) { Py_XDECREF(%(bz)s); %(bz)s = (CudaNdarray*)CudaNdarray_NewDims(3, Nz); if (NULL == %(bz)s) { PyErr_Format(PyExc_RuntimeError, "Failed to allocate output of %%d x %%d x %%d", Nz[0], Nz[1], Nz[2]); %(fail)s; } } if (Nx[0] == 0 || Nx[1] == 0 || Nx[2] == 0 || Ny[0] == 0 || Ny[1] == 0 || Ny[2] == 0) { const int total_size = Nz[0] * Nz[1] * Nz[2] * sizeof(float); if (cudaSuccess != cudaMemset(CudaNdarray_DEV_DATA(%(bz)s), 0, total_size)) { PyErr_Format(PyExc_RuntimeError, "Failed to fill output with zeros"); %(fail)s; } } else if (use_cublas_sgemm_batched) { cublasStatus_t err; cudaError_t err1; float **host_x = NULL; float **host_z = NULL; float **host_y = NULL; float **gpu_x = NULL; float **gpu_y = NULL; float **gpu_z = NULL; const int ptr_array_size = 3 * Nx[0] * sizeof(float *); const int x_stride = CudaNdarray_HOST_STRIDES(%(bx)s)[0]; const int y_stride = CudaNdarray_HOST_STRIDES(%(by)s)[0]; const int z_stride = CudaNdarray_HOST_STRIDES(%(bz)s)[0]; host_x = (float **) malloc (ptr_array_size); if (host_x == NULL) { CLEANUP(); PyErr_Format(PyExc_RuntimeError, "%%s", "malloc failure"); %(fail)s; } host_y = &host_x[Nx[0]]; host_z = &host_y[Nx[0]]; host_x[0] = CudaNdarray_DEV_DATA(%(bx)s); host_y[0] = CudaNdarray_DEV_DATA(%(by)s); host_z[0] = CudaNdarray_DEV_DATA(%(bz)s); for (int i = 1; i < Nz[0]; i++) { host_x[i] = host_x[i - 1] + x_stride; host_y[i] = host_y[i - 1] + y_stride; host_z[i] = host_z[i - 1] + z_stride; } gpu_x = (float **) device_malloc(ptr_array_size); if (gpu_x == NULL){ %(fail)s; } gpu_y = &gpu_x[Nx[0]]; gpu_z = &gpu_y[Nx[0]]; err1 = cudaMemcpy(gpu_x, host_x, ptr_array_size, cudaMemcpyHostToDevice); if (err1 != 
cudaSuccess) { CLEANUP(); PyErr_Format(PyExc_RuntimeError, "%%s", "cudaMemcpy failure"); %(fail)s; } err = cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, Ny[2], Nx[1], Nx[2], &alpha, (const float **) gpu_y, Ny[2], (const float **) gpu_x, Nx[2], &beta, gpu_z, Ny[2], Nx[0]); CNDA_THREAD_SYNC; CLEANUP(); if (CUBLAS_STATUS_SUCCESS != err) { PyErr_Format(PyExc_RuntimeError, "cublasSgemmBatched failed (%%i) %%s", err, cublasGetErrorString(err)); %(fail)s; } } else { // copy inputs if not contiguous """ + ("\n".join(""" if (( CudaNdarray_HOST_DIMS(%(var)s)[0] > 1 && CudaNdarray_HOST_STRIDES(%(var)s)[0] != 1 && CudaNdarray_HOST_DIMS(%(var)s)[1] > 1 && CudaNdarray_HOST_STRIDES(%(var)s)[1] != 1 && CudaNdarray_HOST_DIMS(%(var)s)[2] > 1 && CudaNdarray_HOST_STRIDES(%(var)s)[2] != 1) || CudaNdarray_HOST_STRIDES(%(var)s)[0] < 0 || CudaNdarray_HOST_STRIDES(%(var)s)[1] < 0 || CudaNdarray_HOST_STRIDES(%(var)s)[2] < 0) { CudaNdarray *_copy = (CudaNdarray*) CudaNdarray_Copy(%(var)s); if (!_copy) %(fail)s; Py_XDECREF(%(var)s); %(var)s = _copy; } """ % dict(var=var, fail=fail) for var in (bx, by))) + """ // fail if the output is not contiguous; we can't copy it because we // need to write to the original memory if (( CudaNdarray_HOST_DIMS(%(bz)s)[0] > 1 && CudaNdarray_HOST_STRIDES(%(bz)s)[0] != 1 && CudaNdarray_HOST_DIMS(%(bz)s)[1] > 1 && CudaNdarray_HOST_STRIDES(%(bz)s)[1] != 1 && CudaNdarray_HOST_DIMS(%(bz)s)[2] > 1 && CudaNdarray_HOST_STRIDES(%(bz)s)[2] != 1) || CudaNdarray_HOST_STRIDES(%(bz)s)[0] < 0 || CudaNdarray_HOST_STRIDES(%(bz)s)[1] < 0 || CudaNdarray_HOST_STRIDES(%(bz)s)[2] < 0) { PyErr_Format(PyExc_AssertionError, "non-unit or negative stride in output arg %(bz)s (%%i, %%i, %%i) of shape (%%i, %%i, %%i)", CudaNdarray_HOST_STRIDES(%(bz)s)[0], CudaNdarray_HOST_STRIDES(%(bz)s)[1], CudaNdarray_HOST_STRIDES(%(bz)s)[2], CudaNdarray_HOST_DIMS(%(bz)s)[0], CudaNdarray_HOST_DIMS(%(bz)s)[1], CudaNdarray_HOST_DIMS(%(bz)s)[2]); %(fail)s; } const int* Sx = 
CudaNdarray_HOST_STRIDES(%(bx)s); const int* Sy = CudaNdarray_HOST_STRIDES(%(by)s); const int* Sz = CudaNdarray_HOST_STRIDES(%(bz)s); /* encode the stride structure of _x,_y,_z into a single integer. */ int unit = 0; unit |= ((Sx[2] == 1 || Nx[2] == 1) ? 0x0 : (Sx[1] == 1 || Nx[1] == 1) ? 0x1 : 0x2) << 8; unit |= ((Sy[2] == 1 || Ny[2] == 1) ? 0x0 : (Sy[1] == 1 || Ny[1] == 1) ? 0x1 : 0x2) << 4; unit |= ((Sz[2] == 1 || Nz[2] == 1) ? 0x0 : (Sz[1] == 1 || Nz[1] == 1) ? 0x1 : 0x2) << 0; /* create appropriate strides for malformed matrices that are row or column * vectors, or empty matrices. * In that case, the value of the stride does not really matter, but * some versions of BLAS insist that: * - they are not smaller than the number of elements in the array, * - they are not 0. */ int sx_1 = (Nx[1] > 1) ? Sx[1] : (Nx[2] + 1); int sx_2 = (Nx[2] > 1) ? Sx[2] : (Nx[1] + 1); int sy_1 = (Ny[1] > 1) ? Sy[1] : (Ny[2] + 1); int sy_2 = (Ny[2] > 1) ? Sy[2] : (Ny[1] + 1); int sz_1 = (Nz[1] > 1) ? Sz[1] : (Nz[2] + 1); int sz_2 = (Nz[2] > 1) ? 
Sz[2] : (Nz[1] + 1); cublasOperation_t N = CUBLAS_OP_N, T = CUBLAS_OP_T; float* x = CudaNdarray_DEV_DATA(%(bx)s); float* y = CudaNdarray_DEV_DATA(%(by)s); float* z = CudaNdarray_DEV_DATA(%(bz)s); float* xend = x + CudaNdarray_SIZE(%(bx)s); float* yend = y + CudaNdarray_SIZE(%(by)s); float* zend = z + CudaNdarray_SIZE(%(bz)s); #define N_STREAMS 32 cudaStream_t streams[N_STREAMS]; for (int i = 0; i < N_STREAMS; i++) { cudaStreamCreate(&streams[i]); } cudaStreamSynchronize(0); for (int i = 0; i < Nx[0]; i++) { assert(CudaNdarray_DEV_DATA(%(bx)s) <= x); assert(x < CudaNdarray_DEV_DATA(%(bx)s) + CudaNdarray_SIZE(%(bx)s)); assert(CudaNdarray_DEV_DATA(%(by)s) <= y); assert(y < CudaNdarray_DEV_DATA(%(by)s) + CudaNdarray_SIZE(%(by)s)); assert(CudaNdarray_DEV_DATA(%(bz)s) <= z); assert(z < CudaNdarray_DEV_DATA(%(bz)s) + CudaNdarray_SIZE(%(bz)s)); cublasSetStream(handle, streams[i %% N_STREAMS]); cublasStatus_t status; switch(unit) { case 0x000: status = cublasSgemm(handle, N, N, Nz[2], Nz[1], Nx[2], &alpha, y, sy_1, x, sx_1, &beta, z, sz_1); break; case 0x100: status = cublasSgemm(handle, N, T, Nz[2], Nz[1], Nx[2], &alpha, y, sy_1, x, sx_2, &beta, z, sz_1); break; case 0x010: status = cublasSgemm(handle, T, N, Nz[2], Nz[1], Nx[2], &alpha, y, sy_2, x, sx_1, &beta, z, sz_1); break; case 0x110: status = cublasSgemm(handle, T, T, Nz[2], Nz[1], Nx[2], &alpha, y, sy_2, x, sx_2, &beta, z, sz_1); break; case 0x001: status = cublasSgemm(handle, T, T, Nz[1], Nz[2], Nx[2], &alpha, x, sx_1, y, sy_1, &beta, z, sz_2); break; case 0x101: status = cublasSgemm(handle, N, T, Nz[1], Nz[2], Nx[2], &alpha, x, sx_2, y, sy_1, &beta, z, sz_2); break; case 0x011: status = cublasSgemm(handle, T, N, Nz[1], Nz[2], Nx[2], &alpha, x, sx_1, y, sy_2, &beta, z, sz_2); break; case 0x111: status = cublasSgemm(handle, N, N, Nz[1], Nz[2], Nx[2], &alpha, x, sx_2, y, sy_2, &beta, z, sz_2); break; default: PyErr_Format(PyExc_ValueError, "some matrix has no unit stride (unit=%%x)", unit); %(fail)s; } if (status != 
CUBLAS_STATUS_SUCCESS) { PyErr_Format(PyExc_RuntimeError, "cublasSgemm failed (%%i) %%s\\n" " unit=%%x N=%%d," " x shape=[%%d %%d %%d], y shape=[%%d %%d %%d], z shape=[%%d %%d %%d]" " x strides=[%%d %%d %%d], y strides=[%%d %%d %%d], z strides=[%%d %%d %%d]", status, cublasGetErrorString(status), unit, N, Nx[0], Nx[1], Nx[2], Sx[0], Sx[1], Sx[2], Ny[0], Ny[1], Ny[2], Sy[0], Sy[1], Sy[2], Nz[0], Nz[1], Nz[2], Sz[0], Sz[1], Sz[2]); %(fail)s; } x += Sx[0]; y += Sy[0]; z += Sz[0]; }; cublasSetStream(handle, NULL); for (int i = 0; i < N_STREAMS; i++) { cudaStreamSynchronize(streams[i]); cudaStreamDestroy(streams[i]); } } """) % locals() def c_support_code(self): return """ #define CLEANUP() \ do \ { \ if (host_x) free (host_x); \ if (gpu_x) device_free(gpu_x); \ } while (0) """ def grad(self, inp, grads): x, y = inp gz, = grads xgrad = GpuBatchedDot(stream_threshold=self.stream_threshold)(gz, y.dimshuffle(0, 2, 1)) ygrad = GpuBatchedDot(stream_threshold=self.stream_threshold)(x.dimshuffle(0, 2, 1), gz) rval = xgrad, ygrad for elem in rval: assert elem.dtype.find('float') != -1 return rval def c_code_cache_version(self): return (3,) def infer_shape(self, node, shapes): xshp, yshp = shapes return [xshp[:-1] + yshp[2:]] batched_dot = GpuBatchedDot() """ Call cublasSgemmBatched. Take 2 3d tensor as input. """ BatchedDotOp = batched_dot class GpuDot22(GpuOp): """ Implement dot(2d, 2d) on the gpu. 
""" def __str__(self): return 'GpuDot22' def __eq__(self, other): return type(self) == type(other) def __hash__(self): return hash(type(self)) def make_node(self, x, y): if x.type.ndim != 2: raise TypeError(x) if y.type.ndim != 2: raise TypeError(y) otype = CudaNdarrayType( (x.type.broadcastable[0], y.type.broadcastable[1])) return Apply(self, [x, y], [otype()]) def c_code_cache_version(self): return (1, 2) def c_code(self, node, nodename, inputs, outputs, sub): x, y = inputs z, = outputs fail = sub['fail'] return """ if (%(x)s->nd != 2) { PyErr_Format(PyExc_TypeError, "rank(x)==%%i must be 2", %(x)s->nd); %(fail)s; } if (%(y)s->nd != 2) { PyErr_Format(PyExc_TypeError, "rank(y)==%%i must be 2", %(y)s->nd); %(fail)s; } if ((NULL == %(z)s) || (CudaNdarray_HOST_DIMS(%(z)s)[0] != CudaNdarray_HOST_DIMS(%(x)s)[0]) || (CudaNdarray_HOST_DIMS(%(z)s)[1] != CudaNdarray_HOST_DIMS(%(y)s)[1]) || (CudaNdarray_HOST_STRIDES(%(z)s)[0] < 0) || (CudaNdarray_HOST_STRIDES(%(z)s)[1] < 0) || ((CudaNdarray_HOST_DIMS(%(z)s)[0] > 1) && (CudaNdarray_HOST_STRIDES(%(z)s)[0] != 1) && (CudaNdarray_HOST_DIMS(%(z)s)[1] > 1) && (CudaNdarray_HOST_STRIDES(%(z)s)[1] != 1))) { Py_XDECREF(%(z)s); npy_intp dims[2]; dims[0] = CudaNdarray_HOST_DIMS(%(x)s)[0]; dims[1] = CudaNdarray_HOST_DIMS(%(y)s)[1]; %(z)s = (CudaNdarray*)CudaNdarray_New(); if ((NULL == %(z)s) || CudaNdarray_alloc_contiguous(%(z)s, 2, dims)) { if (%(z)s) { Py_DECREF(%(z)s); %(z)s = NULL; } %(fail)s; } } if (CudaNdarray_gemm(1.0f, %(x)s, %(y)s, 0.0f, %(z)s)) { if (%(z)s) { Py_DECREF(%(z)s); %(z)s = NULL; } %(fail)s; } """ % locals() gpu_dot22 = GpuDot22() class GpuDot22Scalar(GpuOp): """ Implement dot(2d, 2d) * scalar on the gpu. Notes ----- Not used anymore. Keep to allow unpickle of old graph. 
""" def __str__(self): return 'GpuDot22Scalar' def __eq__(self, other): return type(self) == type(other) def __hash__(self): return hash(type(self)) def make_node(self, x, y, a): if x.type.ndim != 2: raise TypeError(x) if y.type.ndim != 2: raise TypeError(y) if not tensor.blas._as_scalar(a): raise TypeError(a) otype = CudaNdarrayType( (x.type.broadcastable[0], y.type.broadcastable[1])) return Apply(self, [x, y, a], [otype()]) def c_code_cache_version(self): return (1, 2) def c_code(self, node, name, inputs, outputs, sub): x, y, a = inputs z, = outputs fail = sub['fail'] return """ #define REAL float float %(name)s_a = (PyArray_TYPE(%(a)s) == NPY_FLOAT) ? (REAL)(((float*)PyArray_DATA(%(a)s))[0]) : (REAL)(((double*)PyArray_DATA(%(a)s))[0]); #undef REAL if (%(x)s->nd != 2) { PyErr_Format(PyExc_TypeError, "rank(x)==%%i must be 2", %(x)s->nd); %(fail)s; } if (%(y)s->nd != 2) { PyErr_Format(PyExc_TypeError, "rank(y)==%%i must be 2", %(y)s->nd); %(fail)s; } if ((NULL == %(z)s) || (CudaNdarray_HOST_DIMS(%(z)s)[0] != CudaNdarray_HOST_DIMS(%(x)s)[0]) || (CudaNdarray_HOST_DIMS(%(z)s)[1] != CudaNdarray_HOST_DIMS(%(y)s)[1]) || (CudaNdarray_HOST_STRIDES(%(z)s)[0] < 0) || (CudaNdarray_HOST_STRIDES(%(z)s)[1] < 0) || ((CudaNdarray_HOST_DIMS(%(z)s)[0] > 1) && (CudaNdarray_HOST_STRIDES(%(z)s)[0] != 1) && (CudaNdarray_HOST_DIMS(%(z)s)[1] > 1) && (CudaNdarray_HOST_STRIDES(%(z)s)[1] != 1))) { //if (%(z)s) Py_DECREF(%(z)s); Py_XDECREF(%(z)s); npy_intp dims[2]; dims[0] = CudaNdarray_HOST_DIMS(%(x)s)[0]; dims[1] = CudaNdarray_HOST_DIMS(%(y)s)[1]; %(z)s = (CudaNdarray*)CudaNdarray_New(); if ((NULL == %(z)s) || CudaNdarray_alloc_contiguous(%(z)s, 2, dims)) { if (%(z)s) { Py_DECREF(%(z)s); %(z)s = NULL; } %(fail)s; } } if (CudaNdarray_gemm(%(name)s_a, %(x)s, %(y)s, 0.0f, %(z)s)) { if (%(z)s) { Py_DECREF(%(z)s); %(z)s = NULL; } %(fail)s; } """ % locals() gpu_dot22scalar = GpuDot22Scalar() class GpuGemm(GpuOp): """ implement the gemm on the gpu. 
""" def __init__(self, inplace): self.inplace = inplace if self.inplace: self.destroy_map = {0: [0]} def __str__(self): if self.inplace: return 'GpuGemm{inplace}' else: return 'GpuGemm{no_inplace}' def __eq__(self, other): return (type(self) == type(other) and self.inplace == other.inplace) def __hash__(self): return hash(type(self)) ^ hash(self.inplace) def __setstate__(self, dct): self.__dict__.update(dct) # Correctly reload older pickles where _op_use_c_code and # destroy_map were not saved if '_op_use_c_code' not in self.__dict__: self._op_use_c_code = theano.config.cxx if 'destroy_map' not in self.__dict__ and self.inplace: self.destroy_map = {0: [0]} def make_node(self, z, a, x, y, b): # the more complicated error checking performed by tensor.gemm # is assumed to already have been done return Apply(self, [z, a, x, y, b], [z.type()]) def c_code_cache_version(self): return (4,) def c_code(self, node, name, inputs, outputs, sub): # z_out = alpha * dot(x,y) + beta * z_in # inplace version, set set z_out = z_in # not inplace version, we copy z_in to z_out. z_in, a, x, y, b = inputs z_out, = outputs inplace = int(self.inplace) fail = sub['fail'] sio = StringIO() print(""" #define REAL float float %(name)s_a = (PyArray_TYPE(%(a)s) == NPY_FLOAT) ? (REAL)(((float*)PyArray_DATA(%(a)s))[0]) : (REAL)(((double*)PyArray_DATA(%(a)s))[0]); float %(name)s_b = (PyArray_TYPE(%(b)s) == NPY_FLOAT) ? 
(REAL)(((float*)PyArray_DATA(%(b)s))[0]) : (REAL)(((double*)PyArray_DATA(%(b)s))[0]); #undef REAL if (%(inplace)s && (CudaNdarray_HOST_STRIDES(%(z_in)s)[0] >= 0) && (CudaNdarray_HOST_STRIDES(%(z_in)s)[1] >= 0) && ((CudaNdarray_HOST_DIMS(%(z_in)s)[0] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_in)s)[0] == 1) || (CudaNdarray_HOST_DIMS(%(z_in)s)[1] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_in)s)[1] == 1))) { // The input has an appropriate layout, we work inplace Py_XDECREF(%(z_out)s); %(z_out)s = %(z_in)s; Py_INCREF(%(z_out)s); } else if (%(z_out)s && (%(z_out)s->nd == 2) && (CudaNdarray_HOST_DIMS(%(z_out)s)[0] == CudaNdarray_HOST_DIMS(%(z_in)s)[0]) && (CudaNdarray_HOST_DIMS(%(z_out)s)[1] == CudaNdarray_HOST_DIMS(%(z_in)s)[1]) && (CudaNdarray_HOST_STRIDES(%(z_out)s)[0] >= 0) && (CudaNdarray_HOST_STRIDES(%(z_out)s)[1] >= 0) // The following condition is needed as this is a condition by cublas // on the memory layout of the output it accepts. && ((CudaNdarray_HOST_DIMS(%(z_out)s)[0] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_out)s)[0] == 1) || (CudaNdarray_HOST_DIMS(%(z_out)s)[1] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_out)s)[1] == 1))) { // The existing output has an appropriate layout, // copy the input data into it, then work inplace if (CudaNdarray_CopyFromCudaNdarray(%(z_out)s, %(z_in)s)) { %(fail)s; } } else { // Copy the input, use the copy as output Py_XDECREF(%(z_out)s); %(z_out)s = (CudaNdarray*)CudaNdarray_Copy(%(z_in)s); if (!%(z_out)s) { %(fail)s; } } if (CudaNdarray_gemm(%(name)s_a, %(x)s, %(y)s, %(name)s_b, %(z_out)s)) { %(fail)s; } """, file=sio) return sio.getvalue() % locals() gpu_gemm_no_inplace = GpuGemm(inplace=False) gpu_gemm_inplace = GpuGemm(inplace=True) class GpuGemv(GpuOp): """ implement gemv on the gpu. 
""" def __init__(self, inplace): self.inplace = inplace if self.inplace: self.destroy_map = {0: [0]} def __str__(self): if self.inplace: return 'GpuGemv{inplace}' else: return 'GpuGemv{no_inplace}' def __eq__(self, other): return (type(self) == type(other) and self.inplace == other.inplace) def __hash__(self): return hash(type(self)) ^ hash(self.inplace) def __setstate__(self, dct): self.__dict__.update(dct) # Correctly reload older pickles where _op_use_c_code and # destroy_map were not saved if '_op_use_c_code' not in self.__dict__: self._op_use_c_code = theano.config.cxx if 'destroy_map' not in self.__dict__ and self.inplace: self.destroy_map = {0: [0]} def make_node(self, z, a, x, y, b): # the more complicated error checking performed by tensor.gemv # is assumed to already have been done return Apply(self, [z, a, x, y, b], [z.type()]) def c_code_cache_version(self): return (3,) def c_code(self, node, name, inputs, outputs, sub): # z_out = alpha * dot(x,y) + beta * z_in # inplace version, set set z_out = z_in # not inplace version, we copy z_in to z_out. 
z_in, a, x, y, b = inputs z_out, = outputs inplace = int(self.inplace) fail = sub['fail'] sio = StringIO() print(""" float %(name)s_alpha = ((dtype_%(a)s*)(PyArray_DATA(%(a)s)))[0]; float %(name)s_beta = ((dtype_%(b)s*)(PyArray_DATA(%(b)s)))[0]; if (%(inplace)s && ((CudaNdarray_HOST_STRIDES(%(z_in)s)[0] > 0) || ((CudaNdarray_HOST_STRIDES(%(z_in)s)[0] == 0) && (CudaNdarray_HOST_DIMS(%(z_in)s)[0] == 1)))) { // Work inplace on the input Py_XDECREF(%(z_out)s); %(z_out)s = %(z_in)s; Py_INCREF(%(z_out)s); } else if (%(z_out)s && (CudaNdarray_HOST_DIMS(%(z_out)s)[0] == CudaNdarray_HOST_DIMS(%(z_in)s)[0]) && ((CudaNdarray_HOST_STRIDES(%(z_out)s)[0] > 0) || ((CudaNdarray_HOST_STRIDES(%(z_out)s)[0] == 0) && (CudaNdarray_HOST_DIMS(%(z_out)s)[0] == 1)))) { // Work on the output if (CudaNdarray_CopyFromCudaNdarray(%(z_out)s, %(z_in)s)) { %(fail)s; } } else { // Copy Py_XDECREF(%(z_out)s); %(z_out)s = (CudaNdarray*)CudaNdarray_Copy(%(z_in)s); if (!%(z_out)s) { %(fail)s; } } if (CudaNdarray_sgemv(%(name)s_alpha, %(x)s, %(y)s, %(name)s_beta, %(z_out)s)) { %(fail)s; } """, file=sio) return sio.getvalue() % locals() gpu_gemv_no_inplace = GpuGemv(inplace=False) gpu_gemv_inplace = GpuGemv(inplace=True) class GpuGer(GpuOp): """ implement ger on the gpu. 
""" def __init__(self, inplace): self.inplace = inplace if self.inplace: self.destroy_map = {0: [0]} def __str__(self): if self.inplace: return 'GpuGer{inplace}' else: return 'GpuGer{no_inplace}' def __eq__(self, other): return (type(self) == type(other) and self.inplace == other.inplace) def __hash__(self): return hash(type(self)) ^ hash(self.inplace) def __setstate__(self, dct): self.__dict__.update(dct) # Correctly reload older pickles where _op_use_c_code and # destroy_map were not saved if '_op_use_c_code' not in self.__dict__: self._op_use_c_code = theano.config.cxx if 'destroy_map' not in self.__dict__ and self.inplace: self.destroy_map = {0: [0]} def make_node(self, z, a, x, y): # the more complicated error checking performed by tensor.ger is # assumed to already have been done return Apply(self, [z, a, x, y], [z.type()]) def c_code_cache_version(self): return (2,) def c_code(self, node, name, inputs, outputs, sub): # z_out = alpha * dot(x,y) + beta * z_in # inplace version, set set z_out = z_in # not inplace version, we copy z_in to z_out. 
z_in, a, x, y = inputs z_out, = outputs inplace = int(self.inplace) fail = sub['fail'] sio = StringIO() print(""" float %(name)s_alpha = ((dtype_%(a)s*)(PyArray_DATA(%(a)s)))[0]; if (%(inplace)s && (CudaNdarray_HOST_STRIDES(%(z_in)s)[0] >= 0) && (CudaNdarray_HOST_STRIDES(%(z_in)s)[1] >= 0) && ((CudaNdarray_HOST_DIMS(%(z_in)s)[0] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_in)s)[0] == 1) || (CudaNdarray_HOST_DIMS(%(z_in)s)[1] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_in)s)[1] == 1))) { // The input has an appropriate layout, we work inplace Py_XDECREF(%(z_out)s); %(z_out)s = %(z_in)s; Py_INCREF(%(z_out)s); } else if (%(z_out)s && (%(z_out)s->nd == 2) && (CudaNdarray_HOST_DIMS(%(z_out)s)[0] == CudaNdarray_HOST_DIMS(%(z_in)s)[0]) && (CudaNdarray_HOST_DIMS(%(z_out)s)[1] == CudaNdarray_HOST_DIMS(%(z_in)s)[1]) && (CudaNdarray_HOST_STRIDES(%(z_out)s)[0] >= 0) && (CudaNdarray_HOST_STRIDES(%(z_out)s)[1] >= 0) && ((CudaNdarray_HOST_DIMS(%(z_out)s)[0] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_out)s)[0] == 1) || (CudaNdarray_HOST_DIMS(%(z_out)s)[1] <= 1) || (CudaNdarray_HOST_STRIDES(%(z_out)s)[1] == 1))) { // The existing output has an appropriate layout, // copy the input data into it, then work inplace if (CudaNdarray_CopyFromCudaNdarray(%(z_out)s, %(z_in)s)) { %(fail)s; } } else { // Copy the input, use the copy as output Py_XDECREF(%(z_out)s); %(z_out)s = (CudaNdarray*)CudaNdarray_Copy(%(z_in)s); if (!%(z_out)s) { %(fail)s; } } if (CudaNdarray_sger(%(name)s_alpha, %(x)s, %(y)s, %(z_out)s)) { %(fail)s; } """, file=sio) return sio.getvalue() % locals() gpu_ger_no_inplace = GpuGer(inplace=False) gpu_ger_inplace = GpuGer(inplace=True) class BaseGpuCorrMM(GpuOp): """ Base class for `GpuCorrMM`, `GpuCorrMM_gradWeights` and `GpuCorrMM_gradInputs`. Cannot be used directly. Parameters ---------- border_mode : {'valid', 'full', 'half'} Additionally, the padding size could be directly specified by an integer or a pair of integers subsample Perform subsampling of the output (default: (1, 1)). 
    pad
        *deprecated*, now you should always use border_mode.

    """

    # Forward/grad ops disagree on the exact broadcastable pattern, so we
    # opt out of Theano's automatic broadcast-pattern check.
    check_broadcast = False
    __props__ = ('border_mode', 'subsample')

    def __init__(self, border_mode="valid",
                 subsample=(1, 1),
                 pad=(0, 0)):
        """Validate and normalize `border_mode`/`subsample`.

        The deprecated `pad` argument, when non-zero, is folded into
        `border_mode`; an integer or pair border_mode becomes an explicit
        (pad_h, pad_w) tuple.
        """
        if pad != (0, 0):
            _logger.warning(
                'do not use pad for BaseGpuCorrMM; please set padding in '
                'border_mode parameter, see the docstring for more details')
            # pad is only a legacy alias: it may not be combined with a
            # non-default border_mode.
            if border_mode != "valid":
                raise ValueError("border_mode must be 'valid' if pad is given")
            border_mode = pad
        # A single integer means the same symmetric padding on both axes.
        if isinstance(border_mode, integer_types):
            border_mode = (border_mode, border_mode)
        if isinstance(border_mode, tuple):
            pad_h, pad_w = map(int, border_mode)
            border_mode = (pad_h, pad_w)
        if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or
                border_mode in ('valid', 'full', 'half')):
            raise ValueError(
                'invalid border_mode {}, which must be either '
                '"valid", "full", "half", an integer or a pair of'
                ' integers'.format(border_mode))
        self.border_mode = border_mode
        if len(subsample) != 2:
            raise ValueError("subsample must have two elements")
        self.subsample = subsample

    @property
    def pad(self):
        # Backward-compatible read-only view of the padding implied by
        # border_mode ('valid' means no padding).
        if self.border_mode != 'valid':
            return self.border_mode
        return (0, 0)

    def __str__(self):
        # e.g. "GpuCorrMM{valid, (1, 1)}"
        return '%s{%s, %s}' % (
            self.__class__.__name__,
            self.border_mode,
            str(self.subsample))

    def flops(self, inp, outp):
        """
        Useful with the hack in profilemode to print the MFlops.

        """
        # if the output shape is correct, then this gives the correct
        # flops for any direction, sampling, padding, and border mode
        inputs, filters = inp
        outputs, = outp
        assert inputs[1] == filters[1]
        # nb mul and add by output pixel
        flops = filters[2] * filters[3] * 2
        # nb flops by output image
        flops *= outputs[2] * outputs[3]
        # nb patch multiplied
        flops *= inputs[1] * filters[0] * inputs[0]
        return flops

    def c_headers(self):
        # Headers needed by the generated C code below.
        return ['cuda_ndarray.cuh', '<stdio.h>']

    def c_code_cache_version(self):
        # raise this whenever modifying any of the support_code_files
        return (0, 24)

    def c_support_code_apply(self, node, nodename):
        # REMEMBER TO RAISE c_code_cache_version when changing any of
        # these files
        files = ['corr_gemm.cu']
        codes = [open(os.path.join(os.path.split(__file__)[0], f)).read()
                 for f in files]
        return reduce(str.__add__, codes)

    def c_code_helper(self, bottom, weights, top, direction, sub,
                      height=None, width=None):
        """
        This generates the C code for GpuCorrMM (direction="forward"),
        GpuCorrMM_gradWeights (direction="backprop weights"), and
        GpuCorrMM_gradInputs (direction="backprop inputs").
        Depending on the direction, one of bottom, weights, top will
        receive the output, while the other two serve as inputs.

        Parameters
        ----------
        bottom
            Variable name of the input images in the forward pass,
            or the gradient of the input images in backprop wrt. inputs
        weights
            Variable name of the filters in the forward pass,
            or the gradient of the filters in backprop wrt. weights
        top
            Variable name of the output images / feature maps in the
            forward pass, or the gradient of the outputs in the backprop
            passes
        direction : {'forward', 'backprop weights', 'backprop inputs'}
            "forward" to correlate bottom with weights and store results in
            top, "backprop weights" to do a valid convolution of bottom with
            top (swapping the first two dimensions) and store results in
            weights, and "backprop inputs" to do a full convolution of top
            with weights (swapping the first two dimensions) and store
            results in bottom.
        sub
            Dictionary of substitutions useable to help generating the C code.
        height
            If self.subsample[0] != 1, a variable giving the height of the
            filters for direction="backprop weights" or the height of the
            input images for direction="backprop inputs".
            If self.border_mode == 'half', a variable giving the height of
            the filters for direction="backprop weights".
            Ignored otherwise.
        width
            If self.subsample[1] != 1, a variable giving the width of the
            filters for direction="backprop weights" or the width of the
            input images for direction="backprop inputs".
            If self.border_mode == 'half', a variable giving the width of
            the filters for direction="backprop weights".
            Ignored otherwise.

        """
        dH, dW = self.subsample
        # Encode padding as sentinels for the C side: -1 = 'half',
        # -2 = 'full', otherwise explicit non-negative amounts.
        if self.border_mode == "half":
            padH = padW = -1
        elif self.border_mode == "full":
            padH = padW = -2
        elif isinstance(self.border_mode, tuple):
            padH, padW = self.border_mode
        else:
            assert self.border_mode == "valid"
            padH = padW = 0
        # Map the direction string to the integer code used by corrMM,
        # and pick which of the three variables is the output.
        if direction == "forward":
            direction = 0
            out = top
        elif direction == "backprop weights":
            direction = 1
            out = weights
        elif direction == "backprop inputs":
            direction = 2
            out = bottom
        else:
            raise ValueError("direction must be one of 'forward', "
                             "'backprop weights', 'backprop inputs'")
        # When subsampling, we cannot unambiguously infer the height and width
        # of bottom and weights from top, so we require them to be given.
        # Similarly, when pad="half", we cannot infer the weight size.
        if ((direction != 0) and (dH != 1)) or ((direction == 1) and (padH == -1)):
            if not height:
                raise ValueError("height must be given for backprop with vertical sampling or pad='half'")
            # `height` names a numpy scalar array on the C side; deref it.
            height = '(*(npy_int*)(PyArray_DATA(%s)))' % height
        else:
            height = 'NULL'
        if ((direction != 0) and (dW != 1)) or ((direction == 1) and (padW == -1)):
            if not width:
                raise ValueError("width must be given for backprop with horizontal sampling or pad='half'")
            width = '(*(npy_int*)(PyArray_DATA(%s)))' % width
        else:
            width = 'NULL'
        # Make every local above (direction, dH, dW, padH, padW, height,
        # width, out, ...) available as %(name)s in the template.
        sub = sub.copy()
        sub.update(locals())

        # NOTE(review): original newlines inside this C template were lost in
        # extraction; layout below is reconstructed, tokens are unchanged.
        return """
    // Mandatory args
    int direction = %(direction)s;  // forward, bprop weights, bprop inputs

    // Optional args
    int dH = %(dH)s;
    int dW = %(dW)s;
    int padH = %(padH)s;
    int padW = %(padW)s;

    CudaNdarray * bottom = %(bottom)s;
    CudaNdarray * weights = %(weights)s;
    CudaNdarray * top = %(top)s;
    CudaNdarray * out2 = NULL;

    // Obtain or infer kernel width and height
    // (we need to know it early to be able to handle auto-padding)
    int kH, kW;
    if (direction != 1) {
        // weight is an input variable, we can just read its shape
        kH = CudaNdarray_HOST_DIMS(weights)[2];
        kW = CudaNdarray_HOST_DIMS(weights)[3];
    }
    else {
        if ((dH != 1) || (padH == -1)) {
            // vertical subsampling or half padding, kernel height is specified
            kH = %(height)s;
        }
        else if (padH == -2) {
            // vertical full padding, we can infer the kernel height
            kH = 2 - CudaNdarray_HOST_DIMS(bottom)[2] + (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH;
        }
        else {
            // explicit padding, we can infer the kernel height
            kH = CudaNdarray_HOST_DIMS(bottom)[2] + 2*padH - (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH;
        }
        if ((dW != 1) || (padW == -1)) {
            kW = %(width)s;
        }
        else if (padW == -2) {
            kW = 2 - CudaNdarray_HOST_DIMS(bottom)[3] + (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW;
        }
        else {
            kW = CudaNdarray_HOST_DIMS(bottom)[3] + 2*padW - (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW;
        }
    }

    // Auto-padding if requested
    if (padH == -1) {  // vertical half padding
        padH = kH / 2;
    }
    else if (padH == -2) {  // vertical full padding
        padH = kH - 1;
    }
    else if (padH < 0) {
        PyErr_SetString(PyExc_ValueError, "BaseGpuCorrMM: padH must be >= -2");
        %(fail)s
    }
    if (padW == -1) {  // horizontal half padding
        padW = kW / 2;
    }
    else if (padW == -2) {  // horizontal full padding
        padW = kW - 1;
    }
    else if (padW < 0) {
        PyErr_SetString(PyExc_ValueError, "BaseGpuCorrMM: padW must be >= -2");
        %(fail)s
    }

    // Infer output shape
    int out_dim[4];
    switch(direction) {
    case 0:  // forward pass
        // output is top: (batchsize, num_filters, height, width)
        // height and width: top = (bottom + 2*pad - weight) / sample + 1
        out_dim[0] = CudaNdarray_HOST_DIMS(bottom)[0];
        out_dim[1] = CudaNdarray_HOST_DIMS(weights)[0];
        out_dim[2] = (CudaNdarray_HOST_DIMS(bottom)[2] + 2*padH - CudaNdarray_HOST_DIMS(weights)[2]) / dH + 1;
        out_dim[3] = (CudaNdarray_HOST_DIMS(bottom)[3] + 2*padW - CudaNdarray_HOST_DIMS(weights)[3]) / dW + 1;
        break;
    case 1:  // backprop wrt. weights
        // output is weights: (num_filters, num_channels, height, width)
        // height and width: weights = bottom + 2*pad - (top - 1) * sample
        out_dim[0] = CudaNdarray_HOST_DIMS(top)[1];
        out_dim[1] = CudaNdarray_HOST_DIMS(bottom)[1];
        out_dim[2] = kH;  // already inferred further above
        out_dim[3] = kW;  // how convenient
        break;
    case 2:  // backprop wrt. inputs
        // output is bottom: (batchsize, num_channels, height, width)
        // height and width: bottom = (top - 1) * sample + weights - 2*pad
        out_dim[0] = CudaNdarray_HOST_DIMS(top)[0];
        out_dim[1] = CudaNdarray_HOST_DIMS(weights)[1];
        out_dim[2] = (dH != 1) ? %(height)s : (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH + CudaNdarray_HOST_DIMS(weights)[2] - 2*padH;
        out_dim[3] = (dW != 1) ? %(width)s : (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW + CudaNdarray_HOST_DIMS(weights)[3] - 2*padW;
        break;
    default:
        PyErr_SetString(PyExc_ValueError, "BaseGpuCorrMM: direction must be 0, 1, or 2\\n");
        %(fail)s
    }

    // Prepare output array
    if ( !(%(out)s
           && %(out)s->nd==4
           && CudaNdarray_is_c_contiguous(%(out)s)
           && CudaNdarray_HOST_DIMS(%(out)s)[0]==out_dim[0]
           && CudaNdarray_HOST_DIMS(%(out)s)[1]==out_dim[1]
           && CudaNdarray_HOST_DIMS(%(out)s)[2]==out_dim[2]
           && CudaNdarray_HOST_DIMS(%(out)s)[3]==out_dim[3]))
    {
        Py_XDECREF(%(out)s);
        %(out)s = (CudaNdarray*)CudaNdarray_NewDims(4,out_dim);
        if (NULL == %(out)s)
        {
            PyErr_Format(PyExc_RuntimeError,
                    "BaseGpuCorrMM: Failed to allocate output of %%d x %%d x %%d x %%d",
                    out_dim[0], out_dim[1], out_dim[2], out_dim[3]);
            %(fail)s
        }
    }

    // Call CUDA code
    out2 = corrMM(%(bottom)s, %(weights)s, %(top)s, direction, dH, dW, padH, padW);
    if (out2==NULL){
       %(fail)s
    }
    assert (out2 == %(out)s);

""" % sub


class GpuCorrMM(BaseGpuCorrMM):
    """
    GPU correlation implementation using Matrix Multiplication.

    Parameters
    ----------
    border_mode
        The width of a border of implicit zeros to pad the
        input with. Must be a tuple with 2 elements giving the numbers of
        rows and columns to pad on each side, or a single integer to pad the
        same on all sides, or a string shortcut setting the padding at
        runtime: ``'valid'`` for ``(0, 0)`` (valid convolution, no padding),
        ``'full'`` for ``(kernel_rows - 1, kernel_columns - 1)`` (full
        convolution), ``'half'`` for ``(kernel_rows // 2,
        kernel_columns // 2)`` (same convolution for odd-sized kernels).
        Note that the two widths are each applied twice, once per side
        (left and right, top and bottom).
    subsample
        The subsample operation applied to each output image.
        Should be a tuple with 2 elements.
        `(sv, sh)` is equivalent to `GpuCorrMM(...)(...)[:,:,::sv, ::sh]`,
        but faster.
        Set to `(1, 1)` to disable subsampling.
    pad
        Deprecated alias for `border_mode`.
    Notes
    -----
    Currently, the Op requires the inputs, filters and outputs to be
    C-contiguous. Use :func:`gpu_contiguous
    <theano.sandbox.cuda.basic_ops.gpu_contiguous>` on these arguments
    if needed.

    You can either enable the Theano flag `optimizer_including=conv_gemm`
    to automatically replace all convolution operations with `GpuCorrMM`
    or one of its gradients, or you can use it as a replacement for
    :func:`conv2d <theano.tensor.nnet.conv.conv2d>`, called as
    `GpuCorrMM(subsample=...)(image, filters)`. The latter is currently
    faster, but note that it computes a correlation -- if you need to
    compute a convolution, flip the filters as `filters[:,:,::-1,::-1]`.

    .. warning:: For 700 series Nvidia GPUs of compute capability 3.5 and
        CUDA 5.0 to 6.0, there is a bug in CUBLAS' matrix multiplication
        function that can make GpuCorrMM or its gradients crash for some
        input and filter shapes. So if you have a Tesla K20, Tesla K40,
        Quadro K6000, GeForce GT 640 (DDR5), GeForce GTX 780 (or Ti),
        GeForce GTX TITAN (or Black or Z) and experience a crash, switching
        to CUDA 6.5 or CUDA 4.2 should fix it. If this is not possible,
        changing the input or filter shapes (e.g., the batchsize or number
        of filters) may also work around the CUBLAS bug.

    """

    def __init__(self, border_mode="valid",
                 subsample=(1, 1),
                 pad=(0, 0)):
        super(GpuCorrMM, self).__init__(border_mode, subsample, pad)

    def make_node(self, img, kern):
        # Lift inputs to CudaNdarray variables and build the Apply node.
        # Output broadcast pattern: batch axis from img, filter axis from
        # kern, spatial axes never broadcastable.
        img = as_cuda_ndarray_variable(img)
        kern = as_cuda_ndarray_variable(kern)
        if img.type.ndim != 4:
            raise TypeError('img must be 4D tensor')
        if kern.type.ndim != 4:
            raise TypeError('kern must be 4D tensor')
        broadcastable = [img.type.broadcastable[0], kern.type.broadcastable[0],
                         False, False]
        return Apply(self, [img, kern], [CudaNdarrayType(broadcastable)()])

    def c_code(self, node, nodename, inp, out_, sub):
        # Forward pass: all heavy lifting happens in the shared helper.
        bottom, weights = inp
        top, = out_
        direction = "forward"
        return super(GpuCorrMM, self).c_code_helper(bottom, weights, top,
                                                    direction, sub)

    def grad(self, inp, grads):
        # Gradients are expressed with the two companion ops; the output
        # gradient must be C-contiguous for the GEMM kernels.
        bottom, weights = inp
        top, = grads
        top = gpu_contiguous(top)
        d_bottom = GpuCorrMM_gradInputs(self.border_mode,
                                        self.subsample)(
            weights, top, bottom.shape[-2:])
        d_weights = GpuCorrMM_gradWeights(self.border_mode,
                                          self.subsample)(
            bottom, top, weights.shape[-2:])
        return d_bottom, d_weights


class GpuCorrMM_gradWeights(BaseGpuCorrMM):
    """
    Gradient wrt. filters for `GpuCorrMM`.

    Notes
    -----
    You will not want to use this directly, but rely on Theano's automatic
    differentiation or graph optimization to use it as needed.

    """

    def __init__(self, border_mode="valid",
                 subsample=(1, 1),
                 pad=(0, 0)):
        super(GpuCorrMM_gradWeights, self).__init__(border_mode,
                                                    subsample,
                                                    pad)

    def make_node(self, img, topgrad, shape=None):
        # `shape` (filter height/width) is only required when it cannot be
        # inferred from the other inputs, i.e. with subsampling or 'half'
        # padding; see c_code_helper for the matching C-side logic.
        img = as_cuda_ndarray_variable(img)
        topgrad = as_cuda_ndarray_variable(topgrad)
        if img.type.ndim != 4:
            raise TypeError('img must be 4D tensor')
        if topgrad.type.ndim != 4:
            raise TypeError('topgrad must be 4D tensor')
        if self.subsample != (1, 1) or self.border_mode == "half":
            if shape is None:
                raise ValueError('shape must be given if subsample != (1, 1)'
                                 ' or border_mode == "half"')
            height_width = [shape[0], shape[1]]
            assert shape[0].ndim == 0
            assert shape[1].ndim == 0
        else:
            height_width = []
        broadcastable = [topgrad.type.broadcastable[1],
                         img.type.broadcastable[1],
                         False, False]
        return Apply(self, [img, topgrad] + height_width,
                     [CudaNdarrayType(broadcastable)()])

    def c_code(self, node, nodename, inp, out_, sub):
        bottom, top = inp[:2]
        # Optional scalar inputs are absent when shape was not needed.
        height, width = inp[2:] or (None, None)
        weights, = out_
        direction = "backprop weights"
        return super(GpuCorrMM_gradWeights, self).c_code_helper(
            bottom, weights, top, direction, sub, height, width)

    def grad(self, inp, grads):
        bottom, top = inp[:2]
        weights, = grads
        weights = gpu_contiguous(weights)
        d_bottom = GpuCorrMM_gradInputs(
            self.border_mode, self.subsample)(weights,
                                              top,
                                              bottom.shape[-2:])
        d_top = GpuCorrMM(
            self.border_mode, self.subsample)(bottom, weights)
        # The optional height/width inputs carry no gradient.
        d_height_width = (
            theano.gradient.DisconnectedType()(),
        ) * 2 if len(inp) == 4 else ()
        return (d_bottom, d_top) + d_height_width

    def connection_pattern(self, node):
        if node.nin == 2:
            return [[1], [1]]
        else:
            return [[1], [1], [0], [0]]  # no connection to height, width


class GpuCorrMM_gradInputs(BaseGpuCorrMM):
    """
    Gradient wrt. inputs for `GpuCorrMM`.

    Notes
    -----
    You will not want to use this directly, but rely on Theano's automatic
    differentiation or graph optimization to use it as needed.

    """

    def __init__(self, border_mode="valid",
                 subsample=(1, 1),
                 pad=(0, 0)):
        super(GpuCorrMM_gradInputs, self).__init__(border_mode,
                                                   subsample,
                                                   pad)

    def make_node(self, kern, topgrad, shape=None):
        # `shape` (input image height/width) is only required with
        # subsampling, where the image size cannot be inferred from topgrad.
        kern = as_cuda_ndarray_variable(kern)
        topgrad = as_cuda_ndarray_variable(topgrad)
        if kern.type.ndim != 4:
            raise TypeError('kern must be 4D tensor')
        if topgrad.type.ndim != 4:
            raise TypeError('topgrad must be 4D tensor')
        if self.subsample != (1, 1) and shape is None:
            raise ValueError('shape must be given if subsample != (1, 1)')
        height_width = [shape[0], shape[1]] if self.subsample != (1, 1) else []
        if height_width:
            assert shape[0].ndim == 0
            assert shape[1].ndim == 0
        broadcastable = [topgrad.type.broadcastable[0],
                         kern.type.broadcastable[1],
                         False, False]
        return Apply(self, [kern, topgrad] + height_width,
                     [CudaNdarrayType(broadcastable)()])

    def c_code(self, node, nodename, inp, out_, sub):
        weights, top = inp[:2]
        height, width = inp[2:] or (None, None)
        bottom, = out_
        direction = "backprop inputs"
        return super(GpuCorrMM_gradInputs, self).c_code_helper(
            bottom, weights, top, direction, sub, height, width)

    def grad(self, inp, grads):
        weights, top = inp[:2]
        bottom, = grads
        bottom = gpu_contiguous(bottom)
        d_weights = GpuCorrMM_gradWeights(
            self.border_mode, self.subsample)(
                bottom, top, weights.shape[-2:])
        d_top = GpuCorrMM(
            self.border_mode, self.subsample)(bottom, weights)
        # The optional height/width inputs carry no gradient.
        d_height_width = (
            theano.gradient.DisconnectedType()(),
        ) * 2 if len(inp) == 4 else ()
        return (d_weights, d_top) + d_height_width

    def connection_pattern(self, node):
        if node.nin == 2:
            return [[1], [1]]
        else:
            return [[1], [1], [0], [0]]  # no connection to height, width


class BaseGpuCorr3dMM(GpuOp):
    """
    Base class for `GpuCorr3dMM`, `GpuCorr3dMM_gradWeights` and
    `GpuCorr3dMM_gradInputs`.
    Cannot be used directly.
    """

    __props__ = ('border_mode', 'subsample', 'pad')

    def __init__(self, border_mode="valid",
                 subsample=(1, 1, 1),
                 pad=(0, 0, 0)):
        """Validate parameters.

        Unlike the 2D BaseGpuCorrMM, padding is kept in a separate `pad`
        attribute ('half', 'full', or a 3-tuple) and border_mode must stay
        'valid'.
        """
        if border_mode != "valid":
            raise ValueError("border_mode must be 'valid'")
        self.border_mode = border_mode
        if len(subsample) != 3:
            raise ValueError("subsample must have three elements")
        self.subsample = subsample
        if (pad not in ("half", "full")) and (len(pad) != 3):
            raise ValueError("pad must be 'half', 'full', or have three elements")
        self.pad = pad

    def __str__(self):
        # e.g. "GpuCorr3dMM{valid, (1, 1, 1), pad=(0, 0, 0)}"
        return '%s{%s, %s, pad=%r}' % (
            self.__class__.__name__,
            self.border_mode,
            str(self.subsample),
            self.pad)

    def flops(self, inp, outp):
        """ Useful with the hack in profilemode to print the MFlops"""
        # if the output shape is correct, then this gives the correct
        # flops for any direction, sampling, padding, and border mode
        inputs, filters = inp
        outputs, = outp
        assert inputs[1] == filters[1]
        # nb mul and add by output pixel
        flops = filters[2] * filters[3] * filters[4] * 2
        # nb flops by output image
        flops *= outputs[2] * outputs[3] * outputs[4]
        # nb patch multiplied
        flops *= inputs[1] * filters[0] * inputs[0]
        return flops

    def c_headers(self):
        # Headers needed by the generated C code below.
        return ['cuda_ndarray.cuh', '<stdio.h>']

    def c_code_cache_version(self):
        # raise this whenever modifying any of the support_code_files
        return (0, 23)

    def c_support_code_apply(self, node, nodename):
        # REMEMBER TO RAISE c_code_cache_version when changing any of
        # these files
        files = ['corr3d_gemm.cu']
        codes = [open(os.path.join(os.path.split(__file__)[0], f)).read()
                 for f in files]
        return reduce(str.__add__, codes)

    def c_code_helper(self, bottom, weights, top, direction, sub,
                      height=None, width=None, depth=None):
        """
        This generates the C code for GpuCorrMM (direction="forward"),
        GpuCorrMM_gradWeights (direction="backprop weights"), and
        GpuCorrMM_gradInputs (direction="backprop inputs").
        Depending on the direction, one of bottom, weights, top will
        receive the output, while the other two serve as inputs.

        Parameters
        ----------
        bottom
            Variable name of the input images in the forward pass,
            or the gradient of the input images in backprop wrt. inputs.
        weights
            Variable name of the filters in the forward pass,
            or the gradient of the filters in backprop wrt. weights.
        top
            Variable name of the output images / feature maps in the
            forward pass, or the gradient of the outputs in the backprop
            passes.
        direction : {'forward', 'backprop weights', 'backprop inputs'}
            "forward" to correlate bottom with weights and store results in
            top, "backprop weights" to do a valid convolution of bottom with
            top (swapping the first two dimensions) and store results in
            weights, and "backprop inputs" to do a full convolution of top
            with weights (swapping the first two dimensions) and store
            results in bottom.
        sub
            Dictionary of substitutions useable to help generating the C code.
        height
            If self.subsample[0] != 1, a variable giving the height of the
            filters for direction="backprop weights" or the height of the
            input images for direction="backprop inputs".
            If self.pad == 'half', a variable giving the height of the
            filters for direction="backprop weights".
            Ignored otherwise.
        width
            If self.subsample[1] != 1, a variable giving the width of the
            filters for direction="backprop weights" or the width of the
            input images for direction="backprop inputs".
            If self.pad == 'half', a variable giving the width of the
            filters for direction="backprop weights".
            Ignored otherwise.
        depth
            If self.subsample[2] != 1, a variable giving the depth of the
            filters for direction="backprop weights" or the depth of the
            input images for direction="backprop inputs".
            If self.pad == 'half', a variable giving the depth of the
            filters for direction="backprop weights".
            Ignored otherwise.

        """
        if self.border_mode != "valid":
            raise ValueError("mode must be 'valid'")
        dH, dW, dD = self.subsample
        # Encode padding as sentinels for the C side: -1 = 'half',
        # -2 = 'full', otherwise explicit non-negative amounts.
        if self.pad == "half":
            padH = padW = padD = -1
        elif self.pad == "full":
            padH = padW = padD = -2
        else:
            padH, padW, padD = self.pad
        # Map the direction string to the integer code used by corr3dMM,
        # and pick which of the three variables is the output.
        if direction == "forward":
            direction = 0
            out = top
        elif direction == "backprop weights":
            direction = 1
            out = weights
        elif direction == "backprop inputs":
            direction = 2
            out = bottom
        else:
            raise ValueError("direction must be one of 'forward', "
                             "'backprop weights', 'backprop inputs'")
        # When subsampling, we cannot unambiguously infer the height and width
        # of bottom and weights from top, so we require them to be given.
        # Similarly, when pad="half", we cannot infer the weight size.
        if ((direction != 0) and (dH != 1)) or ((direction == 1) and (padH == -1)):
            if not height:
                raise ValueError("height must be given for backprop with vertical sampling or pad='half'")
            # `height` names a numpy scalar array on the C side; deref it.
            height = '(*(npy_int*)(PyArray_DATA(%s)))' % height
        else:
            height = 'NULL'
        if ((direction != 0) and (dW != 1)) or ((direction == 1) and (padW == -1)):
            if not width:
                raise ValueError("width must be given for backprop with horizontal sampling or pad='half'")
            width = '(*(npy_int*)(PyArray_DATA(%s)))' % width
        else:
            width = 'NULL'
        if ((direction != 0) and (dD != 1)) or ((direction == 1) and (padD == -1)):
            if not depth:
                # NOTE(review): message says "horizontal sampling" but this
                # branch guards the depth dimension; likely copy-paste from
                # the width case. Left unchanged (runtime string).
                raise ValueError("depth must be given for backprop with horizontal sampling or pad='half'")
            depth = '(*(npy_int*)(PyArray_DATA(%s)))' % depth
        else:
            depth = 'NULL'
        # Make every local above available as %(name)s in the template.
        sub = sub.copy()
        sub.update(locals())

        # NOTE(review): original newlines inside this C template were lost in
        # extraction; layout below is reconstructed, tokens are unchanged.
        return """
    // Mandatory args
    int direction = %(direction)s;  // forward, bprop weights, bprop inputs

    // Optional args
    int dH = %(dH)s;
    int dW = %(dW)s;
    int dD = %(dD)s;
    int padH = %(padH)s;
    int padW = %(padW)s;
    int padD = %(padD)s;

    CudaNdarray * bottom = %(bottom)s;
    CudaNdarray * weights = %(weights)s;
    CudaNdarray * top = %(top)s;
    CudaNdarray * out2 = NULL;

    // Obtain or infer kernel width and height
    // (we need to know it early to be able to handle auto-padding)
    int kH, kW, kD;
    if (direction != 1) {
        // weight is an input variable, we can just read its shape
        kH = CudaNdarray_HOST_DIMS(weights)[2];
        kW = CudaNdarray_HOST_DIMS(weights)[3];
        kD = CudaNdarray_HOST_DIMS(weights)[4];
    }
    else {
        if ((dH != 1) || (padH == -1)) {
            // vertical subsampling or half padding, kernel height is specified
            kH = %(height)s;
        }
        else if (padH == -2) {
            // vertical full padding, we can infer the kernel height
            kH = 2 - CudaNdarray_HOST_DIMS(bottom)[2] + (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH;
        }
        else {
            // explicit padding, we can infer the kernel height
            kH = CudaNdarray_HOST_DIMS(bottom)[2] + 2*padH - (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH;
        }
        if ((dW != 1) || (padW == -1)) {
            kW = %(width)s;
        }
        else if (padW == -2) {
            kW = 2 - CudaNdarray_HOST_DIMS(bottom)[3] + (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW;
        }
        else {
            kW = CudaNdarray_HOST_DIMS(bottom)[3] + 2*padW - (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW;
        }
        if ((dD != 1) || (padD == -1)) {
            kD = %(depth)s;
        }
        else if (padD == -2) {
            kD = 2 - CudaNdarray_HOST_DIMS(bottom)[4] + (CudaNdarray_HOST_DIMS(top)[4] - 1) * dD;
        }
        else {
            kD = CudaNdarray_HOST_DIMS(bottom)[4] + 2*padD - (CudaNdarray_HOST_DIMS(top)[4] - 1) * dD;
        }
    }

    // Auto-padding if requested
    if (padH == -1) {  // vertical half padding
        padH = kH / 2;
    }
    else if (padH == -2) {  // vertical full padding
        padH = kH - 1;
    }
    else if (padH < 0) {
        PyErr_SetString(PyExc_ValueError, "BaseGpuCorr3dMM: padH must be >= -2");
        %(fail)s
    }
    if (padW == -1) {  // horizontal half padding
        padW = kW / 2;
    }
    else if (padW == -2) {  // horizontal full padding
        padW = kW - 1;
    }
    else if (padW < 0) {
        PyErr_SetString(PyExc_ValueError, "BaseGpuCorr3dMM: padW must be >= -2");
        %(fail)s
    }
    if (padD == -1) {  // horizontal half padding
        padD = kD / 2;
    }
    else if (padD == -2) {  // horizontal full padding
        padD = kD - 1;
    }
    else if (padD < 0) {
        PyErr_SetString(PyExc_ValueError, "BaseGpuCorr3dMM: padD must be >= -2");
        %(fail)s
    }

    // Infer output shape
    int out_dim[5];
    switch(direction) {
    case 0:  // forward pass
        // output is top: (batchsize, num_filters, height, width, depth)
        // height and width: top = (bottom + 2*pad - weight) / sample + 1
        out_dim[0] = CudaNdarray_HOST_DIMS(bottom)[0];
        out_dim[1] = CudaNdarray_HOST_DIMS(weights)[0];
        out_dim[2] = (CudaNdarray_HOST_DIMS(bottom)[2] + 2*padH - CudaNdarray_HOST_DIMS(weights)[2]) / dH + 1;
        out_dim[3] = (CudaNdarray_HOST_DIMS(bottom)[3] + 2*padW - CudaNdarray_HOST_DIMS(weights)[3]) / dW + 1;
        out_dim[4] = (CudaNdarray_HOST_DIMS(bottom)[4] + 2*padD - CudaNdarray_HOST_DIMS(weights)[4]) / dD + 1;
        break;
    case 1:  // backprop wrt. weights
        // output is weights: (num_filters, num_channels, height, width, depth)
        // height, width and depth: weights = bottom + 2*pad - (top-1) * sample
        out_dim[0] = CudaNdarray_HOST_DIMS(top)[1];
        out_dim[1] = CudaNdarray_HOST_DIMS(bottom)[1];
        out_dim[2] = kH;  // already inferred further above
        out_dim[3] = kW;  // how convenient
        out_dim[4] = kD;
        break;
    case 2:  // backprop wrt. inputs
        // output is bottom: (batchsize, num_channels, height, width, depth)
        // height, width and depth: bottom = (top-1) * sample + weights - 2*pad
        out_dim[0] = CudaNdarray_HOST_DIMS(top)[0];
        out_dim[1] = CudaNdarray_HOST_DIMS(weights)[1];
        out_dim[2] = (dH != 1) ? %(height)s : (CudaNdarray_HOST_DIMS(top)[2] - 1) * dH + CudaNdarray_HOST_DIMS(weights)[2] - 2*padH;
        out_dim[3] = (dW != 1) ? %(width)s : (CudaNdarray_HOST_DIMS(top)[3] - 1) * dW + CudaNdarray_HOST_DIMS(weights)[3] - 2*padW;
        out_dim[4] = (dD != 1) ? %(depth)s : (CudaNdarray_HOST_DIMS(top)[4] - 1) * dD + CudaNdarray_HOST_DIMS(weights)[4] - 2*padD;
        break;
    default:
        PyErr_SetString(PyExc_ValueError, "BaseGpuCorr3dMM: direction must be 0, 1, or 2\\n");
        %(fail)s
    }

    // Prepare output array
    if (!(%(out)s
          && %(out)s->nd == 5
          && CudaNdarray_is_c_contiguous(%(out)s)
          && CudaNdarray_HOST_DIMS(%(out)s)[0] == out_dim[0]
          && CudaNdarray_HOST_DIMS(%(out)s)[1] == out_dim[1]
          && CudaNdarray_HOST_DIMS(%(out)s)[2] == out_dim[2]
          && CudaNdarray_HOST_DIMS(%(out)s)[3] == out_dim[3]
          && CudaNdarray_HOST_DIMS(%(out)s)[4] == out_dim[4]))
    {
        Py_XDECREF(%(out)s);
        %(out)s = (CudaNdarray*)CudaNdarray_NewDims(5, out_dim);
        if (NULL == %(out)s)
        {
            PyErr_Format(PyExc_RuntimeError,
                         "BaseGpuCorr3dM: Failed to allocate output of %%d x %%d x %%d x %%d x %%d",
                         out_dim[0], out_dim[1], out_dim[2], out_dim[3], out_dim[4]);
            %(fail)s
        }
    }

    // Call CUDA code
    out2 = corr3dMM(%(bottom)s, %(weights)s, %(top)s, direction, dH, dW, dD, padH, padW, padD);
    if (out2==NULL){
        %(fail)s
    }
    assert (out2 == %(out)s);

""" % sub


class GpuCorr3dMM(BaseGpuCorr3dMM):
    """GPU correlation implementation using Matrix Multiplication.

    Parameters
    ----------
    border_mode
        Currently supports "valid" only; "full" can be simulated by setting
        `pad="full"` (at the cost of performance), or by using
        `GpuCorrMM_gradInputs`.
    subsample
        The subsample operation applied to each output image. Should be a
        tuple with 3 elements. `(sv, sh, sl)` is equivalent to
        `GpuCorrMM(...)(...)[:,:,::sv, ::sh, ::sl]`, but faster.
        Set to `(1, 1, 1)` to disable subsampling.
    pad
        The width of a border of implicit zeros to pad the input image with.
        Should be a tuple with 3 elements giving the numbers of rows and
        columns to pad on each side, or "half" to set the padding to
        `(kernel_rows // 2, kernel_columns // 2, kernel_depth // 2)`, or
        "full" to set the padding to `(kernel_rows - 1, kernel_columns - 1,
        kernel_depth - 1)` at runtime.
        Set to `(0, 0, 0)` to disable padding.
    Notes
    -----
    Currently, the Op requires the inputs, filters and outputs to be
    C-contiguous. Use :func:`gpu_contiguous
    <theano.sandbox.cuda.basic_ops.gpu_contiguous>` on these arguments
    if needed.

    .. warning:: For 700 series Nvidia GPUs of compute capability 3.5 and
        CUDA 5.0 to 6.0, there is a bug in CUBLAS' matrix multiplication
        function that can make GpuCorrMM or its gradients crash for some
        input and filter shapes. So if you have a Tesla K20, Tesla K40,
        Quadro K6000, GeForce GT 640 (DDR5), GeForce GTX 780 (or Ti),
        GeForce GTX TITAN (or Black or Z) and experience a crash, switching
        to CUDA 6.5 or CUDA 4.2 should fix it. If this is not possible,
        changing the input or filter shapes (e.g., the batchsize or number
        of filters) may also work around the CUBLAS bug.

    """

    def __init__(self, border_mode="valid",
                 subsample=(1, 1, 1),
                 pad=(0, 0, 0)):
        super(GpuCorr3dMM, self).__init__(border_mode, subsample, pad)

    def make_node(self, img, kern):
        # Lift inputs to CudaNdarray variables and build the Apply node.
        # Output broadcast pattern: batch axis from img, filter axis from
        # kern, the three spatial axes never broadcastable.
        img = as_cuda_ndarray_variable(img)
        kern = as_cuda_ndarray_variable(kern)
        if img.type.ndim != 5:
            raise TypeError('img must be 5D tensor')
        if kern.type.ndim != 5:
            raise TypeError('kern must be 5D tensor')
        broadcastable = [img.type.broadcastable[0],
                         kern.type.broadcastable[0],
                         False, False, False]
        return Apply(self, [img, kern], [CudaNdarrayType(broadcastable)()])

    def c_code(self, node, nodename, inp, out_, sub):
        # Forward pass: all heavy lifting happens in the shared helper.
        bottom, weights = inp
        top, = out_
        direction = "forward"
        return super(GpuCorr3dMM, self).c_code_helper(bottom, weights, top,
                                                      direction, sub)

    def grad(self, inp, grads):
        # Gradients are expressed with the two companion ops; the output
        # gradient must be C-contiguous for the GEMM kernels.
        bottom, weights = inp
        top, = grads
        top = gpu_contiguous(top)
        d_bottom = GpuCorr3dMM_gradInputs(self.border_mode,
                                          self.subsample,
                                          self.pad)(weights,
                                                    top,
                                                    bottom.shape[-3:])
        d_weights = GpuCorr3dMM_gradWeights(self.border_mode,
                                            self.subsample,
                                            self.pad)(bottom,
                                                      top,
                                                      weights.shape[-3:])
        return d_bottom, d_weights


class GpuCorr3dMM_gradWeights(BaseGpuCorr3dMM):
    """Gradient wrt. filters for `GpuCorr3dMM`.

    Notes
    -----
    You will not want to use this directly, but rely on Theano's automatic
    differentiation or graph optimization to use it as needed.

    """

    def __init__(self, border_mode="valid",
                 subsample=(1, 1, 1),
                 pad=(0, 0, 0)):
        super(GpuCorr3dMM_gradWeights, self).__init__(border_mode,
                                                      subsample,
                                                      pad)

    def make_node(self, img, topgrad, shape=None):
        # `shape` (filter height/width/depth) is only required when it
        # cannot be inferred, i.e. with subsampling or 'half' padding.
        img = as_cuda_ndarray_variable(img)
        topgrad = as_cuda_ndarray_variable(topgrad)
        if shape is not None:
            shape = as_tensor_variable(shape)
        if img.type.ndim != 5:
            raise TypeError('img must be 5D tensor')
        if topgrad.type.ndim != 5:
            raise TypeError('topgrad must be 5D tensor')
        if self.subsample != (1, 1, 1) or self.pad == "half":
            if shape is None:
                raise ValueError('shape must be given if subsample != (1, 1, 1), or pad == "half"')
            height_width_depth = [shape[0], shape[1], shape[2]]
        else:
            height_width_depth = []
        broadcastable = [topgrad.type.broadcastable[1],
                         img.type.broadcastable[1],
                         False, False, False]
        return Apply(self, [img, topgrad] + height_width_depth,
                     [CudaNdarrayType(broadcastable)()])

    def c_code(self, node, nodename, inp, out_, sub):
        bottom, top = inp[:2]
        # Optional scalar inputs are absent when shape was not needed.
        height, width, depth = inp[2:] or (None, None, None)
        weights, = out_
        direction = "backprop weights"
        return super(GpuCorr3dMM_gradWeights, self).c_code_helper(
            bottom, weights, top, direction, sub, height, width, depth)

    def grad(self, inp, grads):
        bottom, top = inp[:2]
        weights, = grads
        weights = gpu_contiguous(weights)
        d_bottom = GpuCorr3dMM_gradInputs(self.border_mode,
                                          self.subsample,
                                          self.pad)(weights,
                                                    top,
                                                    bottom.shape[-3:])
        d_top = GpuCorr3dMM(self.border_mode,
                            self.subsample,
                            self.pad)(bottom, weights)
        # The optional height/width/depth inputs carry no gradient.
        d_height_width_depth = (theano.gradient.DisconnectedType()(),) * 3 \
            if len(inp) == 5 else ()
        return (d_bottom, d_top) + d_height_width_depth

    def connection_pattern(self, node):
        if node.nin == 2:
            return [[1], [1]]
        else:
            return [[1], [1], [0], [0], [0]]  # no connection to height, width, depth


class GpuCorr3dMM_gradInputs(BaseGpuCorr3dMM):
    """Gradient wrt. inputs for `GpuCorr3dMM`.

    Notes
    -----
    You will not want to use this directly, but rely on Theano's automatic
    differentiation or graph optimization to use it as needed.

    """

    def __init__(self, border_mode="valid",
                 subsample=(1, 1, 1),
                 pad=(0, 0, 0)):
        super(GpuCorr3dMM_gradInputs, self).__init__(border_mode,
                                                     subsample,
                                                     pad)

    def make_node(self, kern, topgrad, shape=None):
        # `shape` (input image height/width/depth) is only required with
        # subsampling, where the image size cannot be inferred from topgrad.
        kern = as_cuda_ndarray_variable(kern)
        topgrad = as_cuda_ndarray_variable(topgrad)
        if kern.type.ndim != 5:
            raise TypeError('kern must be 5D tensor')
        if topgrad.type.ndim != 5:
            raise TypeError('topgrad must be 5D tensor')
        if self.subsample != (1, 1, 1) and shape is None:
            raise ValueError('shape must be given if subsample != (1, 1, 1)')
        height_width_depth = [shape[0], shape[1], shape[2]] \
            if self.subsample != (1, 1, 1) else []
        broadcastable = [topgrad.type.broadcastable[0],
                         kern.type.broadcastable[1],
                         False, False, False]
        return Apply(self, [kern, topgrad] + height_width_depth,
                     [CudaNdarrayType(broadcastable)()])

    def c_code(self, node, nodename, inp, out_, sub):
        weights, top = inp[:2]
        height, width, depth = inp[2:] or (None, None, None)
        bottom, = out_
        direction = "backprop inputs"
        return super(GpuCorr3dMM_gradInputs, self).c_code_helper(
            bottom, weights, top, direction, sub, height, width, depth)

    def grad(self, inp, grads):
        weights, top = inp[:2]
        bottom, = grads
        bottom = gpu_contiguous(bottom)
        d_weights = GpuCorr3dMM_gradWeights(
            self.border_mode, self.subsample, self.pad)(
                bottom, top, weights.shape[-3:])
        d_top = GpuCorr3dMM(
            self.border_mode, self.subsample, self.pad)(
                bottom, weights)
        # The optional height/width/depth inputs carry no gradient.
        d_height_width_depth = (theano.gradient.DisconnectedType()(),)\
            * 3 if len(inp) == 5 else ()
        return (d_weights, d_top) + d_height_width_depth

    def connection_pattern(self, node):
        if node.nin == 2:
            return [[1], [1]]
        else:
            return [[1], [1], [0], [0], [0]]  # no connection to height, width, depth


##
# Not really a BLAS operation, but whatever.
# class GpuConv(GpuOp): """ Implement the batched and stacked 2d convolution on the gpu. Parameters ---------- version Each version of c_code implements many kernel for the convolution. By default we try to guess the best one. You can force one version with this parameter. This parameter is used by the tests. direction_hint : {'forward', 'bprop weights', 'bprop inputs'} Serves as a hint for graph optimizers replacing GpuConv by other implementations. If the GpuConv is inserted automatically, we take its value from ConvOp. verbose For value of 1,2 and 3. Print more information during the execution of the convolution. Mostly used for optimization or debugging. kshp The size of the kernel. If provided, can generate faster code. If the GpuConv op is automatically inserted, We take its value automatically from the Conv op. imshp The size of the image. Not used for code generation but allows to select an experimental new version in another repo. max_threads_dim0 The maximum number of threads for the block size dimensions 0 (blockDim.x) used by the GPU function. nkern The number of kernels. Not used for this op, but can be used by graph optimizers to select a more optimal convolution implementation. If the GpuConv op is inserted automatically, we take its value from the Conv op. bsize The batch size. Not used for this op, but can be used by graph optimizers to select a more optimal convolution implementation. If the GpuConv op is inserted automatically, we take its value from the Conv op. fft_opt Deactivate fft_opt optimization at the op level when set to False. Note that by default fft optimization aren't enabled. See :ref:`convolution documentation <libdoc_tensor_nnet_conv>` to enable them. 
""" check_broadcast = False @staticmethod def logical_output_shape_2d(imshp, kshp, mode): if mode == 'valid': return imshp[0] - kshp[0] + 1, imshp[1] - kshp[1] + 1 if mode == 'full': return imshp[0] + kshp[0] - 1, imshp[1] + kshp[1] - 1 raise ValueError(mode) def __init__(self, border_mode, subsample=(1, 1), logical_img_hw=None, logical_kern_hw=None, logical_kern_align_top=True, version=-1, direction_hint=None, verbose=0, kshp=None, imshp=None, max_threads_dim0=None, nkern=None, bsize=None, fft_opt=True): self.border_mode = border_mode if version != -1: raise Exception( """GpuConv with version!=-1 is disabled as we do not test it anymore. It probably work, so you probably can just comment this error and use it. But we want to make sure you know about that. Also, this Op is pretty slow and isn't used by default anymore. We strongly suggest to use GpuCorrMM that is much faster and implement all the functionality (at a cost of some extra memory usage). If you can use cuDNN, that is even better. """) self.subsample = subsample if logical_img_hw is not None: h, w = logical_img_hw # TODO: reconsider this... since shapes are not given in # constructor, maybe a multiplier + offset is a more # appropriate way of passing this logical grid logical_img_hw = tuple(logical_img_hw) self.logical_img_hw = logical_img_hw if logical_kern_hw is not None: h, w = logical_kern_hw # TODO: reconsider this... 
since shapes are not given in # constructor, maybe a multiplier + offset is a more # appropriate way of passing this logical grid logical_kern_hw = tuple(logical_kern_hw) self.logical_kern_hw = logical_kern_hw self.logical_kern_align_top = logical_kern_align_top self.version = version self.direction_hint = direction_hint self.verbose = verbose self.kshp = kshp self.imshp = imshp self.max_threads_dim0 = max_threads_dim0 self.nkern = nkern self.bsize = bsize self.fft_opt = fft_opt def __eq__(self, other): return type(self) == type(other) \ and self.border_mode == other.border_mode \ and self.subsample == other.subsample \ and self.logical_img_hw == other.logical_img_hw \ and self.logical_kern_hw == other.logical_kern_hw \ and self.logical_kern_align_top == other.logical_kern_align_top \ and self.version == other.version \ and self.verbose == other.verbose \ and self.kshp == other.kshp\ and self.imshp == other.imshp\ and self.max_threads_dim0 == other.max_threads_dim0 def __setstate__(self, d): self.__dict__.update(d) if not hasattr(self, "imshp"): self.imshp = None if not hasattr(self, "max_threads_dim0"): self.max_threads_dim0 = None if not hasattr(self, "direction_hint"): self.direction_hint = None if not hasattr(self, "nkern"): self.nkern = None if not hasattr(self, "bsize"): self.bsize = None if not hasattr(self, "fft_opt"): self.fft_opt = True def __hash__(self): # don't use hash(self.version) as hash(-1)==-2 and # hash(-2)==-2 in python! 
return hash(type(self)) \ ^ hash(self.border_mode) \ ^ hash(self.subsample) \ ^ hash(self.logical_img_hw) \ ^ hash(self.logical_kern_hw) \ ^ hash(self.logical_kern_align_top) \ ^ self.version \ ^ hash(self.verbose) \ ^ hash(self.kshp)\ ^ hash(self.imshp)\ ^ hash(self.max_threads_dim0) def __str__(self): return '%s{%s, %s, %s, %s, %s, %s, %s}' % ( self.__class__.__name__, self.border_mode, str(self.subsample), str(self.logical_img_hw), str(self.logical_kern_hw), str(self.logical_kern_align_top), str(self.imshp), str(self.kshp)) def make_node(self, img, kern): if img.type.ndim != 4: raise TypeError('img must be 4D tensor') if kern.type.ndim != 4: raise TypeError('kern must be 4D tensor') broadcastable = [img.type.broadcastable[0], kern.type.broadcastable[0], False, False] return Apply(self, [img, kern], [CudaNdarrayType(broadcastable)()]) def flops(self, inputs, outputs): """ Useful with the hack in profilemode to print the MFlops""" images, kerns = inputs out, = outputs assert images[1] == kerns[1] flops = 0 if self.border_mode == "valid": # nb mul and add by output pixel flops = kerns[2] * kerns[3] * 2 # nb flops by output image flops *= out[2] * out[3] # nb patch multiplied flops *= images[1] * kerns[0] * images[0] else: flops = (images[0] * kerns[0] * images[1] * kerns[2] * kerns[3] * images[2] * images[3] * 2) return flops def prepare_node(self, node, storage_map, compute_map): if node.op.max_threads_dim0 is None: cuda = theano.sandbox.cuda device_id = cuda.use.device_number if device_id is None: cuda.use("gpu", force=False, default_to_move_computation_to_gpu=False, move_shared_float32_to_gpu=False, enable_cuda=False, test_driver=True) device_id = cuda.use.device_number cuda_ndarray = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray prop = cuda_ndarray.device_properties(device_id) node.op.max_threads_dim0 = prop['maxThreadsDim0'] def c_compile_args(self): nb = 0 if (self.kshp is not None) and (self.kshp[1] is not None): nb = self.kshp[1] return 
['-DTHEANO_KERN_WID=' + str(nb)] # ,'-g','-G'] def c_headers(self): return ['cuda_ndarray.cuh', '<stdio.h>'] def c_code_cache_version(self): # raise this whenever modifying any of the support_code_files return (0, 23) def c_support_code_apply(self, node, nodename): # REMEMBER TO RAISE c_code_cache_version when changing any of # these files files = ['conv_kernel.cu', 'conv_full_kernel.cu', 'conv.cu'] codes = [open(os.path.join(os.path.split(__file__)[0], f)).read() for f in files] return reduce(str.__add__, codes) def c_code(self, node, nodename, inp, out_, sub): img, kern = inp out, = out_ dx = self.subsample[0] dy = self.subsample[1] version = self.version verbose = self.verbose sub = sub.copy() max_threads_dim0 = self.max_threads_dim0 if self.border_mode == "valid": bmode = 1 else: assert self.border_mode == "full" bmode = 0 if max_threads_dim0 is None: raise NotImplementedError("GpuConv.c_code should not be called " "directly. It should be called by " "make_thunk() that add some information " "related to the selected GPU.") sub.update(locals()) return """ //Mandatory args int mode = %(bmode)s; //Optional args int version = %(version)s; int verbose = %(verbose)s; int dx = %(dx)s; int dy = %(dy)s; // TODO, make out be decref before we alloc out2! CudaNdarray * out2 = (CudaNdarray *)CudaNdarray_Conv(%(img)s, %(kern)s, %(out)s, mode, dx, dy, version, verbose, %(max_threads_dim0)s); Py_XDECREF(%(out)s); %(out)s = out2; if (%(out)s==NULL){ %(fail)s } """ % sub class GpuDownsampleFactorMax(GpuOp): """ Implement downsample with max on the gpu. 
""" def __init__(self, ds, ignore_border=False): self.ds = tuple(ds) self.ignore_border = ignore_border def __eq__(self, other): return (type(self) == type(other) and self.ds == other.ds and self.ignore_border == other.ignore_border) def __hash__(self): return hash(type(self)) ^ hash(self.ds) ^ hash(self.ignore_border) def __str__(self): return '%s{%s,%s}' % (self.__class__.__name__, self.ds, self.ignore_border) def make_node(self, x): if not isinstance(x.type, CudaNdarrayType): raise TypeError() if not x.type.ndim == 4: raise TypeError() return Apply(self, [x], [x.type()]) # def perform(self, node, input_storage, output_storage): # raise NotImplementedError('only C is implemented') def c_code_cache_version(self): return (6) def c_code(self, node, nodename, inp, out, sub): x, = inp z, = out fail = sub['fail'] ds0, ds1 = self.ds ignore_border = int(self.ignore_border) return """ int dims[4], xdim2, xdim3; if (%(x)s->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuDownsampleFactorMax: rank error"); %(fail)s; } xdim2 = CudaNdarray_HOST_DIMS(%(x)s)[2]; xdim3 = CudaNdarray_HOST_DIMS(%(x)s)[3]; dims[0] = CudaNdarray_HOST_DIMS(%(x)s)[0]; dims[1] = CudaNdarray_HOST_DIMS(%(x)s)[1]; dims[2] = xdim2 / %(ds0)s; dims[3] = xdim3 / %(ds1)s; if (! %(ignore_border)s) { dims[2] += (xdim2%%(%(ds0)s)?1:0); dims[3] += (xdim3%%(%(ds1)s)?1:0); } if(dims[3]>512){ PyErr_Format(PyExc_ValueError, "GpuDownsampleFactorMax: last dimention size of %%d" " is bigger then 512. 
This case is not implemented.", dims[3]); %(fail)s; } if ((NULL == %(z)s) || (CudaNdarray_HOST_DIMS(%(z)s)[0] != dims[0]) || (CudaNdarray_HOST_DIMS(%(z)s)[1] != dims[1]) || (CudaNdarray_HOST_DIMS(%(z)s)[2] != dims[2]) || (CudaNdarray_HOST_DIMS(%(z)s)[3] != dims[3])) { Py_XDECREF(%(z)s); %(z)s = (CudaNdarray*)CudaNdarray_New(); if ((NULL == %(z)s) || CudaNdarray_alloc_contiguous(%(z)s, 4, dims)) { Py_XDECREF(%(z)s); %(z)s = NULL; PyErr_SetString(PyExc_ValueError, "GpuDownsampleFactorMax:" "Was not able to allocate output!"); %(fail)s; } } { dim3 grid(std::min(dims[0] * dims[1], 65535), dims[2]); //dim3 block(std::min(dims[3], 512)); //TODO: implement this by supporting more outputs than threads dim3 block(dims[3]); if ((grid.x*grid.y) && dims[3]) kMaxPool_%(nodename)s<%(ds0)s, %(ds1)s> <<<grid, block, xdim3*sizeof(float)>>>( dims[0], dims[1], dims[2], dims[3], xdim2, xdim3, CudaNdarray_DEV_DATA(%(x)s), CudaNdarray_HOST_STRIDES(%(x)s)[0], CudaNdarray_HOST_STRIDES(%(x)s)[1], CudaNdarray_HOST_STRIDES(%(x)s)[2], CudaNdarray_HOST_STRIDES(%(x)s)[3], CudaNdarray_DEV_DATA(%(z)s), CudaNdarray_HOST_STRIDES(%(z)s)[0], CudaNdarray_HOST_STRIDES(%(z)s)[1], CudaNdarray_HOST_STRIDES(%(z)s)[2], CudaNdarray_HOST_STRIDES(%(z)s)[3]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s. (grid: %%i x %%i;" " block: %%i x %%i x %%i)\\n", "kMaxPool_%(nodename)s", cudaGetErrorString(err), grid.x, grid.y, block.x, block.y, block.z); %(fail)s; } } """ % locals() def c_support_code_apply(self, node, nodename): ignore_border = int(self.ignore_border) return """ template<int pf2, int pf3> __global__ void kMaxPool_%(nodename)s( int D0, int D1, int D2, int D3, int xD2, int xD3, const float * x, int xS0, int xS1, int xS2, int xS3, float *z, int zS0, int zS1, int zS2, int zS3) { float cur_max, cur_x; // Cast threadIdx.x into a signed int, to avoid problems with // indexing with negative offsets. 
int tx = threadIdx.x; for(int block_x_idx = blockIdx.x; block_x_idx < D0 * D1; block_x_idx += gridDim.x){ int i0 = block_x_idx %% D0; int i1 = block_x_idx / D0; int i2 = blockIdx.y; extern __shared__ float xbuf[]; //size [xD3] for (int r2 = 0; (r2 < pf2) && (%(ignore_border)s || (r2 + i2*pf2 < xD2)); ++r2) { __syncthreads(); // load the current row of the image into shared memory for (int j = tx; j < xD3; j += blockDim.x) { xbuf[j] = x[i0*xS0 + i1*xS1 + (i2*pf2+r2)*xS2 + j*xS3]; } __syncthreads(); // initialize our max if this is the // first row we're loading cur_max = (r2 == 0) ? xbuf[tx*pf3] : cur_max; // do a mini-reduction over the pf3 relevant elements // in the current row if (%(ignore_border)s) { for (int k = 0; k < pf3; ++k) { cur_x = xbuf[tx*pf3+k]; cur_max = (cur_x > cur_max) ? cur_x : cur_max; } } else { for (int k = 0; k < pf3; ++k) { if (tx*pf3 + k < xD3) { cur_x = xbuf[tx*pf3+k]; cur_max = (cur_x > cur_max) ? cur_x : cur_max; } } } } z[i0*zS0 + i1*zS1 + i2*zS2 + tx*zS3] = cur_max; } } """ % locals() class GpuDownsampleFactorMaxGrad(GpuOp): """ Implement the grad of downsample with max on the gpu. 
""" def __init__(self, ds, ignore_border): self.ds = tuple(ds) self.ignore_border = ignore_border def __eq__(self, other): return (type(self) == type(other) and self.ds == other.ds and self.ignore_border == other.ignore_border) def __hash__(self): return hash(type(self)) ^ hash(self.ds) ^ hash(self.ignore_border) def __str__(self): return '%s{%s,%s}' % (self.__class__.__name__, self.ds, self.ignore_border) def make_node(self, x, z, gz): return Apply(self, [x, z, gz], [x.type()]) def c_code_cache_version(self): return (9,) def c_code(self, node, nodename, inp, out, sub): x, z, gz = inp gx, = out fail = sub['fail'] ds0, ds1 = self.ds ignore_border = int(self.ignore_border) return """ if (%(x)s->nd != 4 || %(z)s->nd != 4 || %(gz)s->nd != 4) { PyErr_SetString(PyExc_ValueError, "rank error"); %(fail)s; } if ((NULL == %(gx)s) || (CudaNdarray_HOST_DIMS(%(gx)s)[0] != CudaNdarray_HOST_DIMS(%(x)s)[0]) || (CudaNdarray_HOST_DIMS(%(gx)s)[1] != CudaNdarray_HOST_DIMS(%(x)s)[1]) || (CudaNdarray_HOST_DIMS(%(gx)s)[2] != CudaNdarray_HOST_DIMS(%(x)s)[2]) || (CudaNdarray_HOST_DIMS(%(gx)s)[3] != CudaNdarray_HOST_DIMS(%(x)s)[3])) { Py_XDECREF(%(gx)s); %(gx)s = (CudaNdarray*)CudaNdarray_New(); if ((NULL == %(gx)s) || CudaNdarray_alloc_contiguous(%(gx)s, 4, CudaNdarray_HOST_DIMS(%(x)s))) { Py_XDECREF(%(gx)s); %(gx)s = NULL; %(fail)s; } } { //TODO: supporting more output columns than threads // make sure we cover every x row when ignore border isset and // there's a border present to be ignored int needs_extra_z_col = %(ignore_border)s && (CudaNdarray_HOST_DIMS(%(x)s)[2] %% %(ds0)s); dim3 grid(std::min(CudaNdarray_HOST_DIMS(%(z)s)[0], 65535), CudaNdarray_HOST_DIMS(%(z)s)[2] + (needs_extra_z_col ? 
1 : 0)); dim3 block(std::min(CudaNdarray_HOST_DIMS(%(x)s)[3], 512)); kDownsampleMaxGrad_%(nodename)s<%(ds0)s, %(ds1)s> <<<grid, block>>>( CudaNdarray_HOST_DIMS(%(z)s)[0], CudaNdarray_HOST_DIMS(%(z)s)[1], CudaNdarray_HOST_DIMS(%(z)s)[2], CudaNdarray_HOST_DIMS(%(z)s)[3], CudaNdarray_HOST_DIMS(%(x)s)[2], CudaNdarray_HOST_DIMS(%(x)s)[3], CudaNdarray_DEV_DATA(%(x)s), CudaNdarray_HOST_STRIDES(%(x)s)[0], CudaNdarray_HOST_STRIDES(%(x)s)[1], CudaNdarray_HOST_STRIDES(%(x)s)[2], CudaNdarray_HOST_STRIDES(%(x)s)[3], CudaNdarray_DEV_DATA(%(z)s), CudaNdarray_HOST_STRIDES(%(z)s)[0], CudaNdarray_HOST_STRIDES(%(z)s)[1], CudaNdarray_HOST_STRIDES(%(z)s)[2], CudaNdarray_HOST_STRIDES(%(z)s)[3], CudaNdarray_DEV_DATA(%(gz)s), CudaNdarray_HOST_STRIDES(%(gz)s)[0], CudaNdarray_HOST_STRIDES(%(gz)s)[1], CudaNdarray_HOST_STRIDES(%(gz)s)[2], CudaNdarray_HOST_STRIDES(%(gz)s)[3], CudaNdarray_DEV_DATA(%(gx)s), CudaNdarray_HOST_STRIDES(%(gx)s)[0], CudaNdarray_HOST_STRIDES(%(gx)s)[1], CudaNdarray_HOST_STRIDES(%(gx)s)[2], CudaNdarray_HOST_STRIDES(%(gx)s)[3]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s. (grid: %%i x %%i; block: %%i x %%i x %%i)\\n", "kDownsampleMaxGrad_%(nodename)s", cudaGetErrorString(err), grid.x, grid.y, block.x, block.y, block.z); %(fail)s; } } """ % locals() def c_support_code_apply(self, node, nodename): # This code considers every position in the output z, andthen # computes the gradient for the input pixels that were # downsampled to that z-position. It does so by running along # every z row (sometimes plus one, to make sure every gx row # gets totally filled), and by running along every x col. This # code is not sensitive to the ignore_border flag along the # row dimension (since it runs for every position in the # output z), but it is sensitive along the col dimension. 
ignore_border = int(self.ignore_border) return """ // ds0 is the downsampling factor in rows, ds1 in columns template<int ds0, int ds1> __global__ void kDownsampleMaxGrad_%(nodename)s( int D0, int D1, int D2, int D3, int xD2, int xD3, const float * x, int xS0, int xS1, int xS2, int xS3, const float * z, int zS0, int zS1, int zS2, int zS3, const float * gz, int gzS0, int gzS1, int gzS2, int gzS3, float *gx, int gxS0, int gxS1, int gxS2, int gxS3) { // D0: number of image rows // D1: number of image cols // D2: number of z rows // D3: number of z cols // xD2: number of x rows // xD3: number of x cols // various .S. variables are strides float cur_max, cur_x, my_z, my_gz; // Cast threadIdx.x into a signed int, to avoid problems with // indexing with negative offsets. int tx = threadIdx.x; int bdimx = blockDim.x; for(int i0 = blockIdx.x; i0 < D0; i0 += gridDim.x){ int i1 = 0; // image col // row wrt z and/or gz, ranges from 0 to D2 - 1 OR D2 // (as needed to cover all x rows) int i2 = blockIdx.y; int x_col = tx; // col wrt x, ranges from 0 to xD3 - 1 int z_col = x_col/ds1; // z_col corresponding to this x_col //TODO: raise occupancy. Use threadIdx.y to run several // iterations of this i1 loop in parallel for (i1 = 0; i1 < D1; ++i1) // loop over images (same for z and x) { for(int col_iter = 0; (tx + col_iter * bdimx < xD3) ; col_iter++){ //The if inside is to don't do the division if we // need only 1 col_iter if(tx + bdimx < xD3) { x_col = tx + col_iter * bdimx; z_col = x_col/ds1; } if (%(ignore_border)s && ((x_col >= ds1 * D3) || (i2 >= D2))) { // This happens only if x_col, or i2*ds0, was ignored // (via ignore_border) // TODO: if ignore_border is False, this is impossible // and we don't even need to generate this code. my_gz = 0.0f; //any fp number suffices for my_z, so we don't even //need to set it to anything in particular. 
} else { // this is effectively: // my_gz = gz[image_row][image_col][z_row][z_col] // my_z = z[image_row][image_col][z_row][z_col] my_gz = gz[i0 * gzS0 + i1 * gzS1 + i2 * gzS2 + z_col*gzS3]; my_z = z[i0 * zS0 + i1 * zS1 + i2 * zS2 + z_col* zS3]; } for (int x_row = i2*ds0; (x_row < i2*ds0+ds0) && (x_row < xD2); ++x_row) { // this is effectively: // gx[image_row][image_col][x_row][x_col] // = (my_z == x[image_row][image_col][ // x_row][x_col]) ? my_gz : 0.0f; gx[i0*gxS0 + i1*gxS1 + x_row*gxS2 + x_col*gxS3] = (my_z == x[i0*xS0 + i1*xS1 + x_row*xS2 + x_col*xS3]) ? my_gz : 0.0f; } } } } } """ % locals() class GpuDownsampleFactorMaxGradGrad(GpuOp): """ Implement the grad of downsample with max on the gpu. """ __props__ = ('ds', 'ignore_border') def __init__(self, ds, ignore_border): self.ds = tuple(ds) self.ignore_border = ignore_border def make_node(self, x, z, gx): x = as_cuda_ndarray_variable(x) z = as_cuda_ndarray_variable(z) gx = as_cuda_ndarray_variable(gx) if x.type.ndim != 4: raise TypeError('x must be 4D tensor') if z.type.ndim != 4: raise TypeError('z must be 4D tensor') if gx.type.ndim != 4: raise TypeError('gx must be 4D tensor') return Apply(self, [x, z, gx], [x.type()]) def c_code_cache_version(self): return (1,) def c_code(self, node, nodename, inp, out, sub): x, z, gx = inp gz, = out fail = sub['fail'] ds0, ds1 = self.ds ignore_border = int(self.ignore_border) return """ if (%(x)s->nd != 4 || %(z)s->nd != 4 || %(gx)s->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuDownsampleFactorMaxGradGrad: rank error"); %(fail)s; } if ((NULL == %(gz)s) || (CudaNdarray_HOST_DIMS(%(gz)s)[0] != CudaNdarray_HOST_DIMS(%(z)s)[0]) || (CudaNdarray_HOST_DIMS(%(gz)s)[1] != CudaNdarray_HOST_DIMS(%(z)s)[1]) || (CudaNdarray_HOST_DIMS(%(gz)s)[2] != CudaNdarray_HOST_DIMS(%(z)s)[2]) || (CudaNdarray_HOST_DIMS(%(gz)s)[3] != CudaNdarray_HOST_DIMS(%(z)s)[3])) { Py_XDECREF(%(gz)s); %(gz)s = (CudaNdarray*)CudaNdarray_New(); if ((NULL == %(gz)s) || CudaNdarray_alloc_contiguous(%(gz)s, 4, 
CudaNdarray_HOST_DIMS(%(z)s))) { Py_XDECREF(%(gz)s); %(gz)s = NULL; %(fail)s; } } { int needs_extra_z_col = %(ignore_border)s && (CudaNdarray_HOST_DIMS(%(x)s)[2] %% %(ds0)s); dim3 grid(std::min(CudaNdarray_HOST_DIMS(%(z)s)[0], 65535), CudaNdarray_HOST_DIMS(%(z)s)[2] + (needs_extra_z_col ? 1 : 0)); dim3 block(std::min(CudaNdarray_HOST_DIMS(%(x)s)[3], 512)); kDownsampleMaxGradGrad_%(nodename)s<%(ds0)s, %(ds1)s> <<<grid, block>>>( CudaNdarray_HOST_DIMS(%(z)s)[0], CudaNdarray_HOST_DIMS(%(z)s)[1], CudaNdarray_HOST_DIMS(%(z)s)[2], CudaNdarray_HOST_DIMS(%(z)s)[3], CudaNdarray_HOST_DIMS(%(x)s)[2], CudaNdarray_HOST_DIMS(%(x)s)[3], CudaNdarray_DEV_DATA(%(x)s), CudaNdarray_HOST_STRIDES(%(x)s)[0], CudaNdarray_HOST_STRIDES(%(x)s)[1], CudaNdarray_HOST_STRIDES(%(x)s)[2], CudaNdarray_HOST_STRIDES(%(x)s)[3], CudaNdarray_DEV_DATA(%(z)s), CudaNdarray_HOST_STRIDES(%(z)s)[0], CudaNdarray_HOST_STRIDES(%(z)s)[1], CudaNdarray_HOST_STRIDES(%(z)s)[2], CudaNdarray_HOST_STRIDES(%(z)s)[3], CudaNdarray_DEV_DATA(%(gz)s), CudaNdarray_HOST_STRIDES(%(gz)s)[0], CudaNdarray_HOST_STRIDES(%(gz)s)[1], CudaNdarray_HOST_STRIDES(%(gz)s)[2], CudaNdarray_HOST_STRIDES(%(gz)s)[3], CudaNdarray_DEV_DATA(%(gx)s), CudaNdarray_HOST_STRIDES(%(gx)s)[0], CudaNdarray_HOST_STRIDES(%(gx)s)[1], CudaNdarray_HOST_STRIDES(%(gx)s)[2], CudaNdarray_HOST_STRIDES(%(gx)s)[3]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s. 
(grid: %%i x %%i; block: %%i x %%i x %%i)\\n", "kDownsampleMaxGradGrad_%(nodename)s", cudaGetErrorString(err), grid.x, grid.y, block.x, block.y, block.z); %(fail)s; } } """ % locals() def c_support_code_apply(self, node, nodename): return """ // ds0 is the downsampling factor in rows, ds1 in columns template<int ds0, int ds1> __global__ void kDownsampleMaxGradGrad_%(nodename)s( int D0, int D1, int D2, int D3, int xD2, int xD3, const float * x, int xS0, int xS1, int xS2, int xS3, const float * z, int zS0, int zS1, int zS2, int zS3, float * gz, int gzS0, int gzS1, int gzS2, int gzS3, const float *gx, int gxS0, int gxS1, int gxS2, int gxS3) { // D0: number of image rows // D1: number of image cols // D2: number of z rows // D3: number of z cols // xD2: number of x rows // xD3: number of x cols // various .S. variables are strides float cur_max, cur_x, my_z, my_gx; // Cast threadIdx.x into a signed int, to avoid problems with // indexing with negative offsets. int tx = threadIdx.x; int bdimx = blockDim.x; for(int i0 = blockIdx.x; i0 < D0; i0 += gridDim.x){ int i1 = 0; // image col // row wrt z and/or gz, ranges from 0 to D2 - 1 OR D2 // (as needed to cover all x rows) int i2 = blockIdx.y; int x_col = tx; // col wrt x, ranges from 0 to xD3 - 1 int z_col = x_col/ds1; // z_col corresponding to this x_col //TODO: raise occupancy. 
Use threadIdx.y to run several // iterations of this i1 loop in parallel for (i1 = 0; i1 < D1; ++i1) // loop over images (same for z and x) { for(int col_iter = 0; (tx + col_iter * bdimx < xD3) ; col_iter++){ //The if inside is to don't do the division if we // need only 1 col_iter if(tx + bdimx < xD3) { x_col = tx + col_iter * bdimx; z_col = x_col/ds1; } my_z = z[i0 * zS0 + i1 * zS1 + i2 * zS2 + z_col* zS3]; for (int x_row = i2*ds0; (x_row < i2*ds0+ds0) && (x_row < xD2); ++x_row) { // my_gx = gx[image_row][image_col][x_row][x_col] my_gx = gx[i0*gxS0 + i1*gxS1 + x_row*gxS2 + x_col*gxS3]; if (my_z == x[i0*xS0 + i1*xS1 + x_row*xS2 + x_col*xS3]) { gz[i0 * gzS0 + i1 * gzS1 + i2 * gzS2 + z_col* gzS3] = my_gx; } } } } } } """ % locals()
bsd-3-clause
berinhard/py-notify
run-tests.py
4
3647
#! /usr/bin/env python
# -*- coding: utf-8 -*-

#--------------------------------------------------------------------#
# This file is part of Py-notify.                                    #
#                                                                    #
# Copyright (C) 2007, 2008 Paul Pogonyshev.                          #
#                                                                    #
# This library is free software; you can redistribute it and/or      #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1   #
# of the License, or (at your option) any later version.             #
#                                                                    #
# This library is distributed in the hope that it will be useful,    #
# but WITHOUT ANY WARRANTY; without even the implied warranty of     #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU  #
# Lesser General Public License for more details.                    #
#                                                                    #
# You should have received a copy of the GNU Lesser General Public   #
# License along with this library; if not, write to the Free         #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,        #
# Boston, MA 02110-1301 USA                                          #
#--------------------------------------------------------------------#

"""Test-suite driver for Py-notify.

Builds the package's C extension (once, lazily) and then runs the
unit tests via ``unittest``.  Individual test modules are imported
lazily so that the extension build only happens when tests actually
need it.  Must be run from the source tree root.
"""

import os
import subprocess
import sys
import unittest


# Sanity check: this script only works from the source tree root.
if not os.path.isfile (os.path.join ('notify', 'all.py')):
    sys.exit ("%s: cannot find '%s', strange..."
              % (sys.argv[0], os.path.join ('notify', 'all.py')))


# Module-level flag so the extension is built at most once per run.
_extensions_built = False


def _build_extensions ():
    """Build the C extension in-place, once per process.

    Exits the process with status 1 if the build fails.
    """
    global _extensions_built

    if not _extensions_built:
        print ('Building extension...')

        # Use the interpreter that is running this script rather than
        # whatever `python` happens to be on PATH, so the extension is
        # built for the right Python version.  Passing an argument list
        # (shell=False) also avoids quoting problems if sys.executable
        # contains spaces.
        if subprocess.call ([sys.executable, 'setup.py', 'build_ext']) != 0:
            sys.exit (1)

        _extensions_built = True


# Names of the submodules of the `test` package that contain test cases.
_TEST_MODULES = ('all', 'base', 'bind', 'condition', '_gc',
                 'mediator', 'signal', 'utils', 'variable')


def _import_module (module_name):
    """Import and return test module ``test.<module_name>``.

    Triggers the (lazy, one-time) extension build first, since the test
    modules import the built extension.
    """
    _build_extensions ()
    return __import__('test.%s' % module_name, globals (), locals (), ('*',))


def _import_module_tests (module_name):
    """Return a ``TestSuite`` with all tests from ``test.<module_name>``."""
    return unittest.defaultTestLoader.loadTestsFromModule (_import_module (module_name))


def _import_all_tests ():
    """Return one ``TestSuite`` aggregating every test module's tests."""
    everything = unittest.TestSuite ()

    for module_name in _TEST_MODULES:
        everything.addTest (_import_module_tests (module_name))

    return everything


class _TestModuleImporter (object):

    """Lazy stand-in for one test module.

    Calling the instance loads that module's test suite; attribute
    access falls through to the real (lazily imported) module, so
    ``unittest`` can address individual tests inside it without
    importing anything up front.
    """

    def __init__(self, module_name):
        self._module_name = module_name

    def __call__(self):
        # Go through object.__getattribute__ to bypass our own
        # __getattribute__ fallback below.
        return _import_module_tests (object.__getattribute__(self, '_module_name'))

    def __getattribute__(self, name):
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            # Unknown attribute: import the real test module and
            # delegate the lookup to it.
            return getattr (_import_module (object.__getattribute__(self, '_module_name')),
                            name)


class AllTests (object):

    """Namespace object handed to ``TestProgram`` as the test 'module'.

    Exposes ``everything`` (all tests) plus one lazy importer per test
    module, so the command line can select e.g. just ``condition``.
    """

    def __init__(self):
        # Note: stored as a callable, not called — unittest invokes it
        # when (and only if) 'everything' is actually selected.
        self.everything = _import_all_tests

        for module_name in _TEST_MODULES:
            setattr (self, module_name, _TestModuleImporter (module_name))


class TestProgram (unittest.TestProgram):

    """``unittest.TestProgram`` that builds the extension before running."""

    def runTests (self):
        _build_extensions ()
        print ('\nNote that almost all time is spent in gc.collect() calls, not in this package\n')
        unittest.TestProgram.runTests (self)


# Default test target is 'everything'; command-line arguments may
# narrow this down to individual modules or tests.
TestProgram (AllTests (), 'everything')


# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
lgpl-2.1
wolfram74/numerical_methods_iserles_notes
venv/lib/python2.7/site-packages/sympy/polys/agca/modules.py
24
45751
""" Computations with modules over polynomial rings. This module implements various classes that encapsulate groebner basis computations for modules. Most of them should not be instantiated by hand. Instead, use the constructing routines on objects you already have. For example, to construct a free module over ``QQ[x, y]``, call ``QQ[x, y].free_module(rank)`` instead of the ``FreeModule`` constructor. In fact ``FreeModule`` is an abstract base class that should not be instantiated, the ``free_module`` method instead returns the implementing class ``FreeModulePolyRing``. In general, the abstract base classes implement most functionality in terms of a few non-implemented methods. The concrete base classes supply only these non-implemented methods. They may also supply new implementations of the convenience methods, for example if there are faster algorithms available. """ from __future__ import print_function, division from copy import copy from sympy.polys.polyerrors import CoercionFailed from sympy.polys.orderings import ProductOrder, monomial_key from sympy.polys.domains.field import Field from sympy.polys.agca.ideals import Ideal from sympy.core.compatibility import iterable, reduce # TODO # - module saturation # - module quotient/intersection for quotient rings # - free resoltutions / syzygies # - finding small/minimal generating sets # - ... ########################################################################## ## Abstract base classes ################################################# ########################################################################## class Module(object): """ Abstract base class for modules. 
Do not instantiate - use ring explicit constructors instead: >>> from sympy import QQ >>> from sympy.abc import x >>> QQ.old_poly_ring(x).free_module(2) QQ[x]**2 Attributes: - dtype - type of elements - ring - containing ring Non-implemented methods: - submodule - quotient_module - is_zero - is_submodule - multiply_ideal The method convert likely needs to be changed in subclasses. """ def __init__(self, ring): self.ring = ring def convert(self, elem, M=None): """ Convert ``elem`` into internal representation of this module. If ``M`` is not None, it should be a module containing it. """ if not isinstance(elem, self.dtype): raise CoercionFailed return elem def submodule(self, *gens): """Generate a submodule.""" raise NotImplementedError def quotient_module(self, other): """Generate a quotient module.""" raise NotImplementedError def __div__(self, e): if not isinstance(e, Module): e = self.submodule(*e) return self.quotient_module(e) __truediv__ = __div__ def contains(self, elem): """Return True if ``elem`` is an element of this module.""" try: self.convert(elem) return True except CoercionFailed: return False def __contains__(self, elem): return self.contains(elem) def subset(self, other): """ Returns True if ``other`` is is a subset of ``self``. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> F.subset([(1, x), (x, 2)]) True >>> F.subset([(1/x, x), (x, 2)]) False """ return all(self.contains(x) for x in other) def __eq__(self, other): return self.is_submodule(other) and other.is_submodule(self) def __ne__(self, other): return not (self == other) def is_zero(self): """Returns True if ``self`` is a zero module.""" raise NotImplementedError def is_submodule(self, other): """Returns True if ``other`` is a submodule of ``self``.""" raise NotImplementedError def multiply_ideal(self, other): """ Multiply ``self`` by the ideal ``other``. 
""" raise NotImplementedError def __mul__(self, e): if not isinstance(e, Ideal): try: e = self.ring.ideal(e) except (CoercionFailed, NotImplementedError): return NotImplemented return self.multiply_ideal(e) __rmul__ = __mul__ def identity_hom(self): """Return the identity homomorphism on ``self``.""" raise NotImplementedError class ModuleElement(object): """ Base class for module element wrappers. Use this class to wrap primitive data types as module elements. It stores a reference to the containing module, and implements all the arithmetic operators. Attributes: - module - containing module - data - internal data Methods that likely need change in subclasses: - add - mul - div - eq """ def __init__(self, module, data): self.module = module self.data = data def add(self, d1, d2): """Add data ``d1`` and ``d2``.""" return d1 + d2 def mul(self, m, d): """Multiply module data ``m`` by coefficient d.""" return m * d def div(self, m, d): """Divide module data ``m`` by coefficient d.""" return m / d def eq(self, d1, d2): """Return true if d1 and d2 represent the same element.""" return d1 == d2 def __add__(self, om): if not isinstance(om, self.__class__) or om.module != self.module: try: om = self.module.convert(om) except CoercionFailed: return NotImplemented return self.__class__(self.module, self.add(self.data, om.data)) __radd__ = __add__ def __neg__(self): return self.__class__(self.module, self.mul(self.data, self.module.ring.convert(-1))) def __sub__(self, om): if not isinstance(om, self.__class__) or om.module != self.module: try: om = self.module.convert(om) except CoercionFailed: return NotImplemented return self.__add__(-om) def __rsub__(self, om): return (-self).__add__(om) def __mul__(self, o): if not isinstance(o, self.module.ring.dtype): try: o = self.module.ring.convert(o) except CoercionFailed: return NotImplemented return self.__class__(self.module, self.mul(self.data, o)) __rmul__ = __mul__ def __div__(self, o): if not isinstance(o, 
self.module.ring.dtype): try: o = self.module.ring.convert(o) except CoercionFailed: return NotImplemented return self.__class__(self.module, self.div(self.data, o)) __truediv__ = __div__ def __eq__(self, om): if not isinstance(om, self.__class__) or om.module != self.module: try: om = self.module.convert(om) except CoercionFailed: return False return self.eq(self.data, om.data) def __ne__(self, om): return not self.__eq__(om) ########################################################################## ## Free Modules ########################################################## ########################################################################## class FreeModuleElement(ModuleElement): """Element of a free module. Data stored as a tuple.""" def add(self, d1, d2): return tuple(x + y for x, y in zip(d1, d2)) def mul(self, d, p): return tuple(x * p for x in d) def div(self, d, p): return tuple(x / p for x in d) def __repr__(self): from sympy import sstr return '[' + ', '.join(sstr(x) for x in self.data) + ']' def __iter__(self): return self.data.__iter__() def __getitem__(self, idx): return self.data[idx] class FreeModule(Module): """ Abstract base class for free modules. Additional attributes: - rank - rank of the free module Non-implemented methods: - submodule """ dtype = FreeModuleElement def __init__(self, ring, rank): Module.__init__(self, ring) self.rank = rank def __repr__(self): return repr(self.ring) + "**" + repr(self.rank) def is_submodule(self, other): """ Returns True if ``other`` is a submodule of ``self``. 
>>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> M = F.submodule([2, x]) >>> F.is_submodule(F) True >>> F.is_submodule(M) True >>> M.is_submodule(F) False """ if isinstance(other, SubModule): return other.container == self if isinstance(other, FreeModule): return other.ring == self.ring and other.rank == self.rank return False def convert(self, elem, M=None): """ Convert ``elem`` into the internal representation. This method is called implicitly whenever computations involve elements not in the internal representation. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> F.convert([1, 0]) [1, 0] """ if isinstance(elem, FreeModuleElement): if elem.module is self: return elem if elem.module.rank != self.rank: raise CoercionFailed return FreeModuleElement(self, tuple(self.ring.convert(x, elem.module.ring) for x in elem.data)) elif iterable(elem): tpl = tuple(self.ring.convert(x) for x in elem) if len(tpl) != self.rank: raise CoercionFailed return FreeModuleElement(self, tpl) elif elem is 0: return FreeModuleElement(self, (self.ring.convert(0),)*self.rank) else: raise CoercionFailed def is_zero(self): """ Returns True if ``self`` is a zero module. (If, as this implementation assumes, the coefficient ring is not the zero ring, then this is equivalent to the rank being zero.) >>> from sympy.abc import x >>> from sympy import QQ >>> QQ.old_poly_ring(x).free_module(0).is_zero() True >>> QQ.old_poly_ring(x).free_module(1).is_zero() False """ return self.rank == 0 def basis(self): """ Return a set of basis elements. >>> from sympy.abc import x >>> from sympy import QQ >>> QQ.old_poly_ring(x).free_module(3).basis() ([1, 0, 0], [0, 1, 0], [0, 0, 1]) """ from sympy.matrices import eye M = eye(self.rank) return tuple(self.convert(M.row(i)) for i in range(self.rank)) def quotient_module(self, submodule): """ Return a quotient module. 
>>> from sympy.abc import x >>> from sympy import QQ >>> M = QQ.old_poly_ring(x).free_module(2) >>> M.quotient_module(M.submodule([1, x], [x, 2])) QQ[x]**2/<[1, x], [x, 2]> Or more conicisely, using the overloaded division operator: >>> QQ.old_poly_ring(x).free_module(2) / [[1, x], [x, 2]] QQ[x]**2/<[1, x], [x, 2]> """ return QuotientModule(self.ring, self, submodule) def multiply_ideal(self, other): """ Multiply ``self`` by the ideal ``other``. >>> from sympy.abc import x >>> from sympy import QQ >>> I = QQ.old_poly_ring(x).ideal(x) >>> F = QQ.old_poly_ring(x).free_module(2) >>> F.multiply_ideal(I) <[x, 0], [0, x]> """ return self.submodule(*self.basis()).multiply_ideal(other) def identity_hom(self): """ Return the identity homomorphism on ``self``. >>> from sympy.abc import x >>> from sympy import QQ >>> QQ.old_poly_ring(x).free_module(2).identity_hom() Matrix([ [1, 0], : QQ[x]**2 -> QQ[x]**2 [0, 1]]) """ from sympy.polys.agca.homomorphisms import homomorphism return homomorphism(self, self, self.basis()) class FreeModulePolyRing(FreeModule): """ Free module over a generalized polynomial ring. Do not instantiate this, use the constructor method of the ring instead: >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(3) >>> F QQ[x]**3 >>> F.contains([x, 1, 0]) True >>> F.contains([1/x, 0, 1]) False """ def __init__(self, ring, rank): from sympy.polys.domains.old_polynomialring import PolynomialRingBase FreeModule.__init__(self, ring, rank) if not isinstance(ring, PolynomialRingBase): raise NotImplementedError('This implementation only works over ' + 'polynomial rings, got %s' % ring) if not isinstance(ring.dom, Field): raise NotImplementedError('Ground domain must be a field, ' + 'got %s' % ring.dom) def submodule(self, *gens, **opts): """ Generate a submodule. 
>>> from sympy.abc import x, y >>> from sympy import QQ >>> M = QQ.old_poly_ring(x, y).free_module(2).submodule([x, x + y]) >>> M <[x, x + y]> >>> M.contains([2*x, 2*x + 2*y]) True >>> M.contains([x, y]) False """ return SubModulePolyRing(gens, self, **opts) class FreeModuleQuotientRing(FreeModule): """ Free module over a quotient ring. Do not instantiate this, use the constructor method of the ring instead: >>> from sympy.abc import x >>> from sympy import QQ >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(3) >>> F (QQ[x]/<x**2 + 1>)**3 Attributes - quot - the quotient module `R^n / IR^n`, where `R/I` is our ring """ def __init__(self, ring, rank): from sympy.polys.domains.quotientring import QuotientRing FreeModule.__init__(self, ring, rank) if not isinstance(ring, QuotientRing): raise NotImplementedError('This implementation only works over ' + 'quotient rings, got %s' % ring) F = self.ring.ring.free_module(self.rank) self.quot = F / (self.ring.base_ideal*F) def __repr__(self): return "(" + repr(self.ring) + ")" + "**" + repr(self.rank) def submodule(self, *gens, **opts): """ Generate a submodule. >>> from sympy.abc import x, y >>> from sympy import QQ >>> M = (QQ.old_poly_ring(x, y)/[x**2 - y**2]).free_module(2).submodule([x, x + y]) >>> M <[x + <x**2 - y**2>, x + y + <x**2 - y**2>]> >>> M.contains([y**2, x**2 + x*y]) True >>> M.contains([x, y]) False """ return SubModuleQuotientRing(gens, self, **opts) def lift(self, elem): """ Lift the element ``elem`` of self to the module self.quot. Note that self.quot is the same set as self, just as an R-module and not as an R/I-module, so this makes sense. 
>>> from sympy.abc import x >>> from sympy import QQ >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(2) >>> e = F.convert([1, 0]) >>> e [1 + <x**2 + 1>, 0 + <x**2 + 1>] >>> L = F.quot >>> l = F.lift(e) >>> l [1, 0] + <[x**2 + 1, 0], [0, x**2 + 1]> >>> L.contains(l) True """ return self.quot.convert([x.data for x in elem]) def unlift(self, elem): """ Push down an element of self.quot to self. This undoes ``lift``. >>> from sympy.abc import x >>> from sympy import QQ >>> F = (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(2) >>> e = F.convert([1, 0]) >>> l = F.lift(e) >>> e == l False >>> e == F.unlift(l) True """ return self.convert(elem.data) ########################################################################## ## Submodules and subquotients ########################################### ########################################################################## class SubModule(Module): """ Base class for submodules. Attributes: - container - containing module - gens - generators (subset of containing module) - rank - rank of containing module Non-implemented methods: - _contains - _syzygies - _in_terms_of_generators - _intersect - _module_quotient Methods that likely need change in subclasses: - reduce_element """ def __init__(self, gens, container): Module.__init__(self, container.ring) self.gens = tuple(container.convert(x) for x in gens) self.container = container self.rank = container.rank self.ring = container.ring self.dtype = container.dtype def __repr__(self): return "<" + ", ".join(repr(x) for x in self.gens) + ">" def _contains(self, other): """Implementation of containment. 
Other is guaranteed to be FreeModuleElement.""" raise NotImplementedError def _syzygies(self): """Implementation of syzygy computation wrt self generators.""" raise NotImplementedError def _in_terms_of_generators(self, e): """Implementation of expression in terms of generators.""" raise NotImplementedError def convert(self, elem, M=None): """ Convert ``elem`` into the internal represantition. Mostly called implicitly. >>> from sympy.abc import x >>> from sympy import QQ >>> M = QQ.old_poly_ring(x).free_module(2).submodule([1, x]) >>> M.convert([2, 2*x]) [2, 2*x] """ if isinstance(elem, self.container.dtype) and elem.module is self: return elem r = copy(self.container.convert(elem, M)) r.module = self if not self._contains(r): raise CoercionFailed return r def _intersect(self, other): """Implementation of intersection. Other is guaranteed to be a submodule of same free module.""" raise NotImplementedError def _module_quotient(self, other): """Implementation of quotient. Other is guaranteed to be a submodule of same free module.""" raise NotImplementedError def intersect(self, other, **options): """ Returns the intersection of ``self`` with submodule ``other``. >>> from sympy.abc import x, y >>> from sympy import QQ >>> F = QQ.old_poly_ring(x, y).free_module(2) >>> F.submodule([x, x]).intersect(F.submodule([y, y])) <[x*y, x*y]> Some implementation allow further options to be passed. Currently, to only one implemented is ``relations=True``, in which case the function will return a triple ``(res, rela, relb)``, where ``res`` is the intersection module, and ``rela`` and ``relb`` are lists of coefficient vectors, expressing the generators of ``res`` in terms of the generators of ``self`` (``rela``) and ``other`` (``relb``). 
>>> F.submodule([x, x]).intersect(F.submodule([y, y]), relations=True) (<[x*y, x*y]>, [(y,)], [(x,)]) The above result says: the intersection module is generated by the single element `(-xy, -xy) = -y (x, x) = -x (y, y)`, where `(x, x)` and `(y, y)` respectively are the unique generators of the two modules being intersected. """ if not isinstance(other, SubModule): raise TypeError('%s is not a SubModule' % other) if other.container != self.container: raise ValueError( '%s is contained in a different free module' % other) return self._intersect(other, **options) def module_quotient(self, other, **options): r""" Returns the module quotient of ``self`` by submodule ``other``. That is, if ``self`` is the module `M` and ``other`` is `N`, then return the ideal `\{f \in R | fN \subset M\}`. >>> from sympy import QQ >>> from sympy.abc import x, y >>> F = QQ.old_poly_ring(x, y).free_module(2) >>> S = F.submodule([x*y, x*y]) >>> T = F.submodule([x, x]) >>> S.module_quotient(T) <y> Some implementations allow further options to be passed. Currently, the only one implemented is ``relations=True``, which may only be passed if ``other`` is prinicipal. In this case the function will return a pair ``(res, rel)`` where ``res`` is the ideal, and ``rel`` is a list of coefficient vectors, expressing the generators of the ideal, multiplied by the generator of ``other`` in terms of generators of ``self``. >>> S.module_quotient(T, relations=True) (<y>, [[1]]) This means that the quotient ideal is generated by the single element `y`, and that `y (x, x) = 1 (xy, xy)`, `(x, x)` and `(xy, xy)` being the generators of `T` and `S`, respectively. """ if not isinstance(other, SubModule): raise TypeError('%s is not a SubModule' % other) if other.container != self.container: raise ValueError( '%s is contained in a different free module' % other) return self._module_quotient(other, **options) def union(self, other): """ Returns the module generated by the union of ``self`` and ``other``. 
>>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(1) >>> M = F.submodule([x**2 + x]) # <x(x+1)> >>> N = F.submodule([x**2 - 1]) # <(x-1)(x+1)> >>> M.union(N) == F.submodule([x+1]) True """ if not isinstance(other, SubModule): raise TypeError('%s is not a SubModule' % other) if other.container != self.container: raise ValueError( '%s is contained in a different free module' % other) return self.__class__(self.gens + other.gens, self.container) def is_zero(self): """ Return True if ``self`` is a zero module. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> F.submodule([x, 1]).is_zero() False >>> F.submodule([0, 0]).is_zero() True """ return all(x == 0 for x in self.gens) def submodule(self, *gens): """ Generate a submodule. >>> from sympy.abc import x >>> from sympy import QQ >>> M = QQ.old_poly_ring(x).free_module(2).submodule([x, 1]) >>> M.submodule([x**2, x]) <[x**2, x]> """ if not self.subset(gens): raise ValueError('%s not a subset of %s' % (gens, self)) return self.__class__(gens, self.container) def is_full_module(self): """ Return True if ``self`` is the entire free module. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> F.submodule([x, 1]).is_full_module() False >>> F.submodule([1, 1], [1, 2]).is_full_module() True """ return all(self.contains(x) for x in self.container.basis()) def is_submodule(self, other): """ Returns True if ``other`` is a submodule of ``self``. 
>>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> M = F.submodule([2, x]) >>> N = M.submodule([2*x, x**2]) >>> M.is_submodule(M) True >>> M.is_submodule(N) True >>> N.is_submodule(M) False """ if isinstance(other, SubModule): return self.container == other.container and \ all(self.contains(x) for x in other.gens) if isinstance(other, (FreeModule, QuotientModule)): return self.container == other and self.is_full_module() return False def syzygy_module(self, **opts): r""" Compute the syzygy module of the generators of ``self``. Suppose `M` is generated by `f_1, \dots, f_n` over the ring `R`. Consider the homomorphism `\phi: R^n \to M`, given by sending `(r_1, \dots, r_n) \to r_1 f_1 + \dots + r_n f_n`. The syzygy module is defined to be the kernel of `\phi`. The syzygy module is zero iff the generators generate freely a free submodule: >>> from sympy.abc import x, y >>> from sympy import QQ >>> QQ.old_poly_ring(x).free_module(2).submodule([1, 0], [1, 1]).syzygy_module().is_zero() True A slightly more interesting example: >>> M = QQ.old_poly_ring(x, y).free_module(2).submodule([x, 2*x], [y, 2*y]) >>> S = QQ.old_poly_ring(x, y).free_module(2).submodule([y, -x]) >>> M.syzygy_module() == S True """ F = self.ring.free_module(len(self.gens)) # NOTE we filter out zero syzygies. This is for convenience of the # _syzygies function and not meant to replace any real "generating set # reduction" algorithm return F.submodule(*[x for x in self._syzygies() if F.convert(x) != 0], **opts) def in_terms_of_generators(self, e): """ Express element ``e`` of ``self`` in terms of the generators. 
>>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> M = F.submodule([1, 0], [1, 1]) >>> M.in_terms_of_generators([x, x**2]) [-x**2 + x, x**2] """ try: e = self.convert(e) except CoercionFailed: raise ValueError('%s is not an element of %s' % (e, self)) return self._in_terms_of_generators(e) def reduce_element(self, x): """ Reduce the element ``x`` of our ring modulo the ideal ``self``. Here "reduce" has no specific meaning, it could return a unique normal form, simplify the expression a bit, or just do nothing. """ return x def quotient_module(self, other, **opts): """ Return a quotient module. This is the same as taking a submodule of a quotient of the containing module. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> S1 = F.submodule([x, 1]) >>> S2 = F.submodule([x**2, x]) >>> S1.quotient_module(S2) <[x, 1] + <[x**2, x]>> Or more coincisely, using the overloaded division operator: >>> F.submodule([x, 1]) / [(x**2, x)] <[x, 1] + <[x**2, x]>> """ if not self.is_submodule(other): raise ValueError('%s not a submodule of %s' % (other, self)) return SubQuotientModule(self.gens, self.container.quotient_module(other), **opts) def __add__(self, oth): return self.container.quotient_module(self).convert(oth) __radd__ = __add__ def multiply_ideal(self, I): """ Multiply ``self`` by the ideal ``I``. >>> from sympy.abc import x >>> from sympy import QQ >>> I = QQ.old_poly_ring(x).ideal(x**2) >>> M = QQ.old_poly_ring(x).free_module(2).submodule([1, 1]) >>> I*M <[x**2, x**2]> """ return self.submodule(*[x*g for [x] in I._module.gens for g in self.gens]) def inclusion_hom(self): """ Return a homomorphism representing the inclusion map of ``self``. That is, the natural map from ``self`` to ``self.container``. 
>>> from sympy.abc import x >>> from sympy import QQ >>> QQ.old_poly_ring(x).free_module(2).submodule([x, x]).inclusion_hom() Matrix([ [1, 0], : <[x, x]> -> QQ[x]**2 [0, 1]]) """ return self.container.identity_hom().restrict_domain(self) def identity_hom(self): """ Return the identity homomorphism on ``self``. >>> from sympy.abc import x >>> from sympy import QQ >>> QQ.old_poly_ring(x).free_module(2).submodule([x, x]).identity_hom() Matrix([ [1, 0], : <[x, x]> -> <[x, x]> [0, 1]]) """ return self.container.identity_hom().restrict_domain( self).restrict_codomain(self) class SubQuotientModule(SubModule): """ Submodule of a quotient module. Equivalently, quotient module of a submodule. Do not instantiate this, instead use the submodule or quotient_module constructing methods: >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> S = F.submodule([1, 0], [1, x]) >>> Q = F/[(1, 0)] >>> S/[(1, 0)] == Q.submodule([5, x]) True Attributes: - base - base module we are quotient of - killed_module - submodule used to form the quotient """ def __init__(self, gens, container, **opts): SubModule.__init__(self, gens, container) self.killed_module = self.container.killed_module # XXX it is important for some code below that the generators of base # are in this particular order! self.base = self.container.base.submodule( *[x.data for x in self.gens], **opts).union(self.killed_module) def _contains(self, elem): return self.base.contains(elem.data) def _syzygies(self): # let N = self.killed_module be generated by e_1, ..., e_r # let F = self.base be generated by f_1, ..., f_s and e_1, ..., e_r # Then self = F/N. # Let phi: R**s --> self be the evident surjection. # Similarly psi: R**(s + r) --> F. # We need to find generators for ker(phi). Let chi: R**s --> F be the # evident lift of phi. For X in R**s, phi(X) = 0 iff chi(X) is # contained in N, iff there exists Y in R**r such that # psi(X, Y) = 0. 
# Hence if alpha: R**(s + r) --> R**s is the projection map, then # ker(phi) = alpha ker(psi). return [X[:len(self.gens)] for X in self.base._syzygies()] def _in_terms_of_generators(self, e): return self.base._in_terms_of_generators(e.data)[:len(self.gens)] def is_full_module(self): """ Return True if ``self`` is the entire free module. >>> from sympy.abc import x >>> from sympy import QQ >>> F = QQ.old_poly_ring(x).free_module(2) >>> F.submodule([x, 1]).is_full_module() False >>> F.submodule([1, 1], [1, 2]).is_full_module() True """ return self.base.is_full_module() def quotient_hom(self): """ Return the quotient homomorphism to self. That is, return the natural map from ``self.base`` to ``self``. >>> from sympy.abc import x >>> from sympy import QQ >>> M = (QQ.old_poly_ring(x).free_module(2) / [(1, x)]).submodule([1, 0]) >>> M.quotient_hom() Matrix([ [1, 0], : <[1, 0], [1, x]> -> <[1, 0] + <[1, x]>, [1, x] + <[1, x]>> [0, 1]]) """ return self.base.identity_hom().quotient_codomain(self.killed_module) _subs0 = lambda x: x[0] _subs1 = lambda x: x[1:] class ModuleOrder(ProductOrder): """A product monomial order with a zeroth term as module index.""" def __init__(self, o1, o2, TOP): if TOP: ProductOrder.__init__(self, (o2, _subs1), (o1, _subs0)) else: ProductOrder.__init__(self, (o1, _subs0), (o2, _subs1)) class SubModulePolyRing(SubModule): """ Submodule of a free module over a generalized polynomial ring. 
Do not instantiate this, use the constructor method of FreeModule instead: >>> from sympy.abc import x, y >>> from sympy import QQ >>> F = QQ.old_poly_ring(x, y).free_module(2) >>> F.submodule([x, y], [1, 0]) <[x, y], [1, 0]> Attributes: - order - monomial order used """ #self._gb - cached groebner basis #self._gbe - cached groebner basis relations def __init__(self, gens, container, order="lex", TOP=True): SubModule.__init__(self, gens, container) if not isinstance(container, FreeModulePolyRing): raise NotImplementedError('This implementation is for submodules of ' + 'FreeModulePolyRing, got %s' % container) self.order = ModuleOrder(monomial_key(order), self.ring.order, TOP) self._gb = None self._gbe = None def __eq__(self, other): if isinstance(other, SubModulePolyRing) and self.order != other.order: return False return SubModule.__eq__(self, other) def _groebner(self, extended=False): """Returns a standard basis in sdm form.""" from sympy.polys.distributedmodules import sdm_groebner, sdm_nf_mora if self._gbe is None and extended: gb, gbe = sdm_groebner( [self.ring._vector_to_sdm(x, self.order) for x in self.gens], sdm_nf_mora, self.order, self.ring.dom, extended=True) self._gb, self._gbe = tuple(gb), tuple(gbe) if self._gb is None: self._gb = tuple(sdm_groebner( [self.ring._vector_to_sdm(x, self.order) for x in self.gens], sdm_nf_mora, self.order, self.ring.dom)) if extended: return self._gb, self._gbe else: return self._gb def _groebner_vec(self, extended=False): """Returns a standard basis in element form.""" if not extended: return [self.convert(self.ring._sdm_to_vector(x, self.rank)) for x in self._groebner()] gb, gbe = self._groebner(extended=True) return ([self.convert(self.ring._sdm_to_vector(x, self.rank)) for x in gb], [self.ring._sdm_to_vector(x, len(self.gens)) for x in gbe]) def _contains(self, x): from sympy.polys.distributedmodules import sdm_zero, sdm_nf_mora return sdm_nf_mora(self.ring._vector_to_sdm(x, self.order), self._groebner(), self.order, 
self.ring.dom) == \ sdm_zero() def _syzygies(self): """Compute syzygies. See [SCA, algorithm 2.5.4].""" # NOTE if self.gens is a standard basis, this can be done more # efficiently using Schreyer's theorem from sympy.matrices import eye # First bullet point k = len(self.gens) r = self.rank im = eye(k) Rkr = self.ring.free_module(r + k) newgens = [] for j, f in enumerate(self.gens): m = [0]*(r + k) for i, v in enumerate(f): m[i] = f[i] for i in range(k): m[r + i] = im[j, i] newgens.append(Rkr.convert(m)) # Note: we need *descending* order on module index, and TOP=False to # get an eliminetaion order F = Rkr.submodule(*newgens, order='ilex', TOP=False) # Second bullet point: standard basis of F G = F._groebner_vec() # Third bullet point: G0 = G intersect the new k components G0 = [x[r:] for x in G if all(y == self.ring.convert(0) for y in x[:r])] # Fourth and fifth bullet points: we are done return G0 def _in_terms_of_generators(self, e): """Expression in terms of generators. See [SCA, 2.8.1].""" # NOTE: if gens is a standard basis, this can be done more efficiently M = self.ring.free_module(self.rank).submodule(*((e,) + self.gens)) S = M.syzygy_module( order="ilex", TOP=False) # We want decreasing order! G = S._groebner_vec() # This list cannot not be empty since e is an element e = [x for x in G if self.ring.is_unit(x[0])][0] return [-x/e[0] for x in e[1:]] def reduce_element(self, x, NF=None): """ Reduce the element ``x`` of our container modulo ``self``. This applies the normal form ``NF`` to ``x``. If ``NF`` is passed as none, the default Mora normal form is used (which is not unique!). 
""" from sympy.polys.distributedmodules import sdm_nf_mora if NF is None: NF = sdm_nf_mora return self.container.convert(self.ring._sdm_to_vector(NF( self.ring._vector_to_sdm(x, self.order), self._groebner(), self.order, self.ring.dom), self.rank)) def _intersect(self, other, relations=False): # See: [SCA, section 2.8.2] fi = self.gens hi = other.gens r = self.rank ci = [[0]*(2*r) for _ in range(r)] for k in range(r): ci[k][k] = 1 ci[k][r + k] = 1 di = [list(f) + [0]*r for f in fi] ei = [[0]*r + list(h) for h in hi] syz = self.ring.free_module(2*r).submodule(*(ci + di + ei))._syzygies() nonzero = [x for x in syz if any(y != self.ring.zero for y in x[:r])] res = self.container.submodule(*([-y for y in x[:r]] for x in nonzero)) reln1 = [x[r:r + len(fi)] for x in nonzero] reln2 = [x[r + len(fi):] for x in nonzero] if relations: return res, reln1, reln2 return res def _module_quotient(self, other, relations=False): # See: [SCA, section 2.8.4] if relations and len(other.gens) != 1: raise NotImplementedError if len(other.gens) == 0: return self.ring.ideal(1) elif len(other.gens) == 1: # We do some trickery. Let f be the (vector!) generating ``other`` # and f1, .., fn be the (vectors) generating self. # Consider the submodule of R^{r+1} generated by (f, 1) and # {(fi, 0) | i}. Then the intersection with the last module # component yields the quotient. 
g1 = list(other.gens[0]) + [1] gi = [list(x) + [0] for x in self.gens] # NOTE: We *need* to use an elimination order M = self.ring.free_module(self.rank + 1).submodule(*([g1] + gi), order='ilex', TOP=False) if not relations: return self.ring.ideal(*[x[-1] for x in M._groebner_vec() if all(y == self.ring.zero for y in x[:-1])]) else: G, R = M._groebner_vec(extended=True) indices = [i for i, x in enumerate(G) if all(y == self.ring.zero for y in x[:-1])] return (self.ring.ideal(*[G[i][-1] for i in indices]), [[-x for x in R[i][1:]] for i in indices]) # For more generators, we use I : <h1, .., hn> = intersection of # {I : <hi> | i} # TODO this can be done more efficiently return reduce(lambda x, y: x.intersect(y), (self._module_quotient(self.container.submodule(x)) for x in other.gens)) class SubModuleQuotientRing(SubModule): """ Class for submodules of free modules over quotient rings. Do not instantiate this. Instead use the submodule methods. >>> from sympy.abc import x, y >>> from sympy import QQ >>> M = (QQ.old_poly_ring(x, y)/[x**2 - y**2]).free_module(2).submodule([x, x + y]) >>> M <[x + <x**2 - y**2>, x + y + <x**2 - y**2>]> >>> M.contains([y**2, x**2 + x*y]) True >>> M.contains([x, y]) False Attributes: - quot - the subquotient of `R^n/IR^n` generated by lifts of our generators """ def __init__(self, gens, container): SubModule.__init__(self, gens, container) self.quot = self.container.quot.submodule( *[self.container.lift(x) for x in self.gens]) def _contains(self, elem): return self.quot._contains(self.container.lift(elem)) def _syzygies(self): return [tuple(self.ring.convert(y, self.quot.ring) for y in x) for x in self.quot._syzygies()] def _in_terms_of_generators(self, elem): return [self.ring.convert(x, self.quot.ring) for x in self.quot._in_terms_of_generators(self.container.lift(elem))] ########################################################################## ## Quotient Modules ###################################################### 
##########################################################################


class QuotientModuleElement(ModuleElement):
    """Element of a quotient module."""

    def eq(self, d1, d2):
        """Equality comparison."""
        # Two representatives are equal iff their difference lies in the
        # killed submodule.
        return self.module.killed_module.contains(d1 - d2)

    def __repr__(self):
        # Plain ``repr`` of the underlying data suffices here; the previously
        # imported ``sympy.sstr`` was never used (removed).
        return repr(self.data) + " + " + repr(self.module.killed_module)


class QuotientModule(Module):
    """
    Class for quotient modules.

    Do not instantiate this directly. For subquotients, see the
    SubQuotientModule class.

    Attributes:

    - base - the base module we are a quotient of
    - killed_module - the submodule used to form the quotient
    - rank of the base
    """

    dtype = QuotientModuleElement

    def __init__(self, ring, base, submodule):
        Module.__init__(self, ring)
        if not base.is_submodule(submodule):
            raise ValueError('%s is not a submodule of %s' % (submodule, base))
        self.base = base
        self.killed_module = submodule
        # The rank of the quotient is reported as the rank of the base module.
        self.rank = base.rank

    def __repr__(self):
        return repr(self.base) + "/" + repr(self.killed_module)

    def is_zero(self):
        """
        Return True if ``self`` is a zero module.

        This happens if and only if the base module is the same as the
        submodule being killed.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2)
        >>> (F/[(1, 0)]).is_zero()
        False
        >>> (F/[(1, 0), (0, 1)]).is_zero()
        True
        """
        return self.base == self.killed_module

    def is_submodule(self, other):
        """
        Return True if ``other`` is a submodule of ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> Q = QQ.old_poly_ring(x).free_module(2) / [(x, x)]
        >>> S = Q.submodule([1, 0])
        >>> Q.is_submodule(S)
        True
        >>> S.is_submodule(Q)
        False
        """
        if isinstance(other, QuotientModule):
            # Same quotient: identical killed module, and the base must
            # contain the other's base.
            return self.killed_module == other.killed_module and \
                self.base.is_submodule(other.base)
        if isinstance(other, SubQuotientModule):
            return other.container == self
        return False

    def submodule(self, *gens, **opts):
        """
        Generate a submodule.

        This is the same as taking a quotient of a submodule of the base
        module.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> Q = QQ.old_poly_ring(x).free_module(2) / [(x, x)]
        >>> Q.submodule([x, 0])
        <[x, 0] + <[x, x]>>
        """
        return SubQuotientModule(gens, self, **opts)

    def convert(self, elem, M=None):
        """
        Convert ``elem`` into the internal representation.

        This method is called implicitly whenever computations involve elements
        not in the internal representation.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> F = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)]
        >>> F.convert([1, 0])
        [1, 0] + <[1, 2], [1, x]>
        """
        if isinstance(elem, QuotientModuleElement):
            if elem.module is self:
                return elem
            # An element of a quotient by a smaller submodule maps naturally
            # into this quotient.
            if self.killed_module.is_submodule(elem.module.killed_module):
                return QuotientModuleElement(self, self.base.convert(elem.data))
            raise CoercionFailed
        return QuotientModuleElement(self, self.base.convert(elem))

    def identity_hom(self):
        """
        Return the identity homomorphism on ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)]
        >>> M.identity_hom()
        Matrix([
        [1, 0], : QQ[x]**2/<[1, 2], [1, x]> -> QQ[x]**2/<[1, 2], [1, x]>
        [0, 1]])
        """
        return self.base.identity_hom().quotient_codomain(
            self.killed_module).quotient_domain(self.killed_module)

    def quotient_hom(self):
        """
        Return the quotient homomorphism to ``self``.

        That is, return a homomorphism representing the natural map from
        ``self.base`` to ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> M = QQ.old_poly_ring(x).free_module(2) / [(1, 2), (1, x)]
        >>> M.quotient_hom()
        Matrix([
        [1, 0], : QQ[x]**2 -> QQ[x]**2/<[1, 2], [1, x]>
        [0, 1]])
        """
        return self.base.identity_hom().quotient_codomain(
            self.killed_module)
mit
jakirkham/splauncher
splauncher/core.py
3
1955
from __future__ import print_function __author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>" __date__ = "$May 18, 2015 16:52:18 EDT$" import datetime import os import logging drmaa_logger = logging.getLogger(__name__) try: import drmaa except ImportError: # python-drmaa is not installed. drmaa_logger.error( "Was not able to import drmaa. " + "If this is meant to be run using the OpenGrid submission " + "system, then drmaa needs to be installed via pip or " + "easy_install." ) raise except RuntimeError: # The drmaa library was not specified, but python-drmaa is installed. drmaa_logger.error( "Was able to import drmaa. " + "However, the drmaa library could not be found. Please " + "either specify the location of libdrmaa.so using the " + "DRMAA_LIBRARY_PATH environment variable or disable/remove " + "use_drmaa from the config file." ) raise def main(*argv): hostname = os.uname()[1] job_time = datetime.datetime.utcnow() job_time_str = job_time.isoformat().replace(":", ".") job_name = "splaunch_" + argv[1].replace("/", "-") + "_" + job_time_str s = drmaa.Session() s.initialize() session_name = s.contact job_template = s.createJobTemplate() job_template.jobName = job_name job_template.remoteCommand = argv[1] job_template.args = argv[2:] job_template.jobEnvironment = os.environ job_template.inputPath = "localhost:" + os.devnull job_template.outputPath = hostname + ":" + job_name + ".out" job_template.errorPath = hostname + ":" + job_name + ".err" job_template.workingDirectory = os.getcwd() process_id = s.runJob(job_template) s.deleteJobTemplate(job_template) s.exit() print( "From context \"%s\" launched job \"%s\" with process ID \"%s\"." % ( session_name, job_name, process_id ) ) return(0)
bsd-3-clause
AnishShah/tensorflow
tensorflow/python/debug/examples/debug_mnist.py
34
6803
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Demo of the tfdbg curses CLI: Locating the source of bad numerical values. The neural network in this demo is larged based on the tutorial at: tensorflow/examples/tutorials/mnist/mnist_with_summaries.py But modifications are made so that problematic numerical values (infs and nans) appear in nodes of the graph during training. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data from tensorflow.python import debug as tf_debug IMAGE_SIZE = 28 HIDDEN_SIZE = 500 NUM_LABELS = 10 RAND_SEED = 42 def main(_): # Import data mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True, fake_data=FLAGS.fake_data) def feed_dict(train): if train or FLAGS.fake_data: xs, ys = mnist.train.next_batch(FLAGS.train_batch_size, fake_data=FLAGS.fake_data) else: xs, ys = mnist.test.images, mnist.test.labels return {x: xs, y_: ys} sess = tf.InteractiveSession() # Create the MNIST neural network graph. # Input placeholders. 
with tf.name_scope("input"): x = tf.placeholder( tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE], name="x-input") y_ = tf.placeholder(tf.float32, [None, NUM_LABELS], name="y-input") def weight_variable(shape): """Create a weight variable with appropriate initialization.""" initial = tf.truncated_normal(shape, stddev=0.1, seed=RAND_SEED) return tf.Variable(initial) def bias_variable(shape): """Create a bias variable with appropriate initialization.""" initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu): """Reusable code for making a simple neural net layer.""" # Adding a name scope ensures logical grouping of the layers in the graph. with tf.name_scope(layer_name): # This Variable will hold the state of the weights for the layer with tf.name_scope("weights"): weights = weight_variable([input_dim, output_dim]) with tf.name_scope("biases"): biases = bias_variable([output_dim]) with tf.name_scope("Wx_plus_b"): preactivate = tf.matmul(input_tensor, weights) + biases activations = act(preactivate) return activations hidden = nn_layer(x, IMAGE_SIZE**2, HIDDEN_SIZE, "hidden") logits = nn_layer(hidden, HIDDEN_SIZE, NUM_LABELS, "output", tf.identity) y = tf.nn.softmax(logits) with tf.name_scope("cross_entropy"): # The following line is the culprit of the bad numerical values that appear # during training of this graph. Log of zero gives inf, which is first seen # in the intermediate tensor "cross_entropy/Log:0" during the 4th run() # call. A multiplication of the inf values with zeros leads to nans, # which is first in "cross_entropy/mul:0". 
# # You can use the built-in, numerically-stable implementation to fix this # issue: # diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits) diff = -(y_ * tf.log(y)) with tf.name_scope("total"): cross_entropy = tf.reduce_mean(diff) with tf.name_scope("train"): train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize( cross_entropy) with tf.name_scope("accuracy"): with tf.name_scope("correct_prediction"): correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) with tf.name_scope("accuracy"): accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess.run(tf.global_variables_initializer()) if FLAGS.debug and FLAGS.tensorboard_debug_address: raise ValueError( "The --debug and --tensorboard_debug_address flags are mutually " "exclusive.") if FLAGS.debug: sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type) elif FLAGS.tensorboard_debug_address: sess = tf_debug.TensorBoardDebugWrapperSession( sess, FLAGS.tensorboard_debug_address) # Add this point, sess is a debug wrapper around the actual Session if # FLAGS.debug is true. In that case, calling run() will launch the CLI. 
for i in range(FLAGS.max_steps): acc = sess.run(accuracy, feed_dict=feed_dict(False)) print("Accuracy at step %d: %s" % (i, acc)) sess.run(train_step, feed_dict=feed_dict(True)) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.register("type", "bool", lambda v: v.lower() == "true") parser.add_argument( "--max_steps", type=int, default=10, help="Number of steps to run trainer.") parser.add_argument( "--train_batch_size", type=int, default=100, help="Batch size used during training.") parser.add_argument( "--learning_rate", type=float, default=0.025, help="Initial learning rate.") parser.add_argument( "--data_dir", type=str, default="/tmp/mnist_data", help="Directory for storing data") parser.add_argument( "--ui_type", type=str, default="curses", help="Command-line user interface type (curses | readline)") parser.add_argument( "--fake_data", type="bool", nargs="?", const=True, default=False, help="Use fake MNIST data for unit testing") parser.add_argument( "--debug", type="bool", nargs="?", const=True, default=False, help="Use debugger to track down bad values during training. " "Mutually exclusive with the --tensorboard_debug_address flag.") parser.add_argument( "--tensorboard_debug_address", type=str, default=None, help="Connect to the TensorBoard Debugger Plugin backend specified by " "the gRPC address (e.g., localhost:1234). Mutually exclusive with the " "--debug flag.") FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
SimonWang2014/DockerConsoleApp
libs/stormed-amqp/stormed/message.py
4
3116
from stormed.util import WithFields from stormed.method import basic class Message(WithFields): """An AMQP Message The body parameter represents the message content. If the parameter is a unicode object, it is encoded to UTF8. The optional properties are those defined in the AMQP standard (see stormed.method.codegen.basic.properties) When the message is received from the server the rx_data attribute contains the AMQP method instance (e.g. basic.GetOk, basic.Deliver). This instance carries the server metadata (e.g. the redelivered bit). A message received from the server can be acknowledged o rejected with the Message.ack() and Message.reject() methods if required. """ _fields = basic.properties def __init__(self, body, **properties): self.body = body if isinstance(body, unicode): encoding = properties.setdefault('content_encoding', 'utf8') self.body = body.encode(encoding) else: properties.setdefault('content_type', 'application/octet-stream') self.rx_data = None self.rx_channel = None super(Message, self).__init__(**properties) def ack(self, multiple=False): """acknowledge the message""" if self.rx_channel is None: raise ValueError('cannot ack an unreceived message') method = basic.Ack(delivery_tag=self.rx_data.delivery_tag, multiple=multiple) self.rx_channel.send_method(method) def nack(self, multiple=False, requeue=True): """reject the message""" if self.rx_channel is None: raise ValueError('cannot nack an unreceived message') method = basic.Nack(delivery_tag=self.rx_data.delivery_tag, multiple=multiple, requeue=requeue) self.rx_channel.send_method(method) def reject(self, requeue=True): """reject the message""" if self.rx_channel is None: raise ValueError('cannot reject an unreceived message') method = basic.Reject(delivery_tag=self.rx_data.delivery_tag, requeue=requeue) self.rx_channel.send_method(method) class ContentHeader(object): def __init__(self, size, properties): self.size = size self.properties = properties class MessageBuilder(object): def 
__init__(self, content_method): self.content_method = content_method self.content_header = None self.chunks = [] self.received_size = 0 def add_content_header(self, content_header): self.content_header = content_header def add_content_body(self, content_body): self.chunks.append(content_body) self.received_size += len(content_body) @property def msg_complete(self): return self.content_header.size == self.received_size def get_msg(self): assert self.msg_complete body = ''.join(self.chunks) msg = Message(body, **self.content_header.properties) msg.rx_data = self.content_method return msg
apache-2.0
rysson/filmkodi
plugin.video.mrknow/mylib/pydevd_attach_to_process/winappdbg/win32/defines.py
102
22799
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2014, Mario Vilas # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice,this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ Common definitions. """ # TODO # + add TCHAR and related types? __revision__ = "$Id$" import ctypes import functools from winappdbg import compat #------------------------------------------------------------------------------ # Some stuff from ctypes we'll be using very frequently. 
addressof = ctypes.addressof sizeof = ctypes.sizeof SIZEOF = ctypes.sizeof POINTER = ctypes.POINTER Structure = ctypes.Structure Union = ctypes.Union WINFUNCTYPE = ctypes.WINFUNCTYPE windll = ctypes.windll # The IronPython implementation of byref() was giving me problems, # so I'm replacing it with the slower pointer() function. try: ctypes.c_void_p(ctypes.byref(ctypes.c_char())) # this fails in IronPython byref = ctypes.byref except TypeError: byref = ctypes.pointer # XXX DEBUG # The following code can be enabled to make the Win32 API wrappers log to # standard output the dll and function names, the parameter values and the # return value for each call. ##WIN32_VERBOSE_MODE = True WIN32_VERBOSE_MODE = False if WIN32_VERBOSE_MODE: class WinDllHook(object): def __getattr__(self, name): if name.startswith('_'): return object.__getattr__(self, name) return WinFuncHook(name) class WinFuncHook(object): def __init__(self, name): self.__name = name def __getattr__(self, name): if name.startswith('_'): return object.__getattr__(self, name) return WinCallHook(self.__name, name) class WinCallHook(object): def __init__(self, dllname, funcname): self.__dllname = dllname self.__funcname = funcname self.__func = getattr(getattr(ctypes.windll, dllname), funcname) def __copy_attribute(self, attribute): try: value = getattr(self, attribute) setattr(self.__func, attribute, value) except AttributeError: try: delattr(self.__func, attribute) except AttributeError: pass def __call__(self, *argv): self.__copy_attribute('argtypes') self.__copy_attribute('restype') self.__copy_attribute('errcheck') print("-"*10) print("%s ! %s %r" % (self.__dllname, self.__funcname, argv)) retval = self.__func(*argv) print("== %r" % (retval,)) return retval windll = WinDllHook() #============================================================================== # This is used later on to calculate the list of exported symbols. 
_all = None _all = set(vars().keys()) #============================================================================== def RaiseIfZero(result, func = None, arguments = ()): """ Error checking for most Win32 API calls. The function is assumed to return an integer, which is C{0} on error. In that case the C{WindowsError} exception is raised. """ if not result: raise ctypes.WinError() return result def RaiseIfNotZero(result, func = None, arguments = ()): """ Error checking for some odd Win32 API calls. The function is assumed to return an integer, which is zero on success. If the return value is nonzero the C{WindowsError} exception is raised. This is mostly useful for free() like functions, where the return value is the pointer to the memory block on failure or a C{NULL} pointer on success. """ if result: raise ctypes.WinError() return result def RaiseIfNotErrorSuccess(result, func = None, arguments = ()): """ Error checking for Win32 Registry API calls. The function is assumed to return a Win32 error code. If the code is not C{ERROR_SUCCESS} then a C{WindowsError} exception is raised. """ if result != ERROR_SUCCESS: raise ctypes.WinError(result) return result class GuessStringType(object): """ Decorator that guesses the correct version (A or W) to call based on the types of the strings passed as parameters. Calls the B{ANSI} version if the only string types are ANSI. Calls the B{Unicode} version if Unicode or mixed string types are passed. The default if no string arguments are passed depends on the value of the L{t_default} class variable. @type fn_ansi: function @ivar fn_ansi: ANSI version of the API function to call. @type fn_unicode: function @ivar fn_unicode: Unicode (wide) version of the API function to call. @type t_default: type @cvar t_default: Default string type to use. 
Possible values are: - type('') for ANSI - type(u'') for Unicode """ # ANSI and Unicode types t_ansi = type('') t_unicode = type(u'') # Default is ANSI for Python 2.x t_default = t_ansi def __init__(self, fn_ansi, fn_unicode): """ @type fn_ansi: function @param fn_ansi: ANSI version of the API function to call. @type fn_unicode: function @param fn_unicode: Unicode (wide) version of the API function to call. """ self.fn_ansi = fn_ansi self.fn_unicode = fn_unicode # Copy the wrapped function attributes. try: self.__name__ = self.fn_ansi.__name__[:-1] # remove the A or W except AttributeError: pass try: self.__module__ = self.fn_ansi.__module__ except AttributeError: pass try: self.__doc__ = self.fn_ansi.__doc__ except AttributeError: pass def __call__(self, *argv, **argd): # Shortcut to self.t_ansi t_ansi = self.t_ansi # Get the types of all arguments for the function v_types = [ type(item) for item in argv ] v_types.extend( [ type(value) for (key, value) in compat.iteritems(argd) ] ) # Get the appropriate function for the default type if self.t_default == t_ansi: fn = self.fn_ansi else: fn = self.fn_unicode # If at least one argument is a Unicode string... if self.t_unicode in v_types: # If al least one argument is an ANSI string, # convert all ANSI strings to Unicode if t_ansi in v_types: argv = list(argv) for index in compat.xrange(len(argv)): if v_types[index] == t_ansi: argv[index] = compat.unicode(argv[index]) for (key, value) in argd.items(): if type(value) == t_ansi: argd[key] = compat.unicode(value) # Use the W version fn = self.fn_unicode # If at least one argument is an ANSI string, # but there are no Unicode strings... elif t_ansi in v_types: # Use the A version fn = self.fn_ansi # Call the function and return the result return fn(*argv, **argd) class DefaultStringType(object): """ Decorator that uses the default version (A or W) to call based on the configuration of the L{GuessStringType} decorator. 
@see: L{GuessStringType.t_default} @type fn_ansi: function @ivar fn_ansi: ANSI version of the API function to call. @type fn_unicode: function @ivar fn_unicode: Unicode (wide) version of the API function to call. """ def __init__(self, fn_ansi, fn_unicode): """ @type fn_ansi: function @param fn_ansi: ANSI version of the API function to call. @type fn_unicode: function @param fn_unicode: Unicode (wide) version of the API function to call. """ self.fn_ansi = fn_ansi self.fn_unicode = fn_unicode # Copy the wrapped function attributes. try: self.__name__ = self.fn_ansi.__name__[:-1] # remove the A or W except AttributeError: pass try: self.__module__ = self.fn_ansi.__module__ except AttributeError: pass try: self.__doc__ = self.fn_ansi.__doc__ except AttributeError: pass def __call__(self, *argv, **argd): # Get the appropriate function based on the default. if GuessStringType.t_default == GuessStringType.t_ansi: fn = self.fn_ansi else: fn = self.fn_unicode # Call the function and return the result return fn(*argv, **argd) def MakeANSIVersion(fn): """ Decorator that generates an ANSI version of a Unicode (wide) only API call. @type fn: callable @param fn: Unicode (wide) version of the API function to call. """ @functools.wraps(fn) def wrapper(*argv, **argd): t_ansi = GuessStringType.t_ansi t_unicode = GuessStringType.t_unicode v_types = [ type(item) for item in argv ] v_types.extend( [ type(value) for (key, value) in compat.iteritems(argd) ] ) if t_ansi in v_types: argv = list(argv) for index in compat.xrange(len(argv)): if v_types[index] == t_ansi: argv[index] = t_unicode(argv[index]) for key, value in argd.items(): if type(value) == t_ansi: argd[key] = t_unicode(value) return fn(*argv, **argd) return wrapper def MakeWideVersion(fn): """ Decorator that generates a Unicode (wide) version of an ANSI only API call. @type fn: callable @param fn: ANSI version of the API function to call. 
""" @functools.wraps(fn) def wrapper(*argv, **argd): t_ansi = GuessStringType.t_ansi t_unicode = GuessStringType.t_unicode v_types = [ type(item) for item in argv ] v_types.extend( [ type(value) for (key, value) in compat.iteritems(argd) ] ) if t_unicode in v_types: argv = list(argv) for index in compat.xrange(len(argv)): if v_types[index] == t_unicode: argv[index] = t_ansi(argv[index]) for key, value in argd.items(): if type(value) == t_unicode: argd[key] = t_ansi(value) return fn(*argv, **argd) return wrapper #--- Types -------------------------------------------------------------------- # http://msdn.microsoft.com/en-us/library/aa383751(v=vs.85).aspx # Map of basic C types to Win32 types LPVOID = ctypes.c_void_p CHAR = ctypes.c_char WCHAR = ctypes.c_wchar BYTE = ctypes.c_ubyte SBYTE = ctypes.c_byte WORD = ctypes.c_uint16 SWORD = ctypes.c_int16 DWORD = ctypes.c_uint32 SDWORD = ctypes.c_int32 QWORD = ctypes.c_uint64 SQWORD = ctypes.c_int64 SHORT = ctypes.c_short USHORT = ctypes.c_ushort INT = ctypes.c_int UINT = ctypes.c_uint LONG = ctypes.c_long ULONG = ctypes.c_ulong LONGLONG = ctypes.c_int64 # c_longlong ULONGLONG = ctypes.c_uint64 # c_ulonglong LPSTR = ctypes.c_char_p LPWSTR = ctypes.c_wchar_p INT8 = ctypes.c_int8 INT16 = ctypes.c_int16 INT32 = ctypes.c_int32 INT64 = ctypes.c_int64 UINT8 = ctypes.c_uint8 UINT16 = ctypes.c_uint16 UINT32 = ctypes.c_uint32 UINT64 = ctypes.c_uint64 LONG32 = ctypes.c_int32 LONG64 = ctypes.c_int64 ULONG32 = ctypes.c_uint32 ULONG64 = ctypes.c_uint64 DWORD32 = ctypes.c_uint32 DWORD64 = ctypes.c_uint64 BOOL = ctypes.c_int FLOAT = ctypes.c_float # Map size_t to SIZE_T try: SIZE_T = ctypes.c_size_t SSIZE_T = ctypes.c_ssize_t except AttributeError: # Size of a pointer SIZE_T = {1:BYTE, 2:WORD, 4:DWORD, 8:QWORD}[sizeof(LPVOID)] SSIZE_T = {1:SBYTE, 2:SWORD, 4:SDWORD, 8:SQWORD}[sizeof(LPVOID)] PSIZE_T = POINTER(SIZE_T) # Not really pointers but pointer-sized integers DWORD_PTR = SIZE_T ULONG_PTR = SIZE_T LONG_PTR = SIZE_T # Other Win32 
types, more may be added as needed PVOID = LPVOID PPVOID = POINTER(PVOID) PSTR = LPSTR PWSTR = LPWSTR PCHAR = LPSTR PWCHAR = LPWSTR LPBYTE = POINTER(BYTE) LPSBYTE = POINTER(SBYTE) LPWORD = POINTER(WORD) LPSWORD = POINTER(SWORD) LPDWORD = POINTER(DWORD) LPSDWORD = POINTER(SDWORD) LPULONG = POINTER(ULONG) LPLONG = POINTER(LONG) PDWORD = LPDWORD PDWORD_PTR = POINTER(DWORD_PTR) PULONG = LPULONG PLONG = LPLONG CCHAR = CHAR BOOLEAN = BYTE PBOOL = POINTER(BOOL) LPBOOL = PBOOL TCHAR = CHAR # XXX ANSI by default? UCHAR = BYTE DWORDLONG = ULONGLONG LPDWORD32 = POINTER(DWORD32) LPULONG32 = POINTER(ULONG32) LPDWORD64 = POINTER(DWORD64) LPULONG64 = POINTER(ULONG64) PDWORD32 = LPDWORD32 PULONG32 = LPULONG32 PDWORD64 = LPDWORD64 PULONG64 = LPULONG64 ATOM = WORD HANDLE = LPVOID PHANDLE = POINTER(HANDLE) LPHANDLE = PHANDLE HMODULE = HANDLE HINSTANCE = HANDLE HTASK = HANDLE HKEY = HANDLE PHKEY = POINTER(HKEY) HDESK = HANDLE HRSRC = HANDLE HSTR = HANDLE HWINSTA = HANDLE HKL = HANDLE HDWP = HANDLE HFILE = HANDLE HRESULT = LONG HGLOBAL = HANDLE HLOCAL = HANDLE HGDIOBJ = HANDLE HDC = HGDIOBJ HRGN = HGDIOBJ HBITMAP = HGDIOBJ HPALETTE = HGDIOBJ HPEN = HGDIOBJ HBRUSH = HGDIOBJ HMF = HGDIOBJ HEMF = HGDIOBJ HENHMETAFILE = HGDIOBJ HMETAFILE = HGDIOBJ HMETAFILEPICT = HGDIOBJ HWND = HANDLE NTSTATUS = LONG PNTSTATUS = POINTER(NTSTATUS) KAFFINITY = ULONG_PTR RVA = DWORD RVA64 = QWORD WPARAM = DWORD LPARAM = LPVOID LRESULT = LPVOID ACCESS_MASK = DWORD REGSAM = ACCESS_MASK PACCESS_MASK = POINTER(ACCESS_MASK) PREGSAM = POINTER(REGSAM) # Since the SID is an opaque structure, let's treat its pointers as void* PSID = PVOID # typedef union _LARGE_INTEGER { # struct { # DWORD LowPart; # LONG HighPart; # } ; # struct { # DWORD LowPart; # LONG HighPart; # } u; # LONGLONG QuadPart; # } LARGE_INTEGER, # *PLARGE_INTEGER; # XXX TODO # typedef struct _FLOAT128 { # __int64 LowPart; # __int64 HighPart; # } FLOAT128; class FLOAT128 (Structure): _fields_ = [ ("LowPart", QWORD), ("HighPart", QWORD), ] PFLOAT128 = 
POINTER(FLOAT128) # typedef struct DECLSPEC_ALIGN(16) _M128A { # ULONGLONG Low; # LONGLONG High; # } M128A, *PM128A; class M128A(Structure): _fields_ = [ ("Low", ULONGLONG), ("High", LONGLONG), ] PM128A = POINTER(M128A) #--- Constants ---------------------------------------------------------------- NULL = None INFINITE = -1 TRUE = 1 FALSE = 0 # http://blogs.msdn.com/oldnewthing/archive/2004/08/26/220873.aspx ANYSIZE_ARRAY = 1 # Invalid handle value is -1 casted to void pointer. try: INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value #-1 #0xFFFFFFFF except TypeError: if sizeof(ctypes.c_void_p) == 4: INVALID_HANDLE_VALUE = 0xFFFFFFFF elif sizeof(ctypes.c_void_p) == 8: INVALID_HANDLE_VALUE = 0xFFFFFFFFFFFFFFFF else: raise MAX_MODULE_NAME32 = 255 MAX_PATH = 260 # Error codes # TODO maybe add more error codes? # if they're too many they could be pickled instead, # or at the very least put in a new file ERROR_SUCCESS = 0 ERROR_INVALID_FUNCTION = 1 ERROR_FILE_NOT_FOUND = 2 ERROR_PATH_NOT_FOUND = 3 ERROR_ACCESS_DENIED = 5 ERROR_INVALID_HANDLE = 6 ERROR_NOT_ENOUGH_MEMORY = 8 ERROR_INVALID_DRIVE = 15 ERROR_NO_MORE_FILES = 18 ERROR_BAD_LENGTH = 24 ERROR_HANDLE_EOF = 38 ERROR_HANDLE_DISK_FULL = 39 ERROR_NOT_SUPPORTED = 50 ERROR_FILE_EXISTS = 80 ERROR_INVALID_PARAMETER = 87 ERROR_BUFFER_OVERFLOW = 111 ERROR_DISK_FULL = 112 ERROR_CALL_NOT_IMPLEMENTED = 120 ERROR_SEM_TIMEOUT = 121 ERROR_INSUFFICIENT_BUFFER = 122 ERROR_INVALID_NAME = 123 ERROR_MOD_NOT_FOUND = 126 ERROR_PROC_NOT_FOUND = 127 ERROR_DIR_NOT_EMPTY = 145 ERROR_BAD_THREADID_ADDR = 159 ERROR_BAD_ARGUMENTS = 160 ERROR_BAD_PATHNAME = 161 ERROR_ALREADY_EXISTS = 183 ERROR_INVALID_FLAG_NUMBER = 186 ERROR_ENVVAR_NOT_FOUND = 203 ERROR_FILENAME_EXCED_RANGE = 206 ERROR_MORE_DATA = 234 WAIT_TIMEOUT = 258 ERROR_NO_MORE_ITEMS = 259 ERROR_PARTIAL_COPY = 299 ERROR_INVALID_ADDRESS = 487 ERROR_THREAD_NOT_IN_PROCESS = 566 ERROR_CONTROL_C_EXIT = 572 ERROR_UNHANDLED_EXCEPTION = 574 ERROR_ASSERTION_FAILURE = 668 ERROR_WOW_ASSERTION = 670 
ERROR_DBG_EXCEPTION_NOT_HANDLED = 688 ERROR_DBG_REPLY_LATER = 689 ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE = 690 ERROR_DBG_TERMINATE_THREAD = 691 ERROR_DBG_TERMINATE_PROCESS = 692 ERROR_DBG_CONTROL_C = 693 ERROR_DBG_PRINTEXCEPTION_C = 694 ERROR_DBG_RIPEXCEPTION = 695 ERROR_DBG_CONTROL_BREAK = 696 ERROR_DBG_COMMAND_EXCEPTION = 697 ERROR_DBG_EXCEPTION_HANDLED = 766 ERROR_DBG_CONTINUE = 767 ERROR_ELEVATION_REQUIRED = 740 ERROR_NOACCESS = 998 ERROR_CIRCULAR_DEPENDENCY = 1059 ERROR_SERVICE_DOES_NOT_EXIST = 1060 ERROR_SERVICE_CANNOT_ACCEPT_CTRL = 1061 ERROR_SERVICE_NOT_ACTIVE = 1062 ERROR_FAILED_SERVICE_CONTROLLER_CONNECT = 1063 ERROR_EXCEPTION_IN_SERVICE = 1064 ERROR_DATABASE_DOES_NOT_EXIST = 1065 ERROR_SERVICE_SPECIFIC_ERROR = 1066 ERROR_PROCESS_ABORTED = 1067 ERROR_SERVICE_DEPENDENCY_FAIL = 1068 ERROR_SERVICE_LOGON_FAILED = 1069 ERROR_SERVICE_START_HANG = 1070 ERROR_INVALID_SERVICE_LOCK = 1071 ERROR_SERVICE_MARKED_FOR_DELETE = 1072 ERROR_SERVICE_EXISTS = 1073 ERROR_ALREADY_RUNNING_LKG = 1074 ERROR_SERVICE_DEPENDENCY_DELETED = 1075 ERROR_BOOT_ALREADY_ACCEPTED = 1076 ERROR_SERVICE_NEVER_STARTED = 1077 ERROR_DUPLICATE_SERVICE_NAME = 1078 ERROR_DIFFERENT_SERVICE_ACCOUNT = 1079 ERROR_CANNOT_DETECT_DRIVER_FAILURE = 1080 ERROR_CANNOT_DETECT_PROCESS_ABORT = 1081 ERROR_NO_RECOVERY_PROGRAM = 1082 ERROR_SERVICE_NOT_IN_EXE = 1083 ERROR_NOT_SAFEBOOT_SERVICE = 1084 ERROR_DEBUGGER_INACTIVE = 1284 ERROR_PRIVILEGE_NOT_HELD = 1314 ERROR_NONE_MAPPED = 1332 RPC_S_SERVER_UNAVAILABLE = 1722 # Standard access rights import sys if sys.version_info[0] >= 3: long = int DELETE = long(0x00010000) READ_CONTROL = long(0x00020000) WRITE_DAC = long(0x00040000) WRITE_OWNER = long(0x00080000) SYNCHRONIZE = long(0x00100000) STANDARD_RIGHTS_REQUIRED = long(0x000F0000) STANDARD_RIGHTS_READ = READ_CONTROL STANDARD_RIGHTS_WRITE = READ_CONTROL STANDARD_RIGHTS_EXECUTE = READ_CONTROL STANDARD_RIGHTS_ALL = long(0x001F0000) SPECIFIC_RIGHTS_ALL = long(0x0000FFFF) #--- Structures 
--------------------------------------------------------------- # typedef struct _LSA_UNICODE_STRING { # USHORT Length; # USHORT MaximumLength; # PWSTR Buffer; # } LSA_UNICODE_STRING, # *PLSA_UNICODE_STRING, # UNICODE_STRING, # *PUNICODE_STRING; class UNICODE_STRING(Structure): _fields_ = [ ("Length", USHORT), ("MaximumLength", USHORT), ("Buffer", PVOID), ] # From MSDN: # # typedef struct _GUID { # DWORD Data1; # WORD Data2; # WORD Data3; # BYTE Data4[8]; # } GUID; class GUID(Structure): _fields_ = [ ("Data1", DWORD), ("Data2", WORD), ("Data3", WORD), ("Data4", BYTE * 8), ] # From MSDN: # # typedef struct _LIST_ENTRY { # struct _LIST_ENTRY *Flink; # struct _LIST_ENTRY *Blink; # } LIST_ENTRY, *PLIST_ENTRY, *RESTRICTED_POINTER PRLIST_ENTRY; class LIST_ENTRY(Structure): _fields_ = [ ("Flink", PVOID), # POINTER(LIST_ENTRY) ("Blink", PVOID), # POINTER(LIST_ENTRY) ] #============================================================================== # This calculates the list of exported symbols. _all = set(vars().keys()).difference(_all) ##__all__ = [_x for _x in _all if not _x.startswith('_')] ##__all__.sort() #==============================================================================
apache-2.0
CSIEMIAT/linux-3.6.0-MIAT
tools/perf/scripts/python/futex-contention.py
11261
1486
# futex contention # (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com> # Licensed under the terms of the GNU GPL License version 2 # # Translation of: # # http://sourceware.org/systemtap/wiki/WSFutexContention # # to perf python scripting. # # Measures futex contention import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Util import * process_names = {} thread_thislock = {} thread_blocktime = {} lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time process_names = {} # long-lived pid-to-execname mapping def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, nr, uaddr, op, val, utime, uaddr2, val3): cmd = op & FUTEX_CMD_MASK if cmd != FUTEX_WAIT: return # we don't care about originators of WAKE events process_names[tid] = comm thread_thislock[tid] = uaddr thread_blocktime[tid] = nsecs(s, ns) def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret): if thread_blocktime.has_key(tid): elapsed = nsecs(s, ns) - thread_blocktime[tid] add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) del thread_blocktime[tid] del thread_thislock[tid] def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): for (tid, lock) in lock_waits: min, max, avg, count = lock_waits[tid, lock] print "%s[%d] lock %x contended %d times, %d avg ns" % \ (process_names[tid], tid, lock, count, avg)
gpl-2.0
solin319/incubator-mxnet
example/cnn_text_classification/text_cnn.py
28
7469
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # -*- coding: utf-8 -*- import sys import os import mxnet as mx import numpy as np import argparse import logging import data_helpers logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser(description="CNN for text classification", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--pretrained-embedding', type=bool, default=False, help='use pre-trained word2vec') parser.add_argument('--num-embed', type=int, default=300, help='embedding layer size') parser.add_argument('--gpus', type=str, default='', help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. 
') parser.add_argument('--kv-store', type=str, default='local', help='key-value store type') parser.add_argument('--num-epochs', type=int, default=200, help='max num of epochs') parser.add_argument('--batch-size', type=int, default=50, help='the batch size.') parser.add_argument('--optimizer', type=str, default='rmsprop', help='the optimizer type') parser.add_argument('--lr', type=float, default=0.0005, help='initial learning rate') parser.add_argument('--dropout', type=float, default=0.0, help='dropout rate') parser.add_argument('--disp-batches', type=int, default=50, help='show progress for every n batches') parser.add_argument('--save-period', type=int, default=10, help='save checkpoint for every n epochs') def save_model(): if not os.path.exists("checkpoint"): os.mkdir("checkpoint") return mx.callback.do_checkpoint("checkpoint/checkpoint", args.save_period) def data_iter(batch_size, num_embed, pre_trained_word2vec=False): print('Loading data...') if pre_trained_word2vec: word2vec = data_helpers.load_pretrained_word2vec('data/rt.vec') x, y = data_helpers.load_data_with_word2vec(word2vec) # reshpae for convolution input x = np.reshape(x, (x.shape[0], 1, x.shape[1], x.shape[2])) embed_size = x.shape[-1] sentence_size = x.shape[2] vocab_size = -1 else: x, y, vocab, vocab_inv = data_helpers.load_data() embed_size = num_embed sentence_size = x.shape[1] vocab_size = len(vocab) # randomly shuffle data np.random.seed(10) shuffle_indices = np.random.permutation(np.arange(len(y))) x_shuffled = x[shuffle_indices] y_shuffled = y[shuffle_indices] # split train/valid set x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:] y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:] print('Train/Valid split: %d/%d' % (len(y_train), len(y_dev))) print('train shape:', x_train.shape) print('valid shape:', x_dev.shape) print('sentence max words', sentence_size) print('embedding size', embed_size) print('vocab size', vocab_size) train = mx.io.NDArrayIter( x_train, y_train, 
batch_size, shuffle=True) valid = mx.io.NDArrayIter( x_dev, y_dev, batch_size) return (train, valid, sentence_size, embed_size, vocab_size) def sym_gen(batch_size, sentence_size, num_embed, vocab_size, num_label=2, filter_list=[3, 4, 5], num_filter=100, dropout=0.0, pre_trained_word2vec=False): input_x = mx.sym.Variable('data') input_y = mx.sym.Variable('softmax_label') # embedding layer if not pre_trained_word2vec: embed_layer = mx.sym.Embedding(data=input_x, input_dim=vocab_size, output_dim=num_embed, name='vocab_embed') conv_input = mx.sym.Reshape(data=embed_layer, target_shape=(batch_size, 1, sentence_size, num_embed)) else: conv_input = input_x # create convolution + (max) pooling layer for each filter operation pooled_outputs = [] for i, filter_size in enumerate(filter_list): convi = mx.sym.Convolution(data=conv_input, kernel=(filter_size, num_embed), num_filter=num_filter) relui = mx.sym.Activation(data=convi, act_type='relu') pooli = mx.sym.Pooling(data=relui, pool_type='max', kernel=(sentence_size - filter_size + 1, 1), stride=(1,1)) pooled_outputs.append(pooli) # combine all pooled outputs total_filters = num_filter * len(filter_list) concat = mx.sym.Concat(*pooled_outputs, dim=1) h_pool = mx.sym.Reshape(data=concat, target_shape=(batch_size, total_filters)) # dropout layer if dropout > 0.0: h_drop = mx.sym.Dropout(data=h_pool, p=dropout) else: h_drop = h_pool # fully connected cls_weight = mx.sym.Variable('cls_weight') cls_bias = mx.sym.Variable('cls_bias') fc = mx.sym.FullyConnected(data=h_drop, weight=cls_weight, bias=cls_bias, num_hidden=num_label) # softmax output sm = mx.sym.SoftmaxOutput(data=fc, label=input_y, name='softmax') return sm, ('data',), ('softmax_label',) def train(symbol, train_iter, valid_iter, data_names, label_names): devs = mx.cpu() if args.gpus is None or args.gpus is '' else [ mx.gpu(int(i)) for i in args.gpus.split(',')] module = mx.mod.Module(symbol, data_names=data_names, label_names=label_names, context=devs) 
module.fit(train_data = train_iter, eval_data = valid_iter, eval_metric = 'acc', kvstore = args.kv_store, optimizer = args.optimizer, optimizer_params = { 'learning_rate': args.lr }, initializer = mx.initializer.Uniform(0.1), num_epoch = args.num_epochs, batch_end_callback = mx.callback.Speedometer(args.batch_size, args.disp_batches), epoch_end_callback = save_model()) if __name__ == '__main__': # parse args args = parser.parse_args() # data iter train_iter, valid_iter, sentence_size, embed_size, vocab_size = data_iter(args.batch_size, args.num_embed, args.pretrained_embedding) # network symbol symbol, data_names, label_names = sym_gen(args.batch_size, sentence_size, embed_size, vocab_size, num_label=2, filter_list=[3, 4, 5], num_filter=100, dropout=args.dropout, pre_trained_word2vec=args.pretrained_embedding) # train cnn model train(symbol, train_iter, valid_iter, data_names, label_names)
apache-2.0
AOSP-TEAM/android_kernel_google_tuna
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
11088
3246
# Core.py - Python extension for perf script, core functions # # Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. from collections import defaultdict def autodict(): return defaultdict(autodict) flag_fields = autodict() symbolic_fields = autodict() def define_flag_field(event_name, field_name, delim): flag_fields[event_name][field_name]['delim'] = delim def define_flag_value(event_name, field_name, value, field_str): flag_fields[event_name][field_name]['values'][value] = field_str def define_symbolic_field(event_name, field_name): # nothing to do, really pass def define_symbolic_value(event_name, field_name, value, field_str): symbolic_fields[event_name][field_name]['values'][value] = field_str def flag_str(event_name, field_name, value): string = "" if flag_fields[event_name][field_name]: print_delim = 0 keys = flag_fields[event_name][field_name]['values'].keys() keys.sort() for idx in keys: if not value and not idx: string += flag_fields[event_name][field_name]['values'][idx] break if idx and (value & idx) == idx: if print_delim and flag_fields[event_name][field_name]['delim']: string += " " + flag_fields[event_name][field_name]['delim'] + " " string += flag_fields[event_name][field_name]['values'][idx] print_delim = 1 value &= ~idx return string def symbol_str(event_name, field_name, value): string = "" if symbolic_fields[event_name][field_name]: keys = symbolic_fields[event_name][field_name]['values'].keys() keys.sort() for idx in keys: if not value and not idx: string = symbolic_fields[event_name][field_name]['values'][idx] break if (value == idx): string = symbolic_fields[event_name][field_name]['values'][idx] break return string trace_flags = { 0x00: "NONE", \ 0x01: "IRQS_OFF", \ 0x02: "IRQS_NOSUPPORT", \ 0x04: "NEED_RESCHED", \ 0x08: "HARDIRQ", \ 0x10: "SOFTIRQ" } def trace_flag_str(value): string = 
"" print_delim = 0 keys = trace_flags.keys() for idx in keys: if not value and not idx: string += "NONE" break if idx and (value & idx) == idx: if print_delim: string += " | "; string += trace_flags[idx] print_delim = 1 value &= ~idx return string def taskState(state): states = { 0 : "R", 1 : "S", 2 : "D", 64: "DEAD" } if state not in states: return "Unknown" return states[state] class EventHeaders: def __init__(self, common_cpu, common_secs, common_nsecs, common_pid, common_comm): self.cpu = common_cpu self.secs = common_secs self.nsecs = common_nsecs self.pid = common_pid self.comm = common_comm def ts(self): return (self.secs * (10 ** 9)) + self.nsecs def ts_format(self): return "%d.%d" % (self.secs, int(self.nsecs / 1000))
gpl-2.0
fyfcauc/android_external_chromium-org
tools/telemetry/telemetry/page/page_test_unittest.py
24
3967
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from telemetry.core import util from telemetry.page import page as page_module from telemetry.page import page_test from telemetry.page.actions import all_page_actions from telemetry.page.actions import page_action def _CreatePage(test_filename): url = 'file://' + test_filename page = page_module.Page(url, None, base_dir=util.GetUnittestDataDir()) return page class DoNothingPageTest(page_test.PageTest): def __init__(self, action_name_to_run=''): super(DoNothingPageTest, self).__init__('DoNothing', action_name_to_run) def DoNothing(self, page, tab, results): pass class AppendAction(page_action.PageAction): def RunAction(self, page, tab, previous_action): self.var.append(True) class WrapAppendAction(page_action.PageAction): def RunsPreviousAction(self): return True def RunAction(self, page, tab, previous_action): self.var.append('before') previous_action.WillRunAction(page, tab) previous_action.RunAction(page, tab, None) self.var.append('after') class PageTestUnitTest(unittest.TestCase): def setUp(self): super(PageTestUnitTest, self).setUp() all_page_actions.RegisterClassForTest('append', AppendAction) all_page_actions.RegisterClassForTest('wrap_append', WrapAppendAction) self._page_test = DoNothingPageTest('action_to_run') self._page = _CreatePage('blank.html') def testRunActions(self): action_called = [] action_to_run = [ { 'action': 'append', 'var': action_called } ] setattr(self._page, 'action_to_run', action_to_run) self._page_test.Run(None, self._page, None, None) self.assertTrue(action_called) def testPreviousAction(self): action_list = [] action_to_run = [ { 'action': 'append', 'var': action_list }, { 'action': 'wrap_append', 'var': action_list } ] setattr(self._page, 'action_to_run', action_to_run) self._page_test.Run(None, self._page, None, None) self.assertEqual(action_list, 
['before', True, 'after']) def testReferenceAction(self): action_list = [] action_to_run = [ { 'action': 'referenced_action_1' }, { 'action': 'referenced_action_2' } ] referenced_action_1 = { 'action': 'append', 'var': action_list } referenced_action_2 = { 'action': 'wrap_append', 'var': action_list } setattr(self._page, 'action_to_run', action_to_run) setattr(self._page, 'referenced_action_1', referenced_action_1) setattr(self._page, 'referenced_action_2', referenced_action_2) self._page_test.Run(None, self._page, None, None) self.assertEqual(action_list, ['before', True, 'after']) def testRepeatAction(self): action_list = [] action_to_run = { 'action': 'append', 'var': action_list, 'repeat': 10 } setattr(self._page, 'action_to_run', action_to_run) self._page_test.Run(None, self._page, None, None) self.assertEqual(len(action_list), 10) def testRepeatReferenceAction(self): action_list = [] action_to_run = { 'action': 'referenced_action', 'repeat': 2 } referenced_action = [ { 'action': 'append', 'var': action_list }, { 'action': 'wrap_append', 'var': action_list } ] setattr(self._page, 'action_to_run', action_to_run) setattr(self._page, 'referenced_action', referenced_action) self._page_test.Run(None, self._page, None, None) self.assertEqual(action_list, ['before', True, 'after', 'before', True, 'after']) def testRepeatPreviousActionFails(self): action_list = [] action_to_run = { 'action': 'wrap_append', 'var': action_list, 'repeat': 2 } setattr(self._page, 'action_to_run', action_to_run) self.assertRaises(page_action.PageActionFailed, lambda: self._page_test.Run(None, self._page, None, None))
bsd-3-clause
mdeemer/XlsxWriter
xlsxwriter/test/comparison/test_hyperlink11.py
8
1912
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'hyperlink11.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {} def test_link_format_explicit(self): """Test the creation of a simple XlsxWriter file with hyperlinks. This example has link formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() url_format = workbook.add_format({'color': 'blue', 'underline': 1}) worksheet.write_url('A1', 'http://www.perl.org/', url_format) workbook.close() self.assertExcelEqual() def test_link_format_implicit(self): """Test the creation of a simple XlsxWriter file with hyperlinks. This example has link formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.write_url('A1', 'http://www.perl.org/') workbook.close() self.assertExcelEqual() def test_link_format_none(self): """Test the creation of a simple XlsxWriter file with hyperlinks. This example has link formatting.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.write_url('A1', 'http://www.perl.org/', None) workbook.close() self.assertExcelEqual()
bsd-2-clause
laydros/commission
sale_commission/models/sale_order.py
8
4065
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011 Pexego Sistemas Informáticos (<http://www.pexego.es>). # Copyright (C) 2015 Avanzosc (<http://www.avanzosc.es>) # Copyright (C) 2015 Pedro M. Baeza (<http://www.serviciosbaeza.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields, api class SaleOrder(models.Model): _inherit = "sale.order" @api.one @api.depends('order_line.agents.amount') def _get_commission_total(self): self.commission_total = 0.0 for line in self.order_line: self.commission_total += sum(x.amount for x in line.agents) commission_total = fields.Float( string="Commissions", compute="_get_commission_total", store=True) class SaleOrderLine(models.Model): _inherit = "sale.order.line" @api.model def _default_agents(self): agents = [] if self.env.context.get('partner_id'): partner = self.env['res.partner'].browse( self.env.context['partner_id']) for agent in partner.agents: agents.append({'agent': agent.id, 'commission': agent.commission.id}) return [(0, 0, x) for x in agents] agents = fields.One2many( string="Agents & commissions", comodel_name='sale.order.line.agent', inverse_name='sale_line', copy=True, readonly=True, default=_default_agents) 
commission_free = fields.Boolean( string="Comm. free", related="product_id.commission_free", store=True, readonly=True) @api.model def _prepare_order_line_invoice_line(self, line, account_id=False): vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line( line, account_id=account_id) vals['agents'] = [ (0, 0, {'agent': x.agent.id, 'commission': x.commission.id}) for x in line.agents] return vals class SaleOrderLineAgent(models.Model): _name = "sale.order.line.agent" _rec_name = "agent" sale_line = fields.Many2one( comodel_name="sale.order.line", required=True, ondelete="cascade") agent = fields.Many2one( comodel_name="res.partner", required=True, ondelete="restrict", domain="[('agent', '=', True')]") commission = fields.Many2one( comodel_name="sale.commission", required=True, ondelete="restrict") amount = fields.Float(compute="_get_amount", store=True) _sql_constraints = [ ('unique_agent', 'UNIQUE(sale_line, agent)', 'You can only add one time each agent.') ] @api.one @api.onchange('agent') def onchange_agent(self): self.commission = self.agent.commission @api.one @api.depends('commission.commission_type', 'sale_line.price_subtotal') def _get_amount(self): self.amount = 0.0 if (not self.sale_line.product_id.commission_free and self.commission): subtotal = self.sale_line.price_subtotal if self.commission.commission_type == 'fixed': self.amount = subtotal * (self.commission.fix_qty / 100.0) else: self.amount = self.commission.calculate_section(subtotal)
agpl-3.0
crazy-canux/django
django/core/checks/model_checks.py
525
2390
# -*- coding: utf-8 -*- from __future__ import unicode_literals import inspect import types from django.apps import apps from django.core.checks import Error, Tags, register @register(Tags.models) def check_all_models(app_configs=None, **kwargs): errors = [] for model in apps.get_models(): if app_configs is None or model._meta.app_config in app_configs: if not inspect.ismethod(model.check): errors.append( Error( "The '%s.check()' class method is " "currently overridden by %r." % ( model.__name__, model.check), hint=None, obj=model, id='models.E020' ) ) else: errors.extend(model.check(**kwargs)) return errors @register(Tags.models, Tags.signals) def check_model_signals(app_configs=None, **kwargs): """ Ensure lazily referenced model signals senders are installed. """ # Avoid circular import from django.db import models errors = [] for name in dir(models.signals): obj = getattr(models.signals, name) if isinstance(obj, models.signals.ModelSignal): for reference, receivers in obj.unresolved_references.items(): for receiver, _, _ in receivers: # The receiver is either a function or an instance of class # defining a `__call__` method. if isinstance(receiver, types.FunctionType): description = "The '%s' function" % receiver.__name__ else: description = "An instance of the '%s' class" % receiver.__class__.__name__ errors.append( Error( "%s was connected to the '%s' signal " "with a lazy reference to the '%s' sender, " "which has not been installed." % ( description, name, '.'.join(reference) ), obj=receiver.__module__, hint=None, id='signals.E001' ) ) return errors
bsd-3-clause
wweiradio/django
tests/template_tests/test_custom.py
152
20056
from __future__ import unicode_literals import os from django.template import Context, Engine, TemplateSyntaxError from django.template.base import Node from django.template.library import InvalidTemplateLibrary from django.test import SimpleTestCase, ignore_warnings from django.test.utils import extend_sys_path from django.utils import six from django.utils.deprecation import RemovedInDjango110Warning from .templatetags import custom, inclusion from .utils import ROOT LIBRARIES = { 'custom': 'template_tests.templatetags.custom', 'inclusion': 'template_tests.templatetags.inclusion', } class CustomFilterTests(SimpleTestCase): def test_filter(self): engine = Engine(libraries=LIBRARIES) t = engine.from_string("{% load custom %}{{ string|trim:5 }}") self.assertEqual( t.render(Context({"string": "abcdefghijklmnopqrstuvwxyz"})), "abcde" ) class TagTestCase(SimpleTestCase): @classmethod def setUpClass(cls): cls.engine = Engine(app_dirs=True, libraries=LIBRARIES) super(TagTestCase, cls).setUpClass() def verify_tag(self, tag, name): self.assertEqual(tag.__name__, name) self.assertEqual(tag.__doc__, 'Expected %s __doc__' % name) self.assertEqual(tag.__dict__['anything'], 'Expected %s __dict__' % name) class SimpleTagTests(TagTestCase): def test_simple_tags(self): c = Context({'value': 42}) templates = [ ('{% load custom %}{% no_params %}', 'no_params - Expected result'), ('{% load custom %}{% one_param 37 %}', 'one_param - Expected result: 37'), ('{% load custom %}{% explicit_no_context 37 %}', 'explicit_no_context - Expected result: 37'), ('{% load custom %}{% no_params_with_context %}', 'no_params_with_context - Expected result (context value: 42)'), ('{% load custom %}{% params_and_context 37 %}', 'params_and_context - Expected result (context value: 42): 37'), ('{% load custom %}{% simple_two_params 37 42 %}', 'simple_two_params - Expected result: 37, 42'), ('{% load custom %}{% simple_one_default 37 %}', 'simple_one_default - Expected result: 37, hi'), ('{% load custom 
%}{% simple_one_default 37 two="hello" %}', 'simple_one_default - Expected result: 37, hello'), ('{% load custom %}{% simple_one_default one=99 two="hello" %}', 'simple_one_default - Expected result: 99, hello'), ('{% load custom %}{% simple_one_default 37 42 %}', 'simple_one_default - Expected result: 37, 42'), ('{% load custom %}{% simple_unlimited_args 37 %}', 'simple_unlimited_args - Expected result: 37, hi'), ('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}', 'simple_unlimited_args - Expected result: 37, 42, 56, 89'), ('{% load custom %}{% simple_only_unlimited_args %}', 'simple_only_unlimited_args - Expected result: '), ('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}', 'simple_only_unlimited_args - Expected result: 37, 42, 56, 89'), ('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}', 'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4'), ] for entry in templates: t = self.engine.from_string(entry[0]) self.assertEqual(t.render(c), entry[1]) for entry in templates: t = self.engine.from_string("%s as var %%}Result: {{ var }}" % entry[0][0:-2]) self.assertEqual(t.render(c), "Result: %s" % entry[1]) def test_simple_tag_errors(self): errors = [ ("'simple_one_default' received unexpected keyword argument 'three'", '{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}'), ("'simple_two_params' received too many positional arguments", '{% load custom %}{% simple_two_params 37 42 56 %}'), ("'simple_one_default' received too many positional arguments", '{% load custom %}{% simple_one_default 37 42 56 %}'), ("'simple_unlimited_args_kwargs' received some positional argument(s) after some keyword argument(s)", '{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}'), ("'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'", '{% load custom %}{% simple_unlimited_args_kwargs 37 
eggs="scrambled" eggs="scrambled" %}'), ] for entry in errors: with self.assertRaisesMessage(TemplateSyntaxError, entry[0]): self.engine.from_string(entry[1]) for entry in errors: with self.assertRaisesMessage(TemplateSyntaxError, entry[0]): self.engine.from_string("%s as var %%}" % entry[1][0:-2]) def test_simple_tag_escaping_autoescape_off(self): c = Context({'name': "Jack & Jill"}, autoescape=False) t = self.engine.from_string("{% load custom %}{% escape_naive %}") self.assertEqual(t.render(c), "Hello Jack & Jill!") def test_simple_tag_naive_escaping(self): c = Context({'name': "Jack & Jill"}) t = self.engine.from_string("{% load custom %}{% escape_naive %}") self.assertEqual(t.render(c), "Hello Jack &amp; Jill!") def test_simple_tag_explicit_escaping(self): # Check we don't double escape c = Context({'name': "Jack & Jill"}) t = self.engine.from_string("{% load custom %}{% escape_explicit %}") self.assertEqual(t.render(c), "Hello Jack &amp; Jill!") def test_simple_tag_format_html_escaping(self): # Check we don't double escape c = Context({'name': "Jack & Jill"}) t = self.engine.from_string("{% load custom %}{% escape_format_html %}") self.assertEqual(t.render(c), "Hello Jack &amp; Jill!") def test_simple_tag_registration(self): # Test that the decorators preserve the decorated function's docstring, name and attributes. 
self.verify_tag(custom.no_params, 'no_params') self.verify_tag(custom.one_param, 'one_param') self.verify_tag(custom.explicit_no_context, 'explicit_no_context') self.verify_tag(custom.no_params_with_context, 'no_params_with_context') self.verify_tag(custom.params_and_context, 'params_and_context') self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs') self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter') def test_simple_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True msg = ( "'simple_tag_without_context_parameter' is decorated with " "takes_context=True so it must have a first argument of 'context'" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.from_string('{% load custom %}{% simple_tag_without_context_parameter 123 %}') class InclusionTagTests(TagTestCase): def test_inclusion_tags(self): c = Context({'value': 42}) templates = [ ('{% load inclusion %}{% inclusion_no_params %}', 'inclusion_no_params - Expected result\n'), ('{% load inclusion %}{% inclusion_one_param 37 %}', 'inclusion_one_param - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_explicit_no_context 37 %}', 'inclusion_explicit_no_context - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_no_params_with_context %}', 'inclusion_no_params_with_context - Expected result (context value: 42)\n'), ('{% load inclusion %}{% inclusion_params_and_context 37 %}', 'inclusion_params_and_context - Expected result (context value: 42): 37\n'), ('{% load inclusion %}{% inclusion_two_params 37 42 %}', 'inclusion_two_params - Expected result: 37, 42\n'), ('{% load inclusion %}{% inclusion_one_default 37 %}', 'inclusion_one_default - Expected result: 37, hi\n'), ('{% load inclusion %}{% inclusion_one_default 37 two="hello" %}', 'inclusion_one_default - Expected result: 37, hello\n'), ('{% load inclusion %}{% inclusion_one_default one=99 two="hello" 
%}', 'inclusion_one_default - Expected result: 99, hello\n'), ('{% load inclusion %}{% inclusion_one_default 37 42 %}', 'inclusion_one_default - Expected result: 37, 42\n'), ('{% load inclusion %}{% inclusion_unlimited_args 37 %}', 'inclusion_unlimited_args - Expected result: 37, hi\n'), ('{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}', 'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n'), ('{% load inclusion %}{% inclusion_only_unlimited_args %}', 'inclusion_only_unlimited_args - Expected result: \n'), ('{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}', 'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n'), ('{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}', 'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n'), ] for entry in templates: t = self.engine.from_string(entry[0]) self.assertEqual(t.render(c), entry[1]) def test_inclusion_tag_errors(self): errors = [ ("'inclusion_one_default' received unexpected keyword argument 'three'", '{% load inclusion %}{% inclusion_one_default 99 two="hello" three="foo" %}'), ("'inclusion_two_params' received too many positional arguments", '{% load inclusion %}{% inclusion_two_params 37 42 56 %}'), ("'inclusion_one_default' received too many positional arguments", '{% load inclusion %}{% inclusion_one_default 37 42 56 %}'), ("'inclusion_one_default' did not receive value(s) for the argument(s): 'one'", '{% load inclusion %}{% inclusion_one_default %}'), ("'inclusion_unlimited_args' did not receive value(s) for the argument(s): 'one'", '{% load inclusion %}{% inclusion_unlimited_args %}'), ( "'inclusion_unlimited_args_kwargs' received some positional argument(s) " "after some keyword argument(s)", '{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="boiled" 56 four=1|add:3 %}', ), ("'inclusion_unlimited_args_kwargs' received multiple values for keyword 
argument 'eggs'", '{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'), ] for entry in errors: with self.assertRaisesMessage(TemplateSyntaxError, entry[0]): self.engine.from_string(entry[1]) def test_include_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True msg = ( "'inclusion_tag_without_context_parameter' is decorated with " "takes_context=True so it must have a first argument of 'context'" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.from_string('{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}') def test_inclusion_tags_from_template(self): c = Context({'value': 42}) templates = [ ('{% load inclusion %}{% inclusion_no_params_from_template %}', 'inclusion_no_params_from_template - Expected result\n'), ('{% load inclusion %}{% inclusion_one_param_from_template 37 %}', 'inclusion_one_param_from_template - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_explicit_no_context_from_template 37 %}', 'inclusion_explicit_no_context_from_template - Expected result: 37\n'), ('{% load inclusion %}{% inclusion_no_params_with_context_from_template %}', 'inclusion_no_params_with_context_from_template - Expected result (context value: 42)\n'), ('{% load inclusion %}{% inclusion_params_and_context_from_template 37 %}', 'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n'), ('{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}', 'inclusion_two_params_from_template - Expected result: 37, 42\n'), ('{% load inclusion %}{% inclusion_one_default_from_template 37 %}', 'inclusion_one_default_from_template - Expected result: 37, hi\n'), ('{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}', 'inclusion_one_default_from_template - Expected result: 37, 42\n'), ('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}', 'inclusion_unlimited_args_from_template - 
Expected result: 37, hi\n'), ('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}', 'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'), ('{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}', 'inclusion_only_unlimited_args_from_template - Expected result: \n'), ('{% load inclusion %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}', 'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'), ] for entry in templates: t = self.engine.from_string(entry[0]) self.assertEqual(t.render(c), entry[1]) def test_inclusion_tag_registration(self): # Test that the decorators preserve the decorated function's docstring, name and attributes. self.verify_tag(inclusion.inclusion_no_params, 'inclusion_no_params') self.verify_tag(inclusion.inclusion_one_param, 'inclusion_one_param') self.verify_tag(inclusion.inclusion_explicit_no_context, 'inclusion_explicit_no_context') self.verify_tag(inclusion.inclusion_no_params_with_context, 'inclusion_no_params_with_context') self.verify_tag(inclusion.inclusion_params_and_context, 'inclusion_params_and_context') self.verify_tag(inclusion.inclusion_two_params, 'inclusion_two_params') self.verify_tag(inclusion.inclusion_one_default, 'inclusion_one_default') self.verify_tag(inclusion.inclusion_unlimited_args, 'inclusion_unlimited_args') self.verify_tag(inclusion.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args') self.verify_tag(inclusion.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter') self.verify_tag(inclusion.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n') self.verify_tag(inclusion.inclusion_tag_current_app, 'inclusion_tag_current_app') self.verify_tag(inclusion.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs') @ignore_warnings(category=RemovedInDjango110Warning) def test_15070_current_app(self): """ Test that inclusion tag passes down `current_app` of context to the 
Context of the included/rendered template as well. """ c = Context({}) t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_current_app %}') self.assertEqual(t.render(c).strip(), 'None') # That part produces the deprecation warning c = Context({}, current_app='advanced') self.assertEqual(t.render(c).strip(), 'advanced') def test_15070_use_l10n(self): """ Test that inclusion tag passes down `use_l10n` of context to the Context of the included/rendered template as well. """ c = Context({}) t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_use_l10n %}') self.assertEqual(t.render(c).strip(), 'None') c.use_l10n = True self.assertEqual(t.render(c).strip(), 'True') def test_no_render_side_effect(self): """ #23441 -- InclusionNode shouldn't modify its nodelist at render time. """ engine = Engine(app_dirs=True, libraries=LIBRARIES) template = engine.from_string('{% load inclusion %}{% inclusion_no_params %}') count = template.nodelist.get_nodes_by_type(Node) template.render(Context({})) self.assertEqual(template.nodelist.get_nodes_by_type(Node), count) def test_render_context_is_cleared(self): """ #24555 -- InclusionNode should push and pop the render_context stack when rendering. Otherwise, leftover values such as blocks from extending can interfere with subsequent rendering. """ engine = Engine(app_dirs=True, libraries=LIBRARIES) template = engine.from_string('{% load inclusion %}{% inclusion_extends1 %}{% inclusion_extends2 %}') self.assertEqual(template.render(Context({})).strip(), 'one\ntwo') class AssignmentTagTests(TagTestCase): def test_assignment_tags(self): c = Context({'value': 42}) t = self.engine.from_string('{% load custom %}{% assignment_no_params as var %}The result is: {{ var }}') self.assertEqual(t.render(c), 'The result is: assignment_no_params - Expected result') def test_assignment_tag_registration(self): # Test that the decorators preserve the decorated function's docstring, name and attributes. 
self.verify_tag(custom.assignment_no_params, 'assignment_no_params') def test_assignment_tag_missing_context(self): # The 'context' parameter must be present when takes_context is True msg = ( "'assignment_tag_without_context_parameter' is decorated with " "takes_context=True so it must have a first argument of 'context'" ) with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.from_string('{% load custom %}{% assignment_tag_without_context_parameter 123 as var %}') class TemplateTagLoadingTests(SimpleTestCase): @classmethod def setUpClass(cls): cls.egg_dir = os.path.join(ROOT, 'eggs') super(TemplateTagLoadingTests, cls).setUpClass() def test_load_error(self): msg = ( "Invalid template library specified. ImportError raised when " "trying to load 'template_tests.broken_tag': cannot import name " "'?Xtemplate'?" ) with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg): Engine(libraries={ 'broken_tag': 'template_tests.broken_tag', }) def test_load_error_egg(self): egg_name = '%s/tagsegg.egg' % self.egg_dir msg = ( "Invalid template library specified. ImportError raised when " "trying to load 'tagsegg.templatetags.broken_egg': cannot " "import name '?Xtemplate'?" ) with extend_sys_path(egg_name): with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg): Engine(libraries={ 'broken_egg': 'tagsegg.templatetags.broken_egg', }) def test_load_working_egg(self): ttext = "{% load working_egg %}" egg_name = '%s/tagsegg.egg' % self.egg_dir with extend_sys_path(egg_name): engine = Engine(libraries={ 'working_egg': 'tagsegg.templatetags.working_egg', }) engine.from_string(ttext)
bsd-3-clause
Newman101/scipy
scipy/sparse/csc.py
33
6947
"""Compressed Sparse Column matrix format"""

from __future__ import division, print_function, absolute_import

__docformat__ = "restructuredtext en"

__all__ = ['csc_matrix', 'isspmatrix_csc']

import numpy as np
from scipy._lib.six import xrange

from .base import spmatrix
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix


class csc_matrix(_cs_matrix, IndexMixin):
    """
    Compressed Sparse Column matrix

    This can be instantiated in several ways:

        csc_matrix(D)
            with a dense matrix or rank-2 ndarray D

        csc_matrix(S)
            with another sparse matrix S (equivalent to S.tocsc())

        csc_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

        csc_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSC representation where the row indices for
            column i are stored in ``indices[indptr[i]:indptr[i+1]]``
            and their corresponding values are stored in
            ``data[indptr[i]:indptr[i+1]]``.  If the shape parameter is
            not supplied, the matrix dimensions are inferred from
            the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        Data array of the matrix
    indices
        CSC format index array
    indptr
        CSC format index pointer array
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSC format
        - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
        - efficient column slicing
        - fast matrix vector products (CSR, BSR may be faster)

    Disadvantages of the CSC format
        - slow row slicing operations (consider CSR)
        - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> csc_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 2, 2, 0, 1, 2])
    >>> col = np.array([0, 0, 1, 2, 2, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    """
    format = 'csc'

    def transpose(self, axes=None, copy=False):
        # A CSC matrix of shape (M, N) reinterpreted over the same
        # data/indices/indptr arrays is exactly a CSR matrix of shape
        # (N, M), so transposition needs no data movement at all.
        if axes is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'axes' parameter because swapping "
                              "dimensions is the only logical permutation."))
        M, N = self.shape
        # Imported here (not at module level) to avoid a circular
        # csc <-> csr import at package load time.
        from .csr import csr_matrix
        return csr_matrix((self.data, self.indices, self.indptr),
                          (N, M), copy=copy)

    transpose.__doc__ = spmatrix.transpose.__doc__

    def __iter__(self):
        # Row-wise iteration is a CSR-native operation, so convert once
        # and yield (1 x N) row slices from the CSR copy.
        csr = self.tocsr()
        for r in xrange(self.shape[0]):
            yield csr[r,:]

    def tocsc(self, copy=False):
        # Already in CSC form; only duplicate the arrays when asked to.
        if copy:
            return self.copy()
        else:
            return self

    tocsc.__doc__ = spmatrix.tocsc.__doc__

    def tocsr(self, copy=False):
        # Pre-allocate the output arrays, then let the compiled
        # csc_tocsr routine fill them in place.
        M,N = self.shape
        # Choose an index dtype wide enough for both nnz and N.
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(self.nnz, N))
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))

        csc_tocsr(M, N,
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)

        from .csr import csr_matrix
        A = csr_matrix((data, indices, indptr), shape=self.shape)
        # csc_tocsr emits sorted column indices, so record that to let
        # later operations skip a redundant sort.
        A.has_sorted_indices = True
        return A

    tocsr.__doc__ = spmatrix.tocsr.__doc__

    def __getitem__(self, key):
        # Use CSR to implement fancy indexing.
        # Indexing self.T (a CSR view) with swapped row/col and
        # transposing back reuses CSR's indexing machinery for free.

        row, col = self._unpack_index(key)
        # Things that return submatrices. row or col is a int or slice.
        if (isinstance(row, slice) or isinstance(col, slice) or
                isintlike(row) or isintlike(col)):
            return self.T[col, row].T
        # Things that return a sequence of values.
        else:
            return self.T[col, row]

    def nonzero(self):
        # CSC can't use _cs_matrix's .nonzero method because it
        # returns the indices sorted for self transposed.

        # Get row and col indices, from _cs_matrix.tocoo
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        row, col = self._swap((major_indices, minor_indices))

        # Remove explicit zeros
        nz_mask = self.data != 0
        row = row[nz_mask]
        col = col[nz_mask]

        # Sort them to be in C-style order
        # (mergesort is stable, so within each row the column order
        # produced above is preserved)
        ind = np.argsort(row, kind='mergesort')
        row = row[ind]
        col = col[ind]

        return row, col

    nonzero.__doc__ = _cs_matrix.nonzero.__doc__

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        # we convert to CSR to maintain compatibility with old impl.
        # in spmatrix.getrow()
        return self._get_submatrix(i, slice(None)).tocsr()

    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSC matrix (column vector).
        """
        return self._get_submatrix(slice(None), i)

    # these functions are used by the parent class (_cs_matrix)
    # to remove redudancy between csc_matrix and csr_matrix
    def _swap(self,x):
        """swap the members of x if this is a column-oriented matrix
        """
        # CSC's "major" axis is columns, so (row, col) pairs from the
        # shared CSR/CSC code must be reversed here.
        return (x[1],x[0])


def isspmatrix_csc(x):
    # True only for csc_matrix instances (and subclasses).
    return isinstance(x, csc_matrix)
bsd-3-clause
chunywang/crosswalk-test-suite
webapi/webapi-rawsockets-w3c-tests/inst.apk.py
1996
3186
#!/usr/bin/env python import os import shutil import glob import time import sys import subprocess from optparse import OptionParser, make_option SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PARAMETERS = None ADB_CMD = "adb" def doCMD(cmd): # Do not need handle timeout in this short script, let tool do it print "-->> \"%s\"" % cmd output = [] cmd_return_code = 1 cmd_proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) while True: output_line = cmd_proc.stdout.readline().strip("\r\n") cmd_return_code = cmd_proc.poll() if output_line == '' and cmd_return_code is not None: break sys.stdout.write("%s\n" % output_line) sys.stdout.flush() output.append(output_line) return (cmd_return_code, output) def uninstPKGs(): action_status = True for root, dirs, files in os.walk(SCRIPT_DIR): for file in files: if file.endswith(".apk"): cmd = "%s -s %s uninstall org.xwalk.%s" % ( ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0])) (return_code, output) = doCMD(cmd) for line in output: if "Failure" in line: action_status = False break return action_status def instPKGs(): action_status = True for root, dirs, files in os.walk(SCRIPT_DIR): for file in files: if file.endswith(".apk"): cmd = "%s -s %s install %s" % (ADB_CMD, PARAMETERS.device, os.path.join(root, file)) (return_code, output) = doCMD(cmd) for line in output: if "Failure" in line: action_status = False break return action_status def main(): try: usage = "usage: inst.py -i" opts_parser = OptionParser(usage=usage) opts_parser.add_option( "-s", dest="device", action="store", help="Specify device") opts_parser.add_option( "-i", dest="binstpkg", action="store_true", help="Install package") opts_parser.add_option( "-u", dest="buninstpkg", action="store_true", help="Uninstall package") global PARAMETERS (PARAMETERS, args) = opts_parser.parse_args() except Exception as e: print "Got wrong option: %s, exit ..." 
% e sys.exit(1) if not PARAMETERS.device: (return_code, output) = doCMD("adb devices") for line in output: if str.find(line, "\tdevice") != -1: PARAMETERS.device = line.split("\t")[0] break if not PARAMETERS.device: print "No device found" sys.exit(1) if PARAMETERS.binstpkg and PARAMETERS.buninstpkg: print "-i and -u are conflict" sys.exit(1) if PARAMETERS.buninstpkg: if not uninstPKGs(): sys.exit(1) else: if not instPKGs(): sys.exit(1) if __name__ == "__main__": main() sys.exit(0)
bsd-3-clause
glaubitz/fs-uae-debian
arcade/OpenGL/GLX/NV/copy_image.py
8
1125
'''OpenGL extension NV.copy_image This module customises the behaviour of the OpenGL.raw.GLX.NV.copy_image to provide a more Python-friendly API Overview (from the spec) This extension enables efficient image data transfer between image objects (i.e. textures and renderbuffers) without the need to bind the objects or otherwise configure the rendering pipeline. The WGL and GLX versions allow copying between images in different contexts, even if those contexts are in different sharelists or even on different physical devices. The official definition of this extension is available here: http://www.opengl.org/registry/specs/NV/copy_image.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GLX import _types, _glgets from OpenGL.raw.GLX.NV.copy_image import * from OpenGL.raw.GLX.NV.copy_image import _EXTENSION_NAME def glInitCopyImageNV(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END AUTOGENERATED SECTION
gpl-2.0
robmcmullen/peppy
peppy/third_party/pubsub/core/pubsub3.py
1
8095
'''
This is the top-level API to pubsub, API version 3. This version is
selected by default by the pubsub configuration module, pubsubconf. To
load a different API version, see the pubsubconf docs (from pubsub import
pubsubconf; help(pubsubconf)).

TODO: add isMsgReceivable(listener, topicName) to find out if listener is
subscribed to topicName or any of its subtopics.

:copyright: Copyright 2006-2009 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE.txt for details.
'''

PUBSUB_VERSION = 3           # DO NOT CHANGE
SVN_VERSION = "$Rev: 238 $".split()[1]  # DO NOT CHANGE
VERSION_STR = "3.1.0b1.200912.r" + SVN_VERSION

from listener import \
    Listener, \
    getID as getListenerID, \
    ListenerInadequate

from topicobj import \
    Topic, \
    SenderMissingReqdArgs, \
    SenderUnknownOptArgs, \
    ListenerSpecInvalid, \
    ListenerNotValidatable, \
    ExcHandlerError

from topicmgr import \
    TopicManager as _TopicManager, \
    ListenerSpecIncomplete, \
    UndefinedTopic, \
    UndefinedSubtopic, \
    ALL_TOPICS

from topicdefnprovider import \
    ITopicDefnProvider, TopicDefnProvider, \
    registerTypeForImport as registerTopicDefnProviderType, \
    IMPORT_MODULE as TOPIC_TREE_FROM_MODULE, \
    IMPORT_STRING as TOPIC_TREE_FROM_STRING, \
    IMPORT_CLASS as TOPIC_TREE_FROM_CLASS, \
    exportTreeAsSpec, ITopicTreeTraverser

from publisher import Publisher


__all__ = [
    # listener stuff:
    'Listener', 'ListenerInadequate',
    'isValid', 'validate',

    # topic stuff:
    'ALL_TOPICS', 'Topic',
    'topics', 'topicsMap', 'AUTO_TOPIC',
    'getOrCreateTopic', 'getTopic', 'newTopic', 'delTopic',
    'ListenerSpecIncomplete', 'ListenerNotValidatable',
    'UndefinedTopic', 'UndefinedSubtopic', 'ExcHandlerError',
    'getAssociatedTopics',
    'getDefaultTopicMgr', 'getDefaultRootAllTopics',

    # topioc defn provider stuff
    'addTopicDefnProvider', 'clearTopicDefnProviders',
    'registerTopicDefnProviderType', 'TOPIC_TREE_FROM_MODULE',
    'TOPIC_TREE_FROM_CLASS', 'TOPIC_TREE_FROM_STRING',
    'importMyTopicTree', 'exportMyTopicTree', 'ITopicTreeTraverser',

    # publisher stuff:
    'Publisher',
    'subscribe', 'unsubscribe', 'isSubscribed', 'unsubAll',
    'sendMessage', 'SenderMissingReqdArgs', 'SenderUnknownOptArgs',
    'getListenerExcHandler', 'setListenerExcHandler',
    'addNotificationHandler', 'setNotificationFlags',
    # getNotificationFlags is wired below like its setter, so export it too
    'getNotificationFlags',
    'clearNotificationHandlers', 'setTopicUnspecifiedFatal',

    # misc:
    'PUBSUB_VERSION',
    ]


# ---------------------------------------------
# Module-level facade: a single Publisher instance backs the whole API,
# and the most-used methods are re-exported as module functions.

_publisher = Publisher()

subscribe = _publisher.subscribe
unsubscribe = _publisher.unsubscribe
unsubAll = _publisher.unsubAll
sendMessage = _publisher.sendMessage

getListenerExcHandler = _publisher.getListenerExcHandler
setListenerExcHandler = _publisher.setListenerExcHandler
addNotificationHandler = _publisher.addNotificationHandler
clearNotificationHandlers = _publisher.clearNotificationHandlers
setNotificationFlags = _publisher.setNotificationFlags
getNotificationFlags = _publisher.getNotificationFlags

setTopicUnspecifiedFatal = _publisher.setTopicUnspecifiedFatal

# NOTE: deliberately rebinds the class name to the singleton instance.
Publisher = _publisher  # for backward compat with pubsub1


# ---------------------------------------------
# Topic manager facade.

_topicMgr = _publisher.getTopicMgr()

topics = _topicMgr._allTopics
topicsMap = _topicMgr._topicsMap
AUTO_TOPIC = Listener.AUTO_TOPIC


def isValid(listener, topicName):
    '''Return true only if listener can subscribe to messages of
    type topicName.'''
    return _topicMgr.getTopic(topicName).isValid(listener)


def validate(listener, topicName):
    '''Checks if listener can subscribe to topicName. If not, raises
    ListenerInadequate, otherwise just returns.'''
    _topicMgr.getTopic(topicName).validate(listener)


def isSubscribed(listener, topicName):
    '''Returns true if listener has subscribed to topicName, false otherwise.
    WARNING: a false return is not a guarantee that listener won't get
    messages of topicName: it could receive messages of a subtopic of
    topicName.
    '''
    return _topicMgr.getTopic(topicName).hasListener(listener)


def getDefaultTopicMgr():
    '''Get the topic manager that is created by default when you
    import package.'''
    return _topicMgr


def getDefaultRootAllTopics():
    '''Get the topic that is parent of all root (ie top-level) topics:

    - this topic satisfies isAll()==True, isRoot()==False, getParent()
      is None;
    - all top-level topics satisfy isAll()==False, isRoot()==True,
      getParent() is getDefaultRootAllTopics();
    - all other topics satisfy neither.
    '''
    return _topicMgr._allTopics


getOrCreateTopic = _topicMgr.getOrCreateTopic
newTopic = _topicMgr.newTopic
delTopic = _topicMgr.delTopic
getTopic = _topicMgr.getTopic
getAssociatedTopics = _topicMgr.getTopics

addTopicDefnProvider = _topicMgr.addDefnProvider
clearTopicDefnProviders = _topicMgr.clearDefnProviders


def importMyTopicTree(source, format = TOPIC_TREE_FROM_MODULE, lazy=False):
    '''Import topic definitions from a source. The format of the source
    defaults to TOPIC_TREE_FROM_MODULE, ie default is to import from a
    module (typically, exported by exportMyTopicTree(source). The doc
    string for the source is returned (for a module source, this is the
    module doc string; etc). If lazy is True, the topics will be put in
    topic tree only upon first use by the application, otherwise, all
    topics that don't already exist are incorporated (this may result in
    a smaller topic tree if an application has evolved significantly).

    Other source formats are TOPIC_TREE_FROM_STRING and
    TOPIC_TREE_FROM_CLASS. They are explained in the package API
    documentation.

    Notes:

    - This function can be called several times, but should be called
      only once per source.
    - More source formats can be defined via
      pub.registerTopicDefnProviderType(), which must be given a class
      that adheres to the pub.ITopicDefnProvider interface.
    - If lazy=True, then a later call to exportMyTopicTree() will only
      export those topics that have been used by the application
    '''
    provider = TopicDefnProvider(source, format)
    addTopicDefnProvider(provider)
    if not lazy:
        # Eagerly materialize every topic the provider knows about.
        for topicName in provider:
            _topicMgr.getOrCreateTopic(topicName)

    return provider.getTreeDoc()


def _backupIfExists(filename, bak):
    # Copy filename to filename.<bak> before it gets overwritten; no-op
    # when the target does not exist yet.
    import os, shutil
    if os.path.exists(filename):
        backupName = '%s.%s' % (filename, bak)
        shutil.copy(filename, backupName)


def exportMyTopicTree(moduleName = None, rootTopicName=None, rootTopic=None,
    bak='bak', moduleDoc=None):
    '''Export the topic tree to a string and return the string.
    The export only traverses the children of rootTopic. Notes:

    - If moduleName is given, the string is also written to moduleName.py in
      os.getcwd() (the file is overwritten). If bak is not None, the module
      file is first copied to moduleName.py.bak.
    - If rootTopicName or rootTopic are not specified, the pub.ALL_TOPICS
      topic will used.
    - The moduleDoc is the doc string for the module.
    - If importMyTopicTree() was called with lazy=True, then only those
      topics that were used by application will be exported.'''
    if rootTopicName:
        rootTopic = _topicMgr.getTopic(rootTopicName)
    if rootTopic is None:
        rootTopic = getDefaultRootAllTopics()

    # Export once into a string buffer; this makes the function return the
    # exported text in *both* modes (previously the moduleName branch
    # returned None, contradicting this docstring).
    from StringIO import StringIO
    capture = StringIO()
    exportTreeAsSpec(rootTopic, fileObj=capture, treeDoc=moduleDoc)
    contents = capture.getvalue()

    if moduleName is not None:
        filename = '%s.py' % moduleName
        if bak:
            _backupIfExists(filename, bak)
        # open() instead of the deprecated 'file' builtin; same behaviour
        # on Python 2 and forward-compatible.
        moduleFile = open(filename, 'w')
        try:
            moduleFile.write(contents)
        finally:
            moduleFile.close()

    return contents
gpl-2.0
jschuhmacher/cvm
src/train/cerberus.py
2
5944
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sw=4 expandtab:

################################################################################
# File: cerberus.py
#
# Copyright (c) 2009, Jelle Schühmacher <j.schuhmacher@student.science.ru.nl>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################

'''
Cerberus is prototype analysis and detection module for an intrusion
detection system. It uses the Core Vector Machine (CVM) for analysis.
'''

# System
import sys
import traceback
import logging
import cPickle as pickle

# 3rd-party
import argparse

# Local
import common.data
import common.model
import train.trainer
import classify.predictor


def data_collection(header_file, data_file):
    '''Read feature header and data files into a Dataset.'''
    logging.info('Data collection phase...')
    dataset = common.data.Dataset()
    dataset.read(header_file, data_file)
    return dataset


def feature_selection(dataset):
    '''In the future do some feature selection (currently a no-op).'''
    logging.info('Feature selection phase...')
    logging.info('  Not yet implemented')
    return dataset


def prepare_training(dataset, C, EPS, kernel, gamma):
    '''Build a Trainer for the given dataset and hyperparameters.'''
    logging.info('Prepare for training phase...')
    trainer = train.trainer.Trainer(dataset, C, EPS, kernel, gamma)
    return trainer


def do_training(trainer):
    '''Construct classifiers from the training set.'''
    logging.info('Training phase...')
    models = trainer.train()
    return models


def load_models(model_file):
    '''Load trained models from an open file object.'''
    logging.info('Loading models...')
    models = pickle.load(model_file)
    return models


def save_models(models, model_file):
    '''Save trained models to an open file object.'''
    logging.info('Saving models...')
    pickle.dump(models, model_file, pickle.HIGHEST_PROTOCOL)


def prepare_testing(models, dataset):
    '''Build a Predictor over the loaded models and test dataset.'''
    logging.info('Prepare for testing phase...')
    predictor = classify.predictor.Predictor(models, dataset)
    return predictor


def do_testing(predictor):
    '''Run the (concurrent) prediction pass over the test set.'''
    logging.info('Testing phase...')
    predictor.concurrent_predict()
    #predictor.predict()


def initialise_cli():
    '''Set up and return the command line argument parser.'''
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('header', type=argparse.FileType('r'),
                        help='Read feature information from this file.')
    parser.add_argument('dataset', type=str,
                        help='Read data from this file.')
    parser.add_argument('--log', type=argparse.FileType('a'),
                        default=sys.stdout,
                        help='Send log output to this file \
                              (defaults to stdout)')
    parser.add_argument('--C', type=float, default=1e6,
                        help='Complexity constant (defaults to 1000000)')
    parser.add_argument('--EPS', type=float, default=1e-6,
                        help='Epsilon, defaults to 1e-6')
    parser.add_argument('--gamma', type=float, default=0.1,
                        help='Gamma for RBF kernel')
    parser.add_argument('--kernel', type=int, default=1,
                        help='Kernel type: 0 -> dot, 1 -> RBF')

    # Exactly one of --model_out (train) / --model_in (test) is required.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--model_out', type=argparse.FileType('w'),
                       help='Store trained models in this file.')
    group.add_argument('--model_in', type=argparse.FileType('r'),
                       help='Load trained models from this file.')
    return parser


def initialise_logging(log_file):
    '''Initialise the logging module.'''
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)-8s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        stream=log_file,
    )
    logging.info('Initialised Cerberus')


def main():
    '''Entrypoint: training mode when --model_out given, testing mode
    when --model_in given.'''
    parser = initialise_cli()
    arguments = parser.parse_args()
    initialise_logging(arguments.log)

    try:
        if arguments.model_in is None and arguments.model_out is not None:
            # Training mode: fit models and pickle them to --model_out.
            trainset = data_collection(arguments.header, arguments.dataset)
            features = feature_selection(trainset)
            trainer = prepare_training(trainset, arguments.C, arguments.EPS,
                                       arguments.kernel, arguments.gamma)
            models = do_training(trainer)
            save_models(models, arguments.model_out)
            # NOTE(review): rewinding the header file here looks like a
            # leftover from a combined train+test flow — confirm it is
            # still needed.
            arguments.header.seek(0)
        elif arguments.model_in is not None and arguments.model_out is None:
            # Testing mode: unpickle models and classify the dataset.
            models = load_models(arguments.model_in)
            testset = data_collection(arguments.header, arguments.dataset)
            predictor = prepare_testing(models, testset)
            do_testing(predictor)
    except Exception:
        # Was a bare 'except:' that also swallowed SystemExit and
        # KeyboardInterrupt; logging.exception records both the message
        # and the full traceback.
        logging.exception('Unhandled error during Cerberus run')


if __name__ == '__main__':
    sys.exit(main())
gpl-3.0
nyee/RMG-Py
rmgpy/data/kinetics/groups.py
7
26065
#!/usr/bin/python # -*- coding: utf-8 -*- ################################################################################ # # RMG - Reaction Mechanism Generator # # Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the # RMG Team (rmg_dev@mit.edu) # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # ################################################################################ """ This module contains functionality for working with kinetics family functional groups, including support for using group additivity to estimate rate coefficients. 
""" import logging import math import numpy from copy import deepcopy from rmgpy.data.base import Database, DatabaseError, Entry, Group, LogicNode, getAllCombinations, makeLogicNode from rmgpy.kinetics import Arrhenius, ArrheniusEP, KineticsData from rmgpy.species import Species from rmgpy.quantity import constants from .common import KineticsError, UndeterminableKineticsError ################################################################################ class KineticsGroups(Database): """ A class for working with an RMG kinetics family group additivity values. """ def __init__(self, entries=None, top=None, label='', name='', shortDesc='', longDesc='', forwardTemplate=None, forwardRecipe=None, reverseTemplate=None, reverseRecipe=None, forbidden=None ): Database.__init__(self, entries, top, label, name, shortDesc, longDesc) self.numReactants = 0 def __repr__(self): return '<KineticsGroups "{0}">'.format(self.label) def loadEntry(self, index, label, group, kinetics, reference=None, referenceType='', shortDesc='', longDesc=''): if group[0:3].upper() == 'OR{' or group[0:4].upper() == 'AND{' or group[0:7].upper() == 'NOT OR{' or group[0:8].upper() == 'NOT AND{': item = makeLogicNode(group) else: item = Group().fromAdjacencyList(group) if label in self.entries: raise DatabaseError("Duplicate group name {label} found in kinetics groups for {family} family.".format(label=label,family=self.label)) self.entries[label] = Entry( index = index, label = label, item = item, data = kinetics, reference = reference, referenceType = referenceType, shortDesc = shortDesc, longDesc = longDesc.strip(), ) def getReactionTemplate(self, reaction): """ For a given `reaction` with properly-labeled :class:`Molecule` objects as the reactants, determine the most specific nodes in the tree that describe the reaction. 
""" # Get forward reaction template and remove any duplicates forwardTemplate = self.top[:] temporary = [] symmetricTree = False for entry in forwardTemplate: if entry not in temporary: temporary.append(entry) else: # duplicate node found at top of tree # eg. R_recombination: ['Y_rad', 'Y_rad'] assert len(forwardTemplate)==2 , 'Can currently only do symmetric trees with nothing else in them' symmetricTree = True forwardTemplate = temporary # Descend reactant trees as far as possible template = [] for entry in forwardTemplate: # entry is a top-level node that should be matched group = entry.item # Identify the atom labels in a group if it is not a logical node atomList = [] if not isinstance(entry.item, LogicNode): atomList = group.getLabeledAtoms() for reactant in reaction.reactants: if isinstance(reactant, Species): reactant = reactant.molecule[0] # Match labeled atoms # Check that this reactant has each of the atom labels in this group. If it is a LogicNode, the atomList is empty and # it will proceed directly to the descendTree step. if not all([reactant.containsLabeledAtom(label) for label in atomList]): continue # don't try to match this structure - the atoms aren't there! # Match structures atoms = reactant.getLabeledAtoms() # Descend the tree, making sure to match atomlabels exactly using strict = True matched_node = self.descendTree(reactant, atoms, root=entry, strict=True) if matched_node is not None: template.append(matched_node) #else: # logging.warning("Couldn't find match for {0} in {1}".format(entry,atomList)) # logging.warning(reactant.toAdjacencyList()) # Get fresh templates (with duplicate nodes back in) forwardTemplate = self.top[:] if self.label.lower().startswith('r_recombination'): forwardTemplate.append(forwardTemplate[0]) # Check that we were able to match the template. 
# template is a list of the actual matched nodes # forwardTemplate is a list of the top level nodes that should be matched if len(template) != len(forwardTemplate): # print 'len(template):', len(template) # print 'len(forwardTemplate):', len(forwardTemplate) msg = 'Unable to find matching template for reaction {0} in reaction family {1}.'.format(str(reaction), str(self)) msg += 'Trying to match {0} but matched {1}'.format(str(forwardTemplate),str(template)) # print 'reactants' # for reactant in reaction.reactants: # print reactant.toAdjacencyList() + '\n' # print 'products' # for product in reaction.products: # print product.toAdjacencyList() + '\n' raise UndeterminableKineticsError(reaction, message=msg) return template def estimateKineticsUsingGroupAdditivity(self, template, referenceKinetics, degeneracy=1): """ Determine the appropriate kinetics for a reaction with the given `template` using group additivity. """ # Start with the generic kinetics of the top-level nodes # Make a copy so we don't modify the original kinetics = deepcopy(referenceKinetics) # Now add in more specific corrections if possible for node in template: entry = node comment_line = "Matched node " while entry.data is None and entry not in self.top: # Keep climbing tree until you find a (non-top) node with data. 
comment_line += "{0} >> ".format(entry.label) entry = entry.parent if entry.data is not None and entry not in self.top: kinetics = self.__multiplyKineticsData(kinetics, entry.data) comment_line += "{0} ({1})".format(entry.label, entry.longDesc.split('\n')[0]) elif entry in self.top: comment_line += "{0} (Top node)".format(entry.label) kinetics.comment += comment_line + '\n' # Also include reaction-path degeneracy if isinstance(kinetics, KineticsData): kinetics.kdata.value_si *= degeneracy elif isinstance(kinetics, Arrhenius): kinetics.A.value_si *= degeneracy elif kinetics is not None: raise KineticsError('Unexpected kinetics type "{0}" encountered while generating kinetics from group values.'.format(kinetics.__class__)) kinetics.comment += "Multiplied by reaction path degeneracy {0}".format(degeneracy) return kinetics def __multiplyKineticsData(self, kinetics1, kinetics2): """ Multiply two kinetics objects `kinetics1` and `kinetics2` of the same class together, returning their product as a new kinetics object of that class. Currently this only works for :class:`KineticsData` or :class:`Arrhenius` objects. 
""" if isinstance(kinetics1, KineticsData) and isinstance(kinetics2, KineticsData): if len(kinetics1.Tdata.value_si) != len(kinetics2.Tdata.value_si) or any([T1 != T2 for T1, T2 in zip(kinetics1.Tdata.value_si, kinetics2.Tdata.value_si)]): raise KineticsError('Cannot add these KineticsData objects due to their having different temperature points.') kinetics = KineticsData( Tdata = (kinetics1.Tdata.value, kinetics2.Tdata.units), kdata = (kinetics1.kdata.value * kinetics2.kdata.value, kinetics1.kdata.units), ) elif isinstance(kinetics1, Arrhenius) and isinstance(kinetics2, Arrhenius): assert kinetics1.A.units == kinetics2.A.units assert kinetics1.Ea.units == kinetics2.Ea.units assert kinetics1.T0.units == kinetics2.T0.units assert kinetics1.T0.value == kinetics2.T0.value kinetics = Arrhenius( A = (kinetics1.A.value * kinetics2.A.value, kinetics1.A.units), n = (kinetics1.n.value + kinetics2.n.value, kinetics1.n.units), Ea = (kinetics1.Ea.value + kinetics2.Ea.value, kinetics1.Ea.units), T0 = (kinetics1.T0.value, kinetics1.T0.units), ) else: raise KineticsError('Unable to multiply kinetics types "{0}" and "{1}".'.format(kinetics1.__class__, kinetics2.__class__)) if kinetics1.Tmin is not None and kinetics2.Tmin is not None: kinetics.Tmin = kinetics1.Tmin if kinetics1.Tmin.value_si > kinetics2.Tmin.value_si else kinetics2.Tmin elif kinetics1.Tmin is not None and kinetics2.Tmin is None: kinetics.Tmin = kinetics1.Tmin elif kinetics1.Tmin is None and kinetics2.Tmin is not None: kinetics.Tmin = kinetics2.Tmin if kinetics1.Tmax is not None and kinetics2.Tmax is not None: kinetics.Tmax = kinetics1.Tmax if kinetics1.Tmax.value_si < kinetics2.Tmax.value_si else kinetics2.Tmax elif kinetics1.Tmax is not None and kinetics2.Tmax is None: kinetics.Tmax = kinetics1.Tmax elif kinetics1.Tmax is None and kinetics2.Tmax is not None: kinetics.Tmax = kinetics2.Tmax if kinetics1.Pmin is not None and kinetics2.Pmin is not None: kinetics.Pmin = kinetics1.Pmin if kinetics1.Pmin.value_si > 
kinetics2.Pmin.value_si else kinetics2.Pmin elif kinetics1.Pmin is not None and kinetics2.Pmin is None: kinetics.Pmin = kinetics1.Pmin elif kinetics1.Pmin is None and kinetics2.Pmin is not None: kinetics.Pmin = kinetics2.Pmin if kinetics1.Pmax is not None and kinetics2.Pmax is not None: kinetics.Pmax = kinetics1.Pmax if kinetics1.Pmax.value_si < kinetics2.Pmax.value_si else kinetics2.Pmax elif kinetics1.Pmax is not None and kinetics2.Pmax is None: kinetics.Pmax = kinetics1.Pmax elif kinetics1.Pmax is None and kinetics2.Pmax is not None: kinetics.Pmax = kinetics2.Pmax if kinetics1.comment == '': kinetics.comment = kinetics2.comment elif kinetics2.comment == '': kinetics.comment = kinetics1.comment else: kinetics.comment = kinetics1.comment + ' + ' + kinetics2.comment return kinetics def generateGroupAdditivityValues(self, trainingSet, kunits, method='Arrhenius'): """ Generate the group additivity values using the given `trainingSet`, a list of 2-tuples of the form ``(template, kinetics)``. You must also specify the `kunits` for the family and the `method` to use when generating the group values. Returns ``True`` if the group values have changed significantly since the last time they were fitted, or ``False`` otherwise. 
""" # keep track of previous values so we can detect if they change old_entries = dict() for label,entry in self.entries.items(): if entry.data is not None: old_entries[label] = entry.data # Determine a complete list of the entries in the database, sorted as in the tree groupEntries = self.top[:] for entry in self.top: groupEntries.extend(self.descendants(entry)) # Determine a unique list of the groups we will be able to fit parameters for groupList = [] for template, kinetics in trainingSet: for group in template: if group not in self.top: groupList.append(group) groupList.extend(self.ancestors(group)[:-1]) groupList = list(set(groupList)) groupList.sort(key=lambda x: x.index) if method == 'KineticsData': # Fit a discrete set of k(T) data points by training against k(T) data Tdata = numpy.array([300,400,500,600,800,1000,1500,2000]) # Initialize dictionaries of fitted group values and uncertainties groupValues = {}; groupUncertainties = {}; groupCounts = {}; groupComments = {} for entry in groupEntries: groupValues[entry] = [] groupUncertainties[entry] = [] groupCounts[entry] = [] groupComments[entry] = set() # Generate least-squares matrix and vector A = []; b = [] kdata = [] for template, kinetics in trainingSet: if isinstance(kinetics, (Arrhenius, KineticsData)): kd = [kinetics.getRateCoefficient(T) for T in Tdata] elif isinstance(kinetics, ArrheniusEP): kd = [kinetics.getRateCoefficient(T, 0) for T in Tdata] else: raise Exception('Unexpected kinetics model of type {0} for template {1}.'.format(kinetics.__class__, template)) kdata.append(kd) # Create every combination of each group and its ancestors with each other combinations = [] for group in template: groups = [group]; groups.extend(self.ancestors(group)) combinations.append(groups) combinations = getAllCombinations(combinations) # Add a row to the matrix for each combination for groups in combinations: Arow = [1 if group in groups else 0 for group in groupList] Arow.append(1) brow = [math.log10(k) for k in 
kd] A.append(Arow); b.append(brow) for group in groups: groupComments[group].add("{0!s}".format(template)) if len(A) == 0: logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label)) return A = numpy.array(A) b = numpy.array(b) kdata = numpy.array(kdata) x, residues, rank, s = numpy.linalg.lstsq(A, b) for t, T in enumerate(Tdata): # Determine error in each group (on log scale) stdev = numpy.zeros(len(groupList)+1, numpy.float64) count = numpy.zeros(len(groupList)+1, numpy.int) for index in range(len(trainingSet)): template, kinetics = trainingSet[index] kd = math.log10(kdata[index,t]) km = x[-1,t] + sum([x[groupList.index(group),t] for group in template if group in groupList]) variance = (km - kd)**2 for group in template: groups = [group]; groups.extend(self.ancestors(group)) for g in groups: if g not in self.top: ind = groupList.index(g) stdev[ind] += variance count[ind] += 1 stdev[-1] += variance count[-1] += 1 stdev = numpy.sqrt(stdev / (count - 1)) import scipy.stats ci = scipy.stats.t.ppf(0.975, count - 1) * stdev # Update dictionaries of fitted group values and uncertainties for entry in groupEntries: if entry == self.top[0]: groupValues[entry].append(10**x[-1,t]) groupUncertainties[entry].append(10**ci[-1]) groupCounts[entry].append(count[-1]) elif entry in groupList: index = groupList.index(entry) groupValues[entry].append(10**x[index,t]) groupUncertainties[entry].append(10**ci[index]) groupCounts[entry].append(count[index]) else: groupValues[entry] = None groupUncertainties[entry] = None groupCounts[entry] = None # Store the fitted group values and uncertainties on the associated entries for entry in groupEntries: if groupValues[entry] is not None: entry.data = KineticsData(Tdata=(Tdata,"K"), kdata=(groupValues[entry],kunits)) if not any(numpy.isnan(numpy.array(groupUncertainties[entry]))): entry.data.kdata.uncertainties = numpy.array(groupUncertainties[entry]) entry.data.kdata.uncertaintyType = '*|/' 
entry.shortDesc = "Group additive kinetics." entry.longDesc = "Fitted to {0} rates.\n".format(groupCounts[entry]) entry.longDesc += "\n".join(groupComments[entry]) else: entry.data = None elif method == 'Arrhenius': # Fit Arrhenius parameters (A, n, Ea) by training against k(T) data Tdata = numpy.array([300,400,500,600,800,1000,1500,2000]) logTdata = numpy.log(Tdata) Tinvdata = 1000. / (constants.R * Tdata) A = []; b = [] kdata = [] for template, kinetics in trainingSet: if isinstance(kinetics, (Arrhenius, KineticsData)): kd = [kinetics.getRateCoefficient(T) for T in Tdata] elif isinstance(kinetics, ArrheniusEP): kd = [kinetics.getRateCoefficient(T, 0) for T in Tdata] else: raise Exception('Unexpected kinetics model of type {0} for template {1}.'.format(kinetics.__class__, template)) kdata.append(kd) # Create every combination of each group and its ancestors with each other combinations = [] for group in template: groups = [group]; groups.extend(self.ancestors(group)) combinations.append(groups) combinations = getAllCombinations(combinations) # Add a row to the matrix for each combination at each temperature for t, T in enumerate(Tdata): logT = logTdata[t] Tinv = Tinvdata[t] for groups in combinations: Arow = [] for group in groupList: if group in groups: Arow.extend([1,logT,-Tinv]) else: Arow.extend([0,0,0]) Arow.extend([1,logT,-Tinv]) brow = math.log(kd[t]) A.append(Arow); b.append(brow) if len(A) == 0: logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label)) return A = numpy.array(A) b = numpy.array(b) kdata = numpy.array(kdata) x, residues, rank, s = numpy.linalg.lstsq(A, b) # Store the results self.top[0].data = Arrhenius( A = (math.exp(x[-3]),kunits), n = x[-2], Ea = (x[-1],"kJ/mol"), T0 = (1,"K"), ) for i, group in enumerate(groupList): group.data = Arrhenius( A = (math.exp(x[3*i]),kunits), n = x[3*i+1], Ea = (x[3*i+2],"kJ/mol"), T0 = (1,"K"), ) elif method == 'Arrhenius2': # Fit Arrhenius parameters (A, n, 
Ea) by training against (A, n, Ea) values A = []; b = [] for template, kinetics in trainingSet: # Create every combination of each group and its ancestors with each other combinations = [] for group in template: groups = [group]; groups.extend(self.ancestors(group)) combinations.append(groups) combinations = getAllCombinations(combinations) # Add a row to the matrix for each parameter if isinstance(kinetics, Arrhenius) or (isinstance(kinetics, ArrheniusEP) and kinetics.alpha.value_si == 0): for groups in combinations: Arow = [] for group in groupList: if group in groups: Arow.append(1) else: Arow.append(0) Arow.append(1) Ea = kinetics.E0.value_si if isinstance(kinetics, ArrheniusEP) else kinetics.Ea.value_si brow = [math.log(kinetics.A.value_si), kinetics.n.value_si, Ea / 1000.] A.append(Arow); b.append(brow) if len(A) == 0: logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label)) return A = numpy.array(A) b = numpy.array(b) x, residues, rank, s = numpy.linalg.lstsq(A, b) # Store the results self.top[0].data = Arrhenius( A = (math.exp(x[-1,0]),kunits), n = x[-1,1], Ea = (x[-1,2],"kJ/mol"), T0 = (1,"K"), ) for i, group in enumerate(groupList): group.data = Arrhenius( A = (math.exp(x[i,0]),kunits), n = x[i,1], Ea = (x[i,2],"kJ/mol"), T0 = (1,"K"), ) # Add a note to the history of each changed item indicating that we've generated new group values changed = False for label, entry in self.entries.items(): if entry.data is not None and old_entries.has_key(label): if (isinstance(entry.data, KineticsData) and isinstance(old_entries[label], KineticsData) and len(entry.data.kdata.value_si) == len(old_entries[label].kdata.value_si) and all(abs(entry.data.kdata.value_si / old_entries[label].kdata.value_si - 1) < 0.01)): #print "New group values within 1% of old." 
pass elif (isinstance(entry.data, Arrhenius) and isinstance(old_entries[label], Arrhenius) and abs(entry.data.A.value_si / old_entries[label].A.value_si - 1) < 0.01 and abs(entry.data.n.value_si / old_entries[label].n.value_si - 1) < 0.01 and abs(entry.data.Ea.value_si / old_entries[label].Ea.value_si - 1) < 0.01 and abs(entry.data.T0.value_si / old_entries[label].T0.value_si - 1) < 0.01): #print "New group values within 1% of old." pass else: changed = True break else: changed = True break return changed
mit
Denisolt/IEEE-NYIT-MA
local/lib/python2.7/site-packages/django/contrib/gis/gdal/field.py
355
6739
from ctypes import byref, c_int from datetime import date, datetime, time from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.error import GDALException from django.contrib.gis.gdal.prototypes import ds as capi from django.utils.encoding import force_text # For more information, see the OGR C API source code: # http://www.gdal.org/ogr/ogr__api_8h.html # # The OGR_Fld_* routines are relevant here. class Field(GDALBase): """ This class wraps an OGR Field, and needs to be instantiated from a Feature object. """ def __init__(self, feat, index): """ Initializes on the feature object and the integer index of the field within the feature. """ # Setting the feature pointer and index. self._feat = feat self._index = index # Getting the pointer for this field. fld_ptr = capi.get_feat_field_defn(feat.ptr, index) if not fld_ptr: raise GDALException('Cannot create OGR Field, invalid pointer given.') self.ptr = fld_ptr # Setting the class depending upon the OGR Field Type (OFT) self.__class__ = OGRFieldTypes[self.type] # OFTReal with no precision should be an OFTInteger. if isinstance(self, OFTReal) and self.precision == 0: self.__class__ = OFTInteger self._double = True def __str__(self): "Returns the string representation of the Field." return str(self.value).strip() # #### Field Methods #### def as_double(self): "Retrieves the Field's value as a double (float)." return capi.get_field_as_double(self._feat.ptr, self._index) def as_int(self, is_64=False): "Retrieves the Field's value as an integer." if is_64: return capi.get_field_as_integer64(self._feat.ptr, self._index) else: return capi.get_field_as_integer(self._feat.ptr, self._index) def as_string(self): "Retrieves the Field's value as a string." string = capi.get_field_as_string(self._feat.ptr, self._index) return force_text(string, encoding=self._feat.encoding, strings_only=True) def as_datetime(self): "Retrieves the Field's value as a tuple of date & time components." 
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)] status = capi.get_field_as_datetime( self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd), byref(hh), byref(mn), byref(ss), byref(tz)) if status: return (yy, mm, dd, hh, mn, ss, tz) else: raise GDALException('Unable to retrieve date & time information from the field.') # #### Field Properties #### @property def name(self): "Returns the name of this Field." name = capi.get_field_name(self.ptr) return force_text(name, encoding=self._feat.encoding, strings_only=True) @property def precision(self): "Returns the precision of this Field." return capi.get_field_precision(self.ptr) @property def type(self): "Returns the OGR type of this Field." return capi.get_field_type(self.ptr) @property def type_name(self): "Return the OGR field type name for this Field." return capi.get_field_type_name(self.type) @property def value(self): "Returns the value of this Field." # Default is to get the field as a string. return self.as_string() @property def width(self): "Returns the width of this Field." return capi.get_field_width(self.ptr) # ### The Field sub-classes for each OGR Field type. ### class OFTInteger(Field): _double = False _bit64 = False @property def value(self): "Returns an integer contained in this field." if self._double: # If this is really from an OFTReal field with no precision, # read as a double and cast as Python int (to prevent overflow). return int(self.as_double()) else: return self.as_int(self._bit64) @property def type(self): """ GDAL uses OFTReals to represent OFTIntegers in created shapefiles -- forcing the type here since the underlying field type may actually be OFTReal. """ return 0 class OFTReal(Field): @property def value(self): "Returns a float contained in this field." return self.as_double() # String & Binary fields, just subclasses class OFTString(Field): pass class OFTWideString(Field): pass class OFTBinary(Field): pass # OFTDate, OFTTime, OFTDateTime fields. 
class OFTDate(Field): @property def value(self): "Returns a Python `date` object for the OFTDate field." try: yy, mm, dd, hh, mn, ss, tz = self.as_datetime() return date(yy.value, mm.value, dd.value) except (ValueError, GDALException): return None class OFTDateTime(Field): @property def value(self): "Returns a Python `datetime` object for this OFTDateTime field." # TODO: Adapt timezone information. # See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html # The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous), # 100=GMT, 104=GMT+1, 80=GMT-5, etc. try: yy, mm, dd, hh, mn, ss, tz = self.as_datetime() return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value) except (ValueError, GDALException): return None class OFTTime(Field): @property def value(self): "Returns a Python `time` object for this OFTTime field." try: yy, mm, dd, hh, mn, ss, tz = self.as_datetime() return time(hh.value, mn.value, ss.value) except (ValueError, GDALException): return None class OFTInteger64(OFTInteger): _bit64 = True # List fields are also just subclasses class OFTIntegerList(Field): pass class OFTRealList(Field): pass class OFTStringList(Field): pass class OFTWideStringList(Field): pass class OFTInteger64List(Field): pass # Class mapping dictionary for OFT Types and reverse mapping. OGRFieldTypes = { 0: OFTInteger, 1: OFTIntegerList, 2: OFTReal, 3: OFTRealList, 4: OFTString, 5: OFTStringList, 6: OFTWideString, 7: OFTWideStringList, 8: OFTBinary, 9: OFTDate, 10: OFTTime, 11: OFTDateTime, # New 64-bit integer types in GDAL 2 12: OFTInteger64, 13: OFTInteger64List, } ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
gpl-3.0
jimi-c/ansible
lib/ansible/modules/cloud/cloudstack/cs_ip_address.py
22
8009
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2015, Darren Worrall <darren@iweb.co.uk> # Copyright (c) 2015, René Moser <mail@renemoser.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cs_ip_address short_description: Manages public IP address associations on Apache CloudStack based clouds. description: - Acquires and associates a public IP to an account or project. - Due to API limitations this is not an idempotent call, so be sure to only conditionally call this when C(state=present). - Tagging the IP address can also make the call idempotent. version_added: '2.0' author: - "Darren Worrall (@dazworrall)" - "René Moser (@resmo)" options: ip_address: description: - Public IP address. - Required if C(state=absent) and C(tags) is not set domain: description: - Domain the IP address is related to. network: description: - Network the IP address is related to. vpc: description: - VPC the IP address is related to. version_added: "2.2" account: description: - Account the IP address is related to. project: description: - Name of the project the IP address is related to. zone: description: - Name of the zone in which the IP address is in. - If not set, default zone is used. state: description: - State of the IP address. default: present choices: [ present, absent ] tags: description: - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). - Tags can be used as an unique identifier for the IP Addresses. - In this case, at least one of them must be unique to ensure idempontency. aliases: [ 'tag' ] version_added: "2.6" poll_async: description: - Poll async jobs until job has finished. 
type: bool default: 'yes' extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' - name: Associate an IP address conditonally local_action: module: cs_ip_address network: My Network register: ip_address when: instance.public_ip is undefined - name: Disassociate an IP address local_action: module: cs_ip_address ip_address: 1.2.3.4 state: absent - name: Associate an IP address with tags local_action: module: cs_ip_address network: My Network tags: - key: myCustomID - value: 5510c31a-416e-11e8-9013-02000a6b00bf register: ip_address - name: Disassociate an IP address with tags local_action: module: cs_ip_address state: absent tags: - key: myCustomID - value: 5510c31a-416e-11e8-9013-02000a6b00bf ''' RETURN = ''' --- id: description: UUID of the Public IP address. returned: success type: string sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f ip_address: description: Public IP address. returned: success type: string sample: 1.2.3.4 zone: description: Name of zone the IP address is related to. returned: success type: string sample: ch-gva-2 project: description: Name of project the IP address is related to. returned: success type: string sample: Production account: description: Account the IP address is related to. returned: success type: string sample: example account domain: description: Domain the IP address is related to. returned: success type: string sample: example domain tags: description: List of resource tags associated with the IP address. 
returned: success type: dict sample: '[ { "key": "myCustomID", "value": "5510c31a-416e-11e8-9013-02000a6b00bf" } ]' version_added: "2.6" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.cloudstack import ( AnsibleCloudStack, cs_argument_spec, cs_required_together, ) class AnsibleCloudStackIPAddress(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackIPAddress, self).__init__(module) self.returns = { 'ipaddress': 'ip_address', } def get_ip_address(self, key=None): if self.ip_address: return self._get_by_key(key, self.ip_address) args = { 'ipaddress': self.module.params.get('ip_address'), 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), 'vpcid': self.get_vpc(key='id'), } ip_addresses = self.cs.listPublicIpAddresses(**args) if ip_addresses: tags = self.module.params.get('tags') for ip_addr in ip_addresses['publicipaddress']: if ip_addr['ipaddress'] == args['ipaddress'] != '': self.ip_address = ip_addresses['publicipaddress'][0] elif tags: if sorted([tag for tag in tags if tag in ip_addr['tags']]) == sorted(tags): self.ip_address = ip_addr return self._get_by_key(key, self.ip_address) def present_ip_address(self): ip_address = self.get_ip_address() if not ip_address: ip_address = self.associate_ip_address(ip_address) if ip_address: ip_address = self.ensure_tags(resource=ip_address, resource_type='publicipaddress') return ip_address def associate_ip_address(self, ip_address): self.result['changed'] = True args = { 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), 'networkid': self.get_network(key='id'), 'zoneid': self.get_zone(key='id'), 'vpcid': self.get_vpc(key='id'), } ip_address = None if not self.module.check_mode: res = self.cs.associateIpAddress(**args) poll_async = self.module.params.get('poll_async') if poll_async: ip_address = self.poll_job(res, 'ipaddress') return 
ip_address def disassociate_ip_address(self): ip_address = self.get_ip_address() if not ip_address: return None if ip_address['isstaticnat']: self.module.fail_json(msg="IP address is allocated via static nat") self.result['changed'] = True if not self.module.check_mode: self.module.params['tags'] = [] ip_address = self.ensure_tags(resource=ip_address, resource_type='publicipaddress') res = self.cs.disassociateIpAddress(id=ip_address['id']) poll_async = self.module.params.get('poll_async') if poll_async: self.poll_job(res, 'ipaddress') return ip_address def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( ip_address=dict(required=False), state=dict(choices=['present', 'absent'], default='present'), vpc=dict(), network=dict(), zone=dict(), domain=dict(), account=dict(), project=dict(), tags=dict(type='list', aliases=['tag']), poll_async=dict(type='bool', default=True), )) module = AnsibleModule( argument_spec=argument_spec, required_together=cs_required_together(), required_if=[ ('state', 'absent', ['ip_address', 'tags'], True), ], supports_check_mode=True ) acs_ip_address = AnsibleCloudStackIPAddress(module) state = module.params.get('state') if state in ['absent']: ip_address = acs_ip_address.disassociate_ip_address() else: ip_address = acs_ip_address.present_ip_address() result = acs_ip_address.get_result(ip_address) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
strogo/turbion
turbion/bits/utils/tests/merging.py
1
2375
from datetime import date from django.db import models from django.test import TestCase from django.contrib.auth.models import User from turbion.bits.utils import merging class MyProfile(models.Model): user_ptr = models.ForeignKey(User, unique=True) nickname = models.CharField(max_length=100) www = models.URLField() birth = models.DateField() class Meta: app_label="turbion" class OtherProfile(models.Model): user = models.ForeignKey(User, unique=True) nickname = models.CharField(max_length=100) website = models.URLField() dob = models.DateField() class Meta: app_label="turbion" class MyProfileLayer(merging.ModelLayer): model = MyProfile fields = ["nickname"] aliases = { "site": "www", "day_of_birth": "birth" } key = 'user_ptr' class OtherProfileLayer(merging.ModelLayer): model = OtherProfile fields = ["nickname"] aliases = { "site": "website", "day_of_birth": "dob" } key = 'user' create = True merging.track([MyProfileLayer, OtherProfileLayer]) class Merge(TestCase): def setUp(self): self.user = User.objects.create_user( "test", "foobar@foo.bar" ) self.my_profile = MyProfile.objects.create( user_ptr=self.user, nickname="test_foo", www="http://foo.bar", birth=date.today(), ) def _test_objects(self, other): my_profile = MyProfile.objects.get(pk=self.my_profile.pk) self.assertEqual(other.nickname, my_profile.nickname) self.assertEqual(other.website, my_profile.www) self.assertEqual(other.dob, my_profile.birth) def test_other_profile_existance(self): self.assertEqual( OtherProfile.objects.filter(user=self.user).count(), 1 ) other = OtherProfile.objects.get(user=self.user) self._test_objects(other) def test_other_change(self): other = OtherProfile.objects.get(user=self.user) other.website = "http://bar.foo" other.save() self._test_objects(other) def test_my_change(self): self.my_profile.website = "http://bar.foo" self.my_profile.save() other = OtherProfile.objects.get(user=self.user) self._test_objects(other)
bsd-3-clause
BrainDamage/Flexget
flexget/plugins/filter/seen_info_hash.py
8
1940
from __future__ import unicode_literals, division, absolute_import from flexget import plugin from flexget.event import event from flexget.plugins.filter.seen import FilterSeen class FilterSeenInfoHash(FilterSeen): """Prevents the same torrent from being downloaded twice by remembering the infohash of all downloaded torrents.""" def __init__(self): # remember and filter by these fields self.fields = ['torrent_info_hash'] self.keyword = 'seen_info_hash' def validator(self): from flexget import validator return validator.factory('boolean') @plugin.priority(180) def on_task_filter(self, task, config): # Return if we are disabled. if config is False: return # First make sure all the torrent_info_hash fields are in upper case for entry in task.entries: if isinstance(entry.get('torrent_info_hash'), basestring): entry['torrent_info_hash'] = entry['torrent_info_hash'].upper() FilterSeen.on_task_filter(self, task, config, remember_rejected=True) def on_task_modify(self, task, config): # Return if we are disabled. if config is False: return # Run the filter again after the torrent plugin has populated the infohash self.on_task_filter(task, config) # Make sure no duplicates were accepted this run accepted_infohashes = set() for entry in task.accepted: if 'torrent_info_hash' in entry: infohash = entry['torrent_info_hash'] if infohash in accepted_infohashes: entry.reject('Already accepted torrent with this infohash once for this task') else: accepted_infohashes.add(infohash) @event('plugin.register') def register_plugin(): plugin.register(FilterSeenInfoHash, 'seen_info_hash', builtin=True, api_ver=2)
mit
salguarnieri/intellij-community
python/lib/Lib/site-packages/django/contrib/gis/management/commands/inspectdb.py
311
1553
from optparse import make_option from django.core.management.base import CommandError from django.core.management.commands.inspectdb import Command as InspectDBCommand class Command(InspectDBCommand): db_module = 'django.contrib.gis.db' gis_tables = {} def get_field_type(self, connection, table_name, row): field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row) if field_type == 'GeometryField': geo_col = row[0] # Getting a more specific field type and any additional parameters # from the `get_geometry_type` routine for the spatial backend. field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col) field_params.update(geo_params) # Adding the table name and column to the `gis_tables` dictionary, this # allows us to track which tables need a GeoManager. if table_name in self.gis_tables: self.gis_tables[table_name].append(geo_col) else: self.gis_tables[table_name] = [geo_col] return field_type, field_params, field_notes def get_meta(self, table_name): meta_lines = super(Command, self).get_meta(table_name) if table_name in self.gis_tables: # If the table is a geographic one, then we need make # GeoManager the default manager for the model. meta_lines.insert(0, ' objects = models.GeoManager()') return meta_lines
apache-2.0
ronakkhunt/kuma
vendor/packages/translate/lang/km.py
29
1949
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2007 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """This module represents the Khmer language. .. seealso:: http://en.wikipedia.org/wiki/Khmer_language """ import re from translate.lang import common class km(common.Common): """This class represents Khmer.""" khmerpunc = u"។៕៖៘" """These marks are only used for Khmer.""" punctuation = u"".join([common.Common.commonpunc, common.Common.quotes, common.Common.miscpunc, khmerpunc]) sentenceend = u"!?…។៕៘" sentencere = re.compile(r"""(?s) #make . also match newlines .*? #anything, but match non-greedy [%s] #the puntuation for sentence ending \s+ #the spacing after the puntuation (?=[^a-z\d])#lookahead that next part starts with caps """ % sentenceend, re.VERBOSE) #\u00a0 is non-breaking space puncdict = { u".": u"\u00a0។", u":": u"\u00a0៖", u"!": u"\u00a0!", u"?": u"\u00a0?", } ignoretests = ["startcaps", "simplecaps"] mozilla_nplurals = 2 mozilla_pluralequation = "n!=1 ? 1 : 0"
mpl-2.0
nsat/gnuradio
grc/core/Block.py
3
30838
""" Copyright 2008-2015 Free Software Foundation, Inc. This file is part of GNU Radio GNU Radio Companion is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GNU Radio Companion is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA """ import collections import itertools from Cheetah.Template import Template from .utils import epy_block_io, odict from . Constants import ( BLOCK_FLAG_NEED_QT_GUI, BLOCK_FLAG_NEED_WX_GUI, ADVANCED_PARAM_TAB, DEFAULT_PARAM_TAB, BLOCK_FLAG_THROTTLE, BLOCK_FLAG_DISABLE_BYPASS, BLOCK_FLAG_DEPRECATED, BLOCK_ENABLED, BLOCK_BYPASSED, BLOCK_DISABLED ) from . Element import Element def _get_keys(lst): return [elem.get_key() for elem in lst] def _get_elem(lst, key): try: return lst[_get_keys(lst).index(key)] except ValueError: raise ValueError('Key "{}" not found in {}.'.format(key, _get_keys(lst))) class Block(Element): is_block = True def __init__(self, flow_graph, n): """ Make a new block from nested data. 
Args: flow: graph the parent element n: the nested odict Returns: block a new block """ # Grab the data self._doc = (n.find('doc') or '').strip('\n').replace('\\\n', '') self._imports = map(lambda i: i.strip(), n.findall('import')) self._make = n.find('make') self._var_make = n.find('var_make') self._checks = n.findall('check') self._callbacks = n.findall('callback') self._bus_structure_source = n.find('bus_structure_source') or '' self._bus_structure_sink = n.find('bus_structure_sink') or '' self.port_counters = [itertools.count(), itertools.count()] # Build the block Element.__init__(self, flow_graph) # Grab the data params = n.findall('param') sources = n.findall('source') sinks = n.findall('sink') self._name = n.find('name') self._key = n.find('key') category = (n.find('category') or '').split('/') self.category = [cat.strip() for cat in category if cat.strip()] self._flags = n.find('flags') or '' # Backwards compatibility if n.find('throttle') and BLOCK_FLAG_THROTTLE not in self._flags: self._flags += BLOCK_FLAG_THROTTLE self._grc_source = n.find('grc_source') or '' self._block_wrapper_path = n.find('block_wrapper_path') self._bussify_sink = n.find('bus_sink') self._bussify_source = n.find('bus_source') self._var_value = n.find('var_value') or '$value' # Get list of param tabs n_tabs = n.find('param_tab_order') or None self._param_tab_labels = n_tabs.findall('tab') if n_tabs is not None else [DEFAULT_PARAM_TAB] # Create the param objects self._params = list() # Add the id param self.get_params().append(self.get_parent().get_parent().Param( block=self, n=odict({ 'name': 'ID', 'key': 'id', 'type': 'id', }) )) self.get_params().append(self.get_parent().get_parent().Param( block=self, n=odict({ 'name': 'Enabled', 'key': '_enabled', 'type': 'raw', 'value': 'True', 'hide': 'all', }) )) for param in itertools.imap(lambda n: self.get_parent().get_parent().Param(block=self, n=n), params): key = param.get_key() # Test against repeated keys if key in 
self.get_param_keys(): raise Exception('Key "{}" already exists in params'.format(key)) # Store the param self.get_params().append(param) # Create the source objects self._sources = list() for source in map(lambda n: self.get_parent().get_parent().Port(block=self, n=n, dir='source'), sources): key = source.get_key() # Test against repeated keys if key in self.get_source_keys(): raise Exception('Key "{}" already exists in sources'.format(key)) # Store the port self.get_sources().append(source) self.back_ofthe_bus(self.get_sources()) # Create the sink objects self._sinks = list() for sink in map(lambda n: self.get_parent().get_parent().Port(block=self, n=n, dir='sink'), sinks): key = sink.get_key() # Test against repeated keys if key in self.get_sink_keys(): raise Exception('Key "{}" already exists in sinks'.format(key)) # Store the port self.get_sinks().append(sink) self.back_ofthe_bus(self.get_sinks()) self.current_bus_structure = {'source': '', 'sink': ''} # Virtual source/sink and pad source/sink blocks are # indistinguishable from normal GR blocks. Make explicit # checks for them here since they have no work function or # buffers to manage. 
self.is_virtual_or_pad = self._key in ( "virtual_source", "virtual_sink", "pad_source", "pad_sink") self.is_variable = self._key.startswith('variable') self.is_import = (self._key == 'import') # Disable blocks that are virtual/pads or variables if self.is_virtual_or_pad or self.is_variable: self._flags += BLOCK_FLAG_DISABLE_BYPASS if not (self.is_virtual_or_pad or self.is_variable or self._key == 'options'): self.get_params().append(self.get_parent().get_parent().Param( block=self, n=odict({'name': 'Block Alias', 'key': 'alias', 'type': 'string', 'hide': 'part', 'tab': ADVANCED_PARAM_TAB }) )) if (len(sources) or len(sinks)) and not self.is_virtual_or_pad: self.get_params().append(self.get_parent().get_parent().Param( block=self, n=odict({'name': 'Core Affinity', 'key': 'affinity', 'type': 'int_vector', 'hide': 'part', 'tab': ADVANCED_PARAM_TAB }) )) if len(sources) and not self.is_virtual_or_pad: self.get_params().append(self.get_parent().get_parent().Param( block=self, n=odict({'name': 'Min Output Buffer', 'key': 'minoutbuf', 'type': 'int', 'hide': 'part', 'value': '0', 'tab': ADVANCED_PARAM_TAB }) )) self.get_params().append(self.get_parent().get_parent().Param( block=self, n=odict({'name': 'Max Output Buffer', 'key': 'maxoutbuf', 'type': 'int', 'hide': 'part', 'value': '0', 'tab': ADVANCED_PARAM_TAB }) )) self.get_params().append(self.get_parent().get_parent().Param( block=self, n=odict({'name': 'Comment', 'key': 'comment', 'type': '_multiline', 'hide': 'part', 'value': '', 'tab': ADVANCED_PARAM_TAB }) )) self._epy_source_hash = -1 # for epy blocks self._epy_reload_error = None if self._bussify_sink: self.bussify({'name': 'bus', 'type': 'bus'}, 'sink') if self._bussify_source: self.bussify({'name': 'bus', 'type': 'bus'}, 'source') def get_bus_structure(self, direction): if direction == 'source': bus_structure = self._bus_structure_source else: bus_structure = self._bus_structure_sink bus_structure = self.resolve_dependencies(bus_structure) if not bus_structure: 
return '' # TODO: Don't like empty strings. should change this to None eventually try: clean_bus_structure = self.get_parent().evaluate(bus_structure) return clean_bus_structure except: return '' def validate(self): """ Validate this block. Call the base class validate. Evaluate the checks: each check must evaluate to True. """ Element.validate(self) # Evaluate the checks for check in self._checks: check_res = self.resolve_dependencies(check) try: if not self.get_parent().evaluate(check_res): self.add_error_message('Check "{}" failed.'.format(check)) except: self.add_error_message('Check "{}" did not evaluate.'.format(check)) # For variables check the value (only if var_value is used if self.is_variable and self._var_value != '$value': value = self._var_value try: value = self.get_var_value() self.get_parent().evaluate(value) except Exception as err: self.add_error_message('Value "{}" cannot be evaluated:\n{}'.format(value, err)) # check if this is a GUI block and matches the selected generate option current_generate_option = self.get_parent().get_option('generate_options') def check_generate_mode(label, flag, valid_options): block_requires_mode = ( flag in self.get_flags() or self.get_name().upper().startswith(label) ) if block_requires_mode and current_generate_option not in valid_options: self.add_error_message("Can't generate this block in mode: {} ".format( repr(current_generate_option))) check_generate_mode('WX GUI', BLOCK_FLAG_NEED_WX_GUI, ('wx_gui',)) check_generate_mode('QT GUI', BLOCK_FLAG_NEED_QT_GUI, ('qt_gui', 'hb_qt_gui')) if self._epy_reload_error: self.get_param('_source_code').add_error_message(str(self._epy_reload_error)) def rewrite(self): """ Add and remove ports to adjust for the nports. 
""" Element.rewrite(self) # Check and run any custom rewrite function for this block getattr(self, 'rewrite_' + self._key, lambda: None)() # Adjust nports, disconnect hidden ports for ports in (self.get_sources(), self.get_sinks()): for i, master_port in enumerate(ports): nports = master_port.get_nports() or 1 num_ports = 1 + len(master_port.get_clones()) if master_port.get_hide(): for connection in master_port.get_connections(): self.get_parent().remove_element(connection) if not nports and num_ports == 1: # Not a master port and no left-over clones continue # Remove excess cloned ports for port in master_port.get_clones()[nports-1:]: # Remove excess connections for connection in port.get_connections(): self.get_parent().remove_element(connection) master_port.remove_clone(port) ports.remove(port) # Add more cloned ports for j in range(num_ports, nports): port = master_port.add_clone() ports.insert(ports.index(master_port) + j, port) self.back_ofthe_bus(ports) # Renumber non-message/message ports domain_specific_port_index = collections.defaultdict(int) for port in filter(lambda p: p.get_key().isdigit(), ports): domain = port.get_domain() port._key = str(domain_specific_port_index[domain]) domain_specific_port_index[domain] += 1 def port_controller_modify(self, direction): """ Change the port controller. 
Args: direction: +1 or -1 Returns: true for change """ changed = False # Concat the nports string from the private nports settings of all ports nports_str = ' '.join([port._nports for port in self.get_ports()]) # Modify all params whose keys appear in the nports string for param in self.get_params(): if param.is_enum() or param.get_key() not in nports_str: continue # Try to increment the port controller by direction try: value = param.get_evaluated() value = value + direction if 0 < value: param.set_value(value) changed = True except: pass return changed def get_doc(self): platform = self.get_parent().get_parent() documentation = platform.block_docstrings.get(self._key, {}) from_xml = self._doc.strip() if from_xml: documentation[''] = from_xml return documentation def get_imports(self, raw=False): """ Resolve all import statements. Split each import statement at newlines. Combine all import statements into a list. Filter empty imports. Returns: a list of import statements """ if raw: return self._imports return filter(lambda i: i, sum(map(lambda i: self.resolve_dependencies(i).split('\n'), self._imports), [])) def get_make(self, raw=False): if raw: return self._make return self.resolve_dependencies(self._make) def get_var_make(self): return self.resolve_dependencies(self._var_make) def get_var_value(self): return self.resolve_dependencies(self._var_value) def get_callbacks(self): """ Get a list of function callbacks for this block. Returns: a list of strings """ def make_callback(callback): callback = self.resolve_dependencies(callback) if 'self.' 
in callback: return callback return 'self.{}.{}'.format(self.get_id(), callback) return map(make_callback, self._callbacks) def is_virtual_sink(self): return self.get_key() == 'virtual_sink' def is_virtual_source(self): return self.get_key() == 'virtual_source' ########################################################################### # Custom rewrite functions ########################################################################### def rewrite_epy_block(self): flowgraph = self.get_parent() platform = flowgraph.get_parent() param_blk = self.get_param('_io_cache') param_src = self.get_param('_source_code') src = param_src.get_value() src_hash = hash((self.get_id(), src)) if src_hash == self._epy_source_hash: return try: blk_io = epy_block_io.extract(src) except Exception as e: self._epy_reload_error = ValueError(str(e)) try: # Load last working block io blk_io_args = eval(param_blk.get_value()) if len(blk_io_args) == 6: blk_io_args += ([],) # add empty callbacks blk_io = epy_block_io.BlockIO(*blk_io_args) except Exception: return else: self._epy_reload_error = None # Clear previous errors param_blk.set_value(repr(tuple(blk_io))) # print "Rewriting embedded python block {!r}".format(self.get_id()) self._epy_source_hash = src_hash self._name = blk_io.name or blk_io.cls self._doc = blk_io.doc self._imports[0] = 'import ' + self.get_id() self._make = '{0}.{1}({2})'.format(self.get_id(), blk_io.cls, ', '.join( '{0}=${{ {0} }}'.format(key) for key, _ in blk_io.params)) self._callbacks = ['{0} = ${{ {0} }}'.format(attr) for attr in blk_io.callbacks] params = {} for param in list(self._params): if hasattr(param, '__epy_param__'): params[param.get_key()] = param self._params.remove(param) for key, value in blk_io.params: try: param = params[key] param.set_default(value) except KeyError: # need to make a new param name = key.replace('_', ' ').title() n = odict(dict(name=name, key=key, type='raw', value=value)) param = platform.Param(block=self, n=n) setattr(param, 
'__epy_param__', True) self._params.append(param) def update_ports(label, ports, port_specs, direction): ports_to_remove = list(ports) iter_ports = iter(ports) ports_new = [] port_current = next(iter_ports, None) for key, port_type in port_specs: reuse_port = ( port_current is not None and port_current.get_type() == port_type and (key.isdigit() or port_current.get_key() == key) ) if reuse_port: ports_to_remove.remove(port_current) port, port_current = port_current, next(iter_ports, None) else: n = odict(dict(name=label + str(key), type=port_type, key=key)) if port_type == 'message': n['name'] = key n['optional'] = '1' port = platform.Port(block=self, n=n, dir=direction) ports_new.append(port) # replace old port list with new one del ports[:] ports.extend(ports_new) # remove excess port connections for port in ports_to_remove: for connection in port.get_connections(): flowgraph.remove_element(connection) update_ports('in', self.get_sinks(), blk_io.sinks, 'sink') update_ports('out', self.get_sources(), blk_io.sources, 'source') self.rewrite() def back_ofthe_bus(self, portlist): portlist.sort(key=lambda p: p._type == 'bus') def filter_bus_port(self, ports): buslist = [p for p in ports if p._type == 'bus'] return buslist or ports # Main functions to get and set the block state # Also kept get_enabled and set_enabled to keep compatibility def get_state(self): """ Gets the block's current state. Returns: ENABLED - 0 BYPASSED - 1 DISABLED - 2 """ try: return int(eval(self.get_param('_enabled').get_value())) except: return BLOCK_ENABLED def set_state(self, state): """ Sets the state for the block. Args: ENABLED - 0 BYPASSED - 1 DISABLED - 2 """ if state in [BLOCK_ENABLED, BLOCK_BYPASSED, BLOCK_DISABLED]: self.get_param('_enabled').set_value(str(state)) else: self.get_param('_enabled').set_value(str(BLOCK_ENABLED)) # Enable/Disable Aliases def get_enabled(self): """ Get the enabled state of the block. 
Returns: true for enabled """ return not (self.get_state() == BLOCK_DISABLED) def set_enabled(self, enabled): """ Set the enabled state of the block. Args: enabled: true for enabled Returns: True if block changed state """ old_state = self.get_state() new_state = BLOCK_ENABLED if enabled else BLOCK_DISABLED self.set_state(new_state) return old_state != new_state # Block bypassing def get_bypassed(self): """ Check if the block is bypassed """ return self.get_state() == BLOCK_BYPASSED def set_bypassed(self): """ Bypass the block Returns: True if block chagnes state """ if self.get_state() != BLOCK_BYPASSED and self.can_bypass(): self.set_state(BLOCK_BYPASSED) return True return False def can_bypass(self): """ Check the number of sinks and sources and see if this block can be bypassed """ # Check to make sure this is a single path block # Could possibly support 1 to many blocks if len(self.get_sources()) != 1 or len(self.get_sinks()) != 1: return False if not (self.get_sources()[0].get_type() == self.get_sinks()[0].get_type()): return False if self.bypass_disabled(): return False return True def __str__(self): return 'Block - {} - {}({})'.format(self.get_id(), self.get_name(), self.get_key()) def get_id(self): return self.get_param('id').get_value() def get_name(self): return self._name def get_key(self): return self._key def get_ports(self): return self.get_sources() + self.get_sinks() def get_ports_gui(self): return self.filter_bus_port(self.get_sources()) + self.filter_bus_port(self.get_sinks()) def get_children(self): return self.get_ports() + self.get_params() def get_children_gui(self): return self.get_ports_gui() + self.get_params() def get_block_wrapper_path(self): return self._block_wrapper_path def get_comment(self): return self.get_param('comment').get_value() def get_flags(self): return self._flags def throtteling(self): return BLOCK_FLAG_THROTTLE in self._flags def bypass_disabled(self): return BLOCK_FLAG_DISABLE_BYPASS in self._flags @property def 
is_deprecated(self): return BLOCK_FLAG_DEPRECATED in self._flags ############################################## # Access Params ############################################## def get_param_tab_labels(self): return self._param_tab_labels def get_param_keys(self): return _get_keys(self._params) def get_param(self, key): return _get_elem(self._params, key) def get_params(self): return self._params def has_param(self, key): try: _get_elem(self._params, key) return True except: return False ############################################## # Access Sinks ############################################## def get_sink_keys(self): return _get_keys(self._sinks) def get_sink(self, key): return _get_elem(self._sinks, key) def get_sinks(self): return self._sinks def get_sinks_gui(self): return self.filter_bus_port(self.get_sinks()) ############################################## # Access Sources ############################################## def get_source_keys(self): return _get_keys(self._sources) def get_source(self, key): return _get_elem(self._sources, key) def get_sources(self): return self._sources def get_sources_gui(self): return self.filter_bus_port(self.get_sources()) def get_connections(self): return sum([port.get_connections() for port in self.get_ports()], []) def resolve_dependencies(self, tmpl): """ Resolve a paramater dependency with cheetah templates. Args: tmpl: the string with dependencies Returns: the resolved value """ tmpl = str(tmpl) if '$' not in tmpl: return tmpl n = dict((param.get_key(), param.template_arg) for param in self.get_params()) # TODO: cache that try: return str(Template(tmpl, n)) except Exception as err: return "Template error: {}\n {}".format(tmpl, err) ############################################## # Controller Modify ############################################## def type_controller_modify(self, direction): """ Change the type controller. 
Args: direction: +1 or -1 Returns: true for change """ changed = False type_param = None for param in filter(lambda p: p.is_enum(), self.get_params()): children = self.get_ports() + self.get_params() # Priority to the type controller if param.get_key() in ' '.join(map(lambda p: p._type, children)): type_param = param # Use param if type param is unset if not type_param: type_param = param if type_param: # Try to increment the enum by direction try: keys = type_param.get_option_keys() old_index = keys.index(type_param.get_value()) new_index = (old_index + direction + len(keys)) % len(keys) type_param.set_value(keys[new_index]) changed = True except: pass return changed def form_bus_structure(self, direc): if direc == 'source': get_p = self.get_sources get_p_gui = self.get_sources_gui bus_structure = self.get_bus_structure('source') else: get_p = self.get_sinks get_p_gui = self.get_sinks_gui bus_structure = self.get_bus_structure('sink') struct = [range(len(get_p()))] if True in map(lambda a: isinstance(a.get_nports(), int), get_p()): structlet = [] last = 0 for j in [i.get_nports() for i in get_p() if isinstance(i.get_nports(), int)]: structlet.extend(map(lambda a: a+last, range(j))) last = structlet[-1] + 1 struct = [structlet] if bus_structure: struct = bus_structure self.current_bus_structure[direc] = struct return struct def bussify(self, n, direc): if direc == 'source': get_p = self.get_sources get_p_gui = self.get_sources_gui bus_structure = self.get_bus_structure('source') else: get_p = self.get_sinks get_p_gui = self.get_sinks_gui bus_structure = self.get_bus_structure('sink') for elt in get_p(): for connect in elt.get_connections(): self.get_parent().remove_element(connect) if ('bus' not in map(lambda a: a.get_type(), get_p())) and len(get_p()) > 0: struct = self.form_bus_structure(direc) self.current_bus_structure[direc] = struct if get_p()[0].get_nports(): n['nports'] = str(1) for i in range(len(struct)): n['key'] = str(len(get_p())) n = odict(n) port = 
self.get_parent().get_parent().Port(block=self, n=n, dir=direc) get_p().append(port) elif 'bus' in map(lambda a: a.get_type(), get_p()): for elt in get_p_gui(): get_p().remove(elt) self.current_bus_structure[direc] = '' ############################################## # Import/Export Methods ############################################## def export_data(self): """ Export this block's params to nested data. Returns: a nested data odict """ n = odict() n['key'] = self.get_key() n['param'] = map(lambda p: p.export_data(), sorted(self.get_params(), key=str)) if 'bus' in map(lambda a: a.get_type(), self.get_sinks()): n['bus_sink'] = str(1) if 'bus' in map(lambda a: a.get_type(), self.get_sources()): n['bus_source'] = str(1) return n def get_hash(self): return hash(tuple(map(hash, self.get_params()))) def import_data(self, n): """ Import this block's params from nested data. Any param keys that do not exist will be ignored. Since params can be dynamically created based another param, call rewrite, and repeat the load until the params stick. This call to rewrite will also create any dynamic ports that are needed for the connections creation phase. 
Args: n: the nested data odict """ my_hash = 0 while self.get_hash() != my_hash: params_n = n.findall('param') for param_n in params_n: key = param_n.find('key') value = param_n.find('value') # The key must exist in this block's params if key in self.get_param_keys(): self.get_param(key).set_value(value) # Store hash and call rewrite my_hash = self.get_hash() self.rewrite() bussinks = n.findall('bus_sink') if len(bussinks) > 0 and not self._bussify_sink: self.bussify({'name': 'bus', 'type': 'bus'}, 'sink') elif len(bussinks) > 0: self.bussify({'name': 'bus', 'type': 'bus'}, 'sink') self.bussify({'name': 'bus', 'type': 'bus'}, 'sink') bussrcs = n.findall('bus_source') if len(bussrcs) > 0 and not self._bussify_source: self.bussify({'name': 'bus', 'type': 'bus'}, 'source') elif len(bussrcs) > 0: self.bussify({'name': 'bus', 'type': 'bus'}, 'source') self.bussify({'name': 'bus', 'type': 'bus'}, 'source')
gpl-3.0
menardorama/ReadyNAS-Add-ons
headphones-1.0.0/debian/headphones/apps/headphones/lib/apscheduler/executors/base.py
33
4649
from abc import ABCMeta, abstractmethod from collections import defaultdict from datetime import datetime, timedelta from traceback import format_tb import logging import sys from pytz import utc import six from apscheduler.events import JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED class MaxInstancesReachedError(Exception): def __init__(self, job): super(MaxInstancesReachedError, self).__init__( 'Job "%s" has already reached its maximum number of instances (%d)' % (job.id, job.max_instances)) class BaseExecutor(six.with_metaclass(ABCMeta, object)): """Abstract base class that defines the interface that every executor must implement.""" _scheduler = None _lock = None _logger = logging.getLogger('apscheduler.executors') def __init__(self): super(BaseExecutor, self).__init__() self._instances = defaultdict(lambda: 0) def start(self, scheduler, alias): """ Called by the scheduler when the scheduler is being started or when the executor is being added to an already running scheduler. :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting this executor :param str|unicode alias: alias of this executor as it was assigned to the scheduler """ self._scheduler = scheduler self._lock = scheduler._create_lock() self._logger = logging.getLogger('apscheduler.executors.%s' % alias) def shutdown(self, wait=True): """ Shuts down this executor. :param bool wait: ``True`` to wait until all submitted jobs have been executed """ def submit_job(self, job, run_times): """ Submits job for execution. 
:param Job job: job to execute :param list[datetime] run_times: list of datetimes specifying when the job should have been run :raises MaxInstancesReachedError: if the maximum number of allowed instances for this job has been reached """ assert self._lock is not None, 'This executor has not been started yet' with self._lock: if self._instances[job.id] >= job.max_instances: raise MaxInstancesReachedError(job) self._do_submit_job(job, run_times) self._instances[job.id] += 1 @abstractmethod def _do_submit_job(self, job, run_times): """Performs the actual task of scheduling `run_job` to be called.""" def _run_job_success(self, job_id, events): """Called by the executor with the list of generated events when `run_job` has been successfully called.""" with self._lock: self._instances[job_id] -= 1 for event in events: self._scheduler._dispatch_event(event) def _run_job_error(self, job_id, exc, traceback=None): """Called by the executor with the exception if there is an error calling `run_job`.""" with self._lock: self._instances[job_id] -= 1 exc_info = (exc.__class__, exc, traceback) self._logger.error('Error running job %s', job_id, exc_info=exc_info) def run_job(job, jobstore_alias, run_times, logger_name): """Called by executors to run the job. 
Returns a list of scheduler events to be dispatched by the scheduler.""" events = [] logger = logging.getLogger(logger_name) for run_time in run_times: # See if the job missed its run time window, and handle possible misfires accordingly if job.misfire_grace_time is not None: difference = datetime.now(utc) - run_time grace_time = timedelta(seconds=job.misfire_grace_time) if difference > grace_time: events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, run_time)) logger.warning('Run time of job "%s" was missed by %s', job, difference) continue logger.info('Running job "%s" (scheduled at %s)', job, run_time) try: retval = job.func(*job.args, **job.kwargs) except: exc, tb = sys.exc_info()[1:] formatted_tb = ''.join(format_tb(tb)) events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, exception=exc, traceback=formatted_tb)) logger.exception('Job "%s" raised an exception', job) else: events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval)) logger.info('Job "%s" executed successfully', job) return events
gpl-2.0
davidzchen/tensorflow
tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py
6
5123
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the sequence datasets serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import combinations from tensorflow.python.platform import test class SkipDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase, parameterized.TestCase): def _build_skip_dataset(self, count): components = (np.arange(10),) return dataset_ops.Dataset.from_tensor_slices(components).skip(count) @combinations.generate(test_base.default_test_combinations()) def testSkipFewerThanInputs(self): count = 4 num_outputs = 10 - count self.run_core_tests(lambda: self._build_skip_dataset(count), num_outputs) @combinations.generate(test_base.default_test_combinations()) def testSkipVarious(self): # Skip more than inputs self.run_core_tests(lambda: self._build_skip_dataset(20), 0) # Skip exactly the input size self.run_core_tests(lambda: self._build_skip_dataset(10), 0) self.run_core_tests(lambda: 
self._build_skip_dataset(-1), 0) # Skip nothing self.run_core_tests(lambda: self._build_skip_dataset(0), 10) @combinations.generate(test_base.default_test_combinations()) def testInvalidSkip(self): with self.assertRaisesRegex(ValueError, 'Shape must be rank 0 but is rank 1'): self.run_core_tests(lambda: self._build_skip_dataset([1, 2]), 0) class TakeDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase, parameterized.TestCase): def _build_take_dataset(self, count): components = (np.arange(10),) return dataset_ops.Dataset.from_tensor_slices(components).take(count) @combinations.generate(test_base.default_test_combinations()) def testTakeFewerThanInputs(self): count = 4 self.run_core_tests(lambda: self._build_take_dataset(count), count) @combinations.generate(test_base.default_test_combinations()) def testTakeVarious(self): # Take more than inputs self.run_core_tests(lambda: self._build_take_dataset(20), 10) # Take exactly the input size self.run_core_tests(lambda: self._build_take_dataset(10), 10) # Take all self.run_core_tests(lambda: self._build_take_dataset(-1), 10) # Take nothing self.run_core_tests(lambda: self._build_take_dataset(0), 0) def testInvalidTake(self): with self.assertRaisesRegex(ValueError, 'Shape must be rank 0 but is rank 1'): self.run_core_tests(lambda: self._build_take_dataset([1, 2]), 0) class RepeatDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase, parameterized.TestCase): def _build_repeat_dataset(self, count, take_count=3): components = (np.arange(10),) return dataset_ops.Dataset.from_tensor_slices(components).take( take_count).repeat(count) @combinations.generate(test_base.default_test_combinations()) def testFiniteRepeat(self): count = 10 self.run_core_tests(lambda: self._build_repeat_dataset(count), 3 * count) @combinations.generate(test_base.default_test_combinations()) def testEmptyRepeat(self): self.run_core_tests(lambda: self._build_repeat_dataset(0), 0) 
@combinations.generate(test_base.default_test_combinations()) def testInfiniteRepeat(self): self.verify_unused_iterator( lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False) self.verify_multiple_breaks( lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False) self.verify_reset_restored_iterator( lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False) # Test repeat empty dataset self.run_core_tests(lambda: self._build_repeat_dataset(-1, 0), 0) @combinations.generate(test_base.default_test_combinations()) def testInvalidRepeat(self): with self.assertRaisesRegex(ValueError, 'Shape must be rank 0 but is rank 1'): self.run_core_tests(lambda: self._build_repeat_dataset([1, 2], 0), 0) if __name__ == '__main__': test.main()
apache-2.0
puruckertom/poptox
poptox/logistic/logistic_algorithm.py
1
1472
# -*- coding: utf-8 -*- """ Created on Tue Jan 03 13:30:41 2012 @author: T.H """ import webapp2 as webapp from google.appengine.ext.webapp.util import run_wsgi_app from google.appengine.ext.webapp import template import os class genericAlgorithmPage(webapp.RequestHandler): def get(self): text_file1 = open('logistic/logistic_algorithm.txt','r') x = text_file1.read() templatepath = os.path.dirname(__file__) + '/../templates/' html = template.render(templatepath + '01pop_uberheader.html', {'title'}) html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'logistic','page':'algorithm'}) html = html + template.render(templatepath + '03pop_ubertext_links_left.html', {}) html = html + template.render(templatepath + '04uberalgorithm_start.html', { 'model':'logistic', 'model_attributes':'Logistic Model Algorithms', 'text_paragraph':x}) html = html + template.render(templatepath + '04ubertext_end.html', {}) html = html + template.render(templatepath + '05pop_ubertext_links_right.html', {}) html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''}) self.response.out.write(html) app = webapp.WSGIApplication([('/.*', genericAlgorithmPage)], debug=True) def main(): run_wsgi_app(app) if __name__ == '__main__': main()
unlicense
Connexions/cnx-upgrade
cnxupgrade/tests/test_upgrades_cnxml7.py
1
4511
# -*- coding: utf-8 -*- # ### # Copyright (c) 2013, Rice University # This software is subject to the provisions of the GNU Affero General # Public License version 3 (AGPLv3). # See LICENCE.txt for details. # ### """Tests for cnx-upgrades cnxml7 """ import os import psycopg2 import unittest import lxml.etree from . import * def get_data_file(filename): path = os.path.join(TESTING_DATA_DIRECTORY, filename) with open(path) as f: return f.read() class MainTestCase(unittest.TestCase): """Tests for cnxupgrade.upgrades.cnxml7.main """ fixture = postgresql_fixture def setUp(self): self.fixture.setUp() @db_connect def setup_test_data(self, cursor, moduleid, version): cursor.execute("INSERT INTO abstracts VALUES (1, '', '');") cursor.execute("INSERT INTO document_controls (uuid) VALUES ('209deb1f-1a46-4369-9e0d-18674cf58a3e')") cursor.execute(''' INSERT INTO modules VALUES (1, 'Module', %s, '209deb1f-1a46-4369-9e0d-18674cf58a3e', %s, 'Preface to College Physics', '2013-07-31 14:07:20.542211-05', '2013-07-31 14:07:20.542211-05', 1, 11, '', '', '', NULL, NULL, 'en', '{e5a07af6-09b9-4b74-aa7a-b7510bee90b8}', '{e5a07af6-09b9-4b74-aa7a-b7510bee90b8, 1df3bab1-1dc7-4017-9b3a-960a87e706b1}', '{9366c786-e3c8-4960-83d4-aec1269ac5e5}', NULL, NULL, NULL, 7, NULL); INSERT INTO files (file) VALUES (%s); INSERT INTO module_files (fileid, module_ident, filename, uuid) VALUES (1, 1, 'index.cnxml', '209deb1f-1a46-4369-9e0d-18674cf58a3e'); ''', (moduleid, version, memoryview(get_data_file( '{}-{}.cnxml'.format(moduleid, version))))) # remove index.cnxml.html cursor.execute("DELETE FROM module_files WHERE filename = 'index.cnxml.html'") def tearDown(self): self.fixture.tearDown() def call_target(self): from ..upgrades.cnxml7.main import main with psycopg2.connect(DB_CONNECTION_STRING) as db_conn: return list(main(db_conn)) @db_connect def get_new_cnxml(self, cursor): cursor.execute('SELECT f.file FROM module_files mf ' 'JOIN files f ON mf.fileid = f.fileid ' "WHERE mf.filename = 
'index_auto_generated.cnxml'") return cursor.fetchone()[0][:] def test_successful(self): self.setup_test_data('m10470', '2.2') result = self.call_target() self.assertEqual(result, [('m10470', '2.2', 1, u'0.5', True, '')]) self.assertTrue('cnxml-version="0.7"' in self.get_new_cnxml()) def test_no_upgrade_necessary(self): self.setup_test_data('m11425', '1.19') result = self.call_target() self.assertEqual(result, [('m11425', '1.19', 1, u'0.7', True, '')]) self.assertTrue('cnxml-version="0.7"' in self.get_new_cnxml()) def test_missing_namespace_declaration(self): self.setup_test_data('m12563', '1.6') result = self.call_target() self.assertEqual(result, [('m12563', '1.6', 1, u'0.5', True, '')]) self.assertTrue('cnxml-version="0.7"' in self.get_new_cnxml()) class TransformsTestCase(unittest.TestCase): """Tests for cnxupgrade.upgrades.cnxml7.transforms """ def call_target(self, source, version=None): from ..upgrades.cnxml7 import transforms return transforms.upgrade_document(source, version=version) def test_no_upgrade_necessary(self): cnxml = get_data_file('m11425-1.19.cnxml') new_cnxml, result, message = self.call_target(cnxml, version='0.7') self.assertEqual(message, '') self.assertEqual(result, True) self.assertEqual(new_cnxml, cnxml) def test_successful(self): cnxml = get_data_file('m10470-2.2.cnxml') new_cnxml, result, message = self.call_target(cnxml, version='0.5') self.assertEqual(message, '') self.assertTrue(result) self.assertTrue('cnxml-version="0.7"' in new_cnxml) # Assert generated cnxml is valid xml self.assertTrue(lxml.etree.fromstring(new_cnxml) is not None) def test_missing_namespace_declaration(self): cnxml = get_data_file('m12563-1.6.cnxml') new_cnxml, result, message = self.call_target(cnxml, version='0.5') self.assertEqual(message, '') self.assertTrue(result) self.assertTrue('cnxml-version="0.7"' in new_cnxml) # Assert generated cnxml is valid xml self.assertTrue(lxml.etree.fromstring(new_cnxml) is not None)
agpl-3.0
twstrike/le_for_patching
letsencrypt-apache/letsencrypt_apache/tests/complex_parsing_test.py
6
4551
"""Tests for letsencrypt_apache.parser.""" import os import shutil import unittest from letsencrypt import errors from letsencrypt_apache.tests import util class ComplexParserTest(util.ParserTest): """Apache Parser Test.""" def setUp(self): # pylint: disable=arguments-differ super(ComplexParserTest, self).setUp( "complex_parsing", "complex_parsing") self.setup_variables() # This needs to happen after due to setup_variables not being run # until after self.parser.init_modules() # pylint: disable=protected-access def tearDown(self): shutil.rmtree(self.temp_dir) shutil.rmtree(self.config_dir) shutil.rmtree(self.work_dir) def setup_variables(self): """Set up variables for parser.""" self.parser.variables.update( { "COMPLEX": "", "tls_port": "1234", "fnmatch_filename": "test_fnmatch.conf", "tls_port_str": "1234" } ) def test_filter_args_num(self): """Note: This may also fail do to Include conf-enabled/ syntax.""" matches = self.parser.find_dir("TestArgsDirective") self.assertEqual(len(self.parser.filter_args_num(matches, 1)), 3) self.assertEqual(len(self.parser.filter_args_num(matches, 2)), 2) self.assertEqual(len(self.parser.filter_args_num(matches, 3)), 1) def test_basic_variable_parsing(self): matches = self.parser.find_dir("TestVariablePort") self.assertEqual(len(matches), 1) self.assertEqual(self.parser.get_arg(matches[0]), "1234") def test_basic_variable_parsing_quotes(self): matches = self.parser.find_dir("TestVariablePortStr") self.assertEqual(len(matches), 1) self.assertEqual(self.parser.get_arg(matches[0]), "1234") def test_invalid_variable_parsing(self): del self.parser.variables["tls_port"] matches = self.parser.find_dir("TestVariablePort") self.assertRaises( errors.PluginError, self.parser.get_arg, matches[0]) def test_basic_ifdefine(self): self.assertEqual(len(self.parser.find_dir("VAR_DIRECTIVE")), 2) self.assertEqual(len(self.parser.find_dir("INVALID_VAR_DIRECTIVE")), 0) def test_basic_ifmodule(self): 
self.assertEqual(len(self.parser.find_dir("MOD_DIRECTIVE")), 2) self.assertEqual( len(self.parser.find_dir("INVALID_MOD_DIRECTIVE")), 0) def test_nested(self): self.assertEqual(len(self.parser.find_dir("NESTED_DIRECTIVE")), 3) self.assertEqual( len(self.parser.find_dir("INVALID_NESTED_DIRECTIVE")), 0) def test_load_modules(self): """If only first is found, there is bad variable parsing.""" self.assertTrue("status_module" in self.parser.modules) self.assertTrue("mod_status.c" in self.parser.modules) # This is in an IfDefine self.assertTrue("ssl_module" in self.parser.modules) self.assertTrue("mod_ssl.c" in self.parser.modules) def verify_fnmatch(self, arg, hit=True): """Test if Include was correctly parsed.""" from letsencrypt_apache import parser self.parser.add_dir(parser.get_aug_path(self.parser.loc["default"]), "Include", [arg]) if hit: self.assertTrue(self.parser.find_dir("FNMATCH_DIRECTIVE")) else: self.assertFalse(self.parser.find_dir("FNMATCH_DIRECTIVE")) # NOTE: Only run one test per function otherwise you will have # inf recursion def test_include(self): self.verify_fnmatch("test_fnmatch.?onf") def test_include_complex(self): self.verify_fnmatch("../complex_parsing/[te][te]st_*.?onf") def test_include_fullpath(self): self.verify_fnmatch(os.path.join(self.config_path, "test_fnmatch.conf")) def test_include_fullpath_trailing_slash(self): self.verify_fnmatch(self.config_path + "//") def test_include_single_quotes(self): self.verify_fnmatch("'" + self.config_path + "'") def test_include_double_quotes(self): self.verify_fnmatch('"' + self.config_path + '"') def test_include_variable(self): self.verify_fnmatch("../complex_parsing/${fnmatch_filename}") def test_include_missing(self): # This should miss self.verify_fnmatch("test_*.onf", False) if __name__ == "__main__": unittest.main() # pragma: no cover
apache-2.0
crosenth/bioy
bioy_pkg/subcommands/pull_reads.py
2
1748
# This file is part of Bioy # # Bioy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Bioy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Bioy. If not, see <http://www.gnu.org/licenses/>. """ Parse barcode, primer, and read from a fastq file """ import logging import sys from itertools import ifilter from bioy_pkg.sequtils import fastalite from bioy_pkg.utils import opener, Opener, Csv2Dict log = logging.getLogger(__name__) def build_parser(parser): parser.add_argument('fasta', type = lambda f: fastalite(opener(f)), help = 'input file containing raw reads') parser.add_argument('--sample-id', help = 'sample id to pull reads for') parser.add_argument('--map-file', type = Csv2Dict(value = 'sample_id', fieldnames=['sequence_id','sample_id']), help = 'csv(.bz2) file containing sequence_id,sample_id in the rows.') parser.add_argument('-o', '--out', type = Opener('w'), default = sys.stdout, help = 'fasta output file') def action(args): sample_filter = lambda s: args.map_file[s.description] == args.sample_id seqs = ifilter(sample_filter, args.fasta) args.out.writelines('>{}\n{}\n'.format(s.description, s.seq) for s in seqs)
gpl-3.0
pigeonflight/strider-plone
docker/appengine/lib/django-1.3/django/db/backends/oracle/base.py
150
32300
""" Oracle database backend for Django. Requires cx_Oracle: http://cx-oracle.sourceforge.net/ """ import datetime import sys import time from decimal import Decimal def _setup_environment(environ): import platform # Cygwin requires some special voodoo to set the environment variables # properly so that Oracle will see them. if platform.system().upper().startswith('CYGWIN'): try: import ctypes except ImportError, e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading ctypes: %s; " "the Oracle backend requires ctypes to " "operate correctly under Cygwin." % e) kernel32 = ctypes.CDLL('kernel32') for name, value in environ: kernel32.SetEnvironmentVariableA(name, value) else: import os os.environ.update(environ) _setup_environment([ # Oracle takes client-side character set encoding from the environment. ('NLS_LANG', '.UTF8'), # This prevents unicode from getting mangled by getting encoded into the # potentially non-unicode database character set. ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'), ]) try: import cx_Oracle as Database except ImportError, e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e) from django.db import utils from django.db.backends import * from django.db.backends.signals import connection_created from django.db.backends.oracle.client import DatabaseClient from django.db.backends.oracle.creation import DatabaseCreation from django.db.backends.oracle.introspection import DatabaseIntrospection from django.utils.encoding import smart_str, force_unicode DatabaseError = Database.DatabaseError IntegrityError = Database.IntegrityError # Check whether cx_Oracle was compiled with the WITH_UNICODE option. This will # also be True in Python 3.0. 
if int(Database.version.split('.', 1)[0]) >= 5 and not hasattr(Database, 'UNICODE'): convert_unicode = force_unicode else: convert_unicode = smart_str class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () needs_datetime_string_cast = False interprets_empty_strings_as_nulls = True uses_savepoints = True can_return_id_from_insert = True allow_sliced_subqueries = False supports_subqueries_in_group_by = False supports_timezones = False supports_bitwise_or = False can_defer_constraint_checks = True ignores_nulls_in_unique_constraints = False class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.oracle.compiler" def autoinc_sql(self, table, column): # To simulate auto-incrementing primary keys in Oracle, we have to # create a sequence and a trigger. sq_name = get_sequence_name(table) tr_name = get_trigger_name(table) tbl_name = self.quote_name(table) col_name = self.quote_name(column) sequence_sql = """ DECLARE i INTEGER; BEGIN SELECT COUNT(*) INTO i FROM USER_CATALOG WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE'; IF i = 0 THEN EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"'; END IF; END; /""" % locals() trigger_sql = """ CREATE OR REPLACE TRIGGER "%(tr_name)s" BEFORE INSERT ON %(tbl_name)s FOR EACH ROW WHEN (new.%(col_name)s IS NULL) BEGIN SELECT "%(sq_name)s".nextval INTO :new.%(col_name)s FROM dual; END; /""" % locals() return sequence_sql, trigger_sql def date_extract_sql(self, lookup_type, field_name): # http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163 if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. 
return "TO_CHAR(%s, 'D')" % field_name else: return "EXTRACT(%s FROM %s)" % (lookup_type, field_name) def date_interval_sql(self, sql, connector, timedelta): """ Implements the interval functionality for expressions format for Oracle: (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6)) """ minutes, seconds = divmod(timedelta.seconds, 60) hours, minutes = divmod(minutes, 60) days = str(timedelta.days) day_precision = len(days) fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))" return fmt % (sql, connector, days, hours, minutes, seconds, timedelta.microseconds, day_precision) def date_trunc_sql(self, lookup_type, field_name): # Oracle uses TRUNC() for both dates and numbers. # http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151 if lookup_type == 'day': sql = 'TRUNC(%s)' % field_name else: sql = "TRUNC(%s, '%s')" % (field_name, lookup_type) return sql def convert_values(self, value, field): if isinstance(value, Database.LOB): value = value.read() if field and field.get_internal_type() == 'TextField': value = force_unicode(value) # Oracle stores empty strings as null. We need to undo this in # order to adhere to the Django convention of using the empty # string instead of null, but only if the field accepts the # empty string. 
if value is None and field and field.empty_strings_allowed: value = u'' # Convert 1 or 0 to True or False elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'): value = bool(value) # Force floats to the correct type elif value is not None and field and field.get_internal_type() == 'FloatField': value = float(value) # Convert floats to decimals elif value is not None and field and field.get_internal_type() == 'DecimalField': value = util.typecast_decimal(field.format_number(value)) # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. We use the type # of the Field to determine which to cast to, but it's not # always available. # As a workaround, we cast to date if all the time-related # values are 0, or to time if the date is 1/1/1900. # This could be cleaned a bit by adding a method to the Field # classes to normalize values from the database (the to_python # method is used for validation and isn't what we want here). elif isinstance(value, Database.Timestamp): # In Python 2.3, the cx_Oracle driver returns its own # Timestamp object that we must convert to a datetime class. 
if not isinstance(value, datetime.datetime): value = datetime.datetime(value.year, value.month, value.day, value.hour, value.minute, value.second, value.fsecond) if field and field.get_internal_type() == 'DateTimeField': pass elif field and field.get_internal_type() == 'DateField': value = value.date() elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1): value = value.time() elif value.hour == value.minute == value.second == value.microsecond == 0: value = value.date() return value def datetime_cast_sql(self): return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def drop_sequence_sql(self, table): return "DROP SEQUENCE %s;" % self.quote_name(get_sequence_name(table)) def fetch_returned_insert_id(self, cursor): return long(cursor._insert_id_var.getvalue()) def field_cast_sql(self, db_type): if db_type and db_type.endswith('LOB'): return "DBMS_LOB.SUBSTR(%s)" else: return "%s" def last_insert_id(self, cursor, table_name, pk_name): sq_name = get_sequence_name(table_name) cursor.execute('SELECT "%s".currval FROM dual' % sq_name) return cursor.fetchone()[0] def lookup_cast(self, lookup_type): if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): return "UPPER(%s)" return "%s" def max_in_list_size(self): return 1000 def max_name_length(self): return 30 def prep_for_iexact_query(self, x): return x def process_clob(self, value): if value is None: return u'' return force_unicode(value.read()) def quote_name(self, name): # SQL92 requires delimited (quoted) names to be case-sensitive. When # not quoted, Oracle has case-insensitive behavior for identifiers, but # always defaults to uppercase. # We simplify things by making Oracle identifiers always uppercase. 
if not name.startswith('"') and not name.endswith('"'): name = '"%s"' % util.truncate_name(name.upper(), self.max_name_length()) return name.upper() def random_function_sql(self): return "DBMS_RANDOM.RANDOM" def regex_lookup_9(self, lookup_type): raise NotImplementedError("Regexes are not supported in Oracle before version 10g.") def regex_lookup_10(self, lookup_type): if lookup_type == 'regex': match_option = "'c'" else: match_option = "'i'" return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option def regex_lookup(self, lookup_type): # If regex_lookup is called before it's been initialized, then create # a cursor to initialize it and recur. from django.db import connection connection.cursor() return connection.ops.regex_lookup(lookup_type) def return_insert_id(self): return "RETURNING %s INTO %%s", (InsertIdVar(),) def savepoint_create_sql(self, sid): return convert_unicode("SAVEPOINT " + self.quote_name(sid)) def savepoint_rollback_sql(self, sid): return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid)) def sql_flush(self, style, tables, sequences): # Return a list of 'TRUNCATE x;', 'TRUNCATE y;', # 'TRUNCATE z;'... style SQL statements if tables: # Oracle does support TRUNCATE, but it seems to get us into # FK referential trouble, whereas DELETE FROM table works. sql = ['%s %s %s;' % \ (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table))) for table in tables] # Since we've just deleted all the rows, running our sequence # ALTER code will reset the sequence to 0. 
for sequence_info in sequences: sequence_name = get_sequence_name(sequence_info['table']) table_name = self.quote_name(sequence_info['table']) column_name = self.quote_name(sequence_info['column'] or 'id') query = _get_sequence_reset_sql() % {'sequence': sequence_name, 'table': table_name, 'column': column_name} sql.append(query) return sql else: return [] def sequence_reset_sql(self, style, model_list): from django.db import models output = [] query = _get_sequence_reset_sql() for model in model_list: for f in model._meta.local_fields: if isinstance(f, models.AutoField): table_name = self.quote_name(model._meta.db_table) sequence_name = get_sequence_name(model._meta.db_table) column_name = self.quote_name(f.column) output.append(query % {'sequence': sequence_name, 'table': table_name, 'column': column_name}) # Only one AutoField is allowed per model, so don't # continue to loop break for f in model._meta.many_to_many: if not f.rel.through: table_name = self.quote_name(f.m2m_db_table()) sequence_name = get_sequence_name(f.m2m_db_table()) column_name = self.quote_name('id') output.append(query % {'sequence': sequence_name, 'table': table_name, 'column': column_name}) return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): return "%sTABLESPACE %s" % ((inline and "USING INDEX " or ""), self.quote_name(tablespace)) def value_to_db_datetime(self, value): # Oracle doesn't support tz-aware datetimes if getattr(value, 'tzinfo', None) is not None: raise ValueError("Oracle backend does not support timezone-aware datetimes.") return super(DatabaseOperations, self).value_to_db_datetime(value) def value_to_db_time(self, value): if value is None: return None if isinstance(value, basestring): return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6])) # Oracle doesn't support tz-aware datetimes if value.tzinfo is not None: raise ValueError("Oracle backend does not support timezone-aware datetimes.") return 
datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def year_lookup_bounds_for_date_field(self, value): first = '%s-01-01' second = '%s-12-31' return [first % value, second % value] def combine_expression(self, connector, sub_expressions): "Oracle requires special cases for %% and & operators in query expressions" if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': raise NotImplementedError("Bit-wise or is not supported in Oracle.") return super(DatabaseOperations, self).combine_expression(connector, sub_expressions) class _UninitializedOperatorsDescriptor(object): def __get__(self, instance, owner): # If connection.operators is looked up before a connection has been # created, transparently initialize connection.operators to avert an # AttributeError. if instance is None: raise AttributeError("operators not available as class attribute") # Creating a cursor will initialize the operators. 
instance.cursor().close() return instance.__dict__['operators'] class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'oracle' operators = _UninitializedOperatorsDescriptor() _standard_operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", } _likec_operators = _standard_operators.copy() _likec_operators.update({ 'contains': "LIKEC %s ESCAPE '\\'", 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'", 'startswith': "LIKEC %s ESCAPE '\\'", 'endswith': "LIKEC %s ESCAPE '\\'", 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'", 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'", }) def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.oracle_version = None self.features = DatabaseFeatures(self) use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True) self.features.can_return_id_from_insert = use_returning_into self.ops = DatabaseOperations() self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = BaseDatabaseValidation(self) def _valid_connection(self): return self.connection is not None def _connect_string(self): settings_dict = self.settings_dict if not settings_dict['HOST'].strip(): settings_dict['HOST'] = 'localhost' if settings_dict['PORT'].strip(): dsn = Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), 
settings_dict['NAME']) else: dsn = settings_dict['NAME'] return "%s/%s@%s" % (settings_dict['USER'], settings_dict['PASSWORD'], dsn) def _cursor(self): cursor = None if not self._valid_connection(): conn_string = convert_unicode(self._connect_string()) conn_params = self.settings_dict['OPTIONS'].copy() if 'use_returning_into' in conn_params: del conn_params['use_returning_into'] self.connection = Database.connect(conn_string, **conn_params) cursor = FormatStylePlaceholderCursor(self.connection) # Set oracle date to ansi date format. This only needs to execute # once when we create a new connection. We also set the Territory # to 'AMERICA' which forces Sunday to evaluate to a '1' in TO_CHAR(). cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' " "NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF' " "NLS_TERRITORY = 'AMERICA'") if 'operators' not in self.__dict__: # Ticket #14149: Check whether our LIKE implementation will # work for this connection or we need to fall back on LIKEC. # This check is performed only once per DatabaseWrapper # instance per thread, since subsequent connections will use # the same settings. try: cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s" % self._standard_operators['contains'], ['X']) except utils.DatabaseError: self.operators = self._likec_operators else: self.operators = self._standard_operators try: self.oracle_version = int(self.connection.version.split('.')[0]) # There's no way for the DatabaseOperations class to know the # currently active Oracle version, so we do some setups here. # TODO: Multi-db support will need a better solution (a way to # communicate the current version). if self.oracle_version <= 9: self.ops.regex_lookup = self.ops.regex_lookup_9 else: self.ops.regex_lookup = self.ops.regex_lookup_10 except ValueError: pass try: self.connection.stmtcachesize = 20 except: # Django docs specify cx_Oracle version 4.3.1 or higher, but # stmtcachesize is available only in 4.3.2 and up. 
pass connection_created.send(sender=self.__class__, connection=self) if not cursor: cursor = FormatStylePlaceholderCursor(self.connection) return cursor # Oracle doesn't support savepoint commits. Ignore them. def _savepoint_commit(self, sid): pass def _commit(self): if self.connection is not None: try: return self.connection.commit() except Database.IntegrityError, e: # In case cx_Oracle implements (now or in a future version) # raising this specific exception raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] except Database.DatabaseError, e: # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception # with the following attributes and values: # code = 2091 # message = 'ORA-02091: transaction rolled back # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS # _C00102056) violated - parent key not found' # We convert that particular case to our IntegrityError exception x = e.args[0] if hasattr(x, 'code') and hasattr(x, 'message') \ and x.code == 2091 and 'ORA-02291' in x.message: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2] class OracleParam(object): """ Wrapper object for formatting parameters for Oracle. If the string representation of the value is large enough (greater than 4000 characters) the input size needs to be set as CLOB. Alternatively, if the parameter has an `input_size` attribute, then the value of the `input_size` attribute will be used instead. Otherwise, no input size will be set for the parameter when executing the query. """ def __init__(self, param, cursor, strings_only=False): if hasattr(param, 'bind_parameter'): self.smart_str = param.bind_parameter(cursor) else: self.smart_str = convert_unicode(param, cursor.charset, strings_only) if hasattr(param, 'input_size'): # If parameter has `input_size` attribute, use that. 
self.input_size = param.input_size elif isinstance(param, basestring) and len(param) > 4000: # Mark any string param greater than 4000 characters as a CLOB. self.input_size = Database.CLOB else: self.input_size = None class VariableWrapper(object): """ An adapter class for cursor variables that prevents the wrapped object from being converted into a string when used to instanciate an OracleParam. This can be used generally for any other object that should be passed into Cursor.execute as-is. """ def __init__(self, var): self.var = var def bind_parameter(self, cursor): return self.var def __getattr__(self, key): return getattr(self.var, key) def __setattr__(self, key, value): if key == 'var': self.__dict__[key] = value else: setattr(self.var, key, value) class InsertIdVar(object): """ A late-binding cursor variable that can be passed to Cursor.execute as a parameter, in order to receive the id of the row created by an insert statement. """ def bind_parameter(self, cursor): param = cursor.cursor.var(Database.NUMBER) cursor._insert_id_var = param return param class FormatStylePlaceholderCursor(object): """ Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". We also do automatic conversion between Unicode on the Python side and UTF-8 -- for talking to Oracle -- in here. """ charset = 'utf-8' def __init__(self, connection): self.cursor = connection.cursor() # Necessary to retrieve decimal values without rounding error. self.cursor.numbersAsStrings = True # Default arraysize of 1 is highly sub-optimal. 
self.cursor.arraysize = 100 def _format_params(self, params): return tuple([OracleParam(p, self, True) for p in params]) def _guess_input_sizes(self, params_list): sizes = [None] * len(params_list[0]) for params in params_list: for i, value in enumerate(params): if value.input_size: sizes[i] = value.input_size self.setinputsizes(*sizes) def _param_generator(self, params): return [p.smart_str for p in params] def execute(self, query, params=None): if params is None: params = [] else: params = self._format_params(params) args = [(':arg%d' % i) for i in range(len(params))] # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(';') or query.endswith('/'): query = query[:-1] query = convert_unicode(query % tuple(args), self.charset) self._guess_input_sizes([params]) try: return self.cursor.execute(query, self._param_generator(params)) except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] except Database.DatabaseError, e: # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400. if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError): raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2] def executemany(self, query, params=None): try: args = [(':arg%d' % i) for i in range(len(params[0]))] except (IndexError, TypeError): # No params given, nothing to do return None # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. 
if query.endswith(';') or query.endswith('/'): query = query[:-1] query = convert_unicode(query % tuple(args), self.charset) formatted = [self._format_params(i) for i in params] self._guess_input_sizes(formatted) try: return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] except Database.DatabaseError, e: # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400. if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError): raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2] def fetchone(self): row = self.cursor.fetchone() if row is None: return row return _rowfactory(row, self.cursor) def fetchmany(self, size=None): if size is None: size = self.arraysize return tuple([_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size)]) def fetchall(self): return tuple([_rowfactory(r, self.cursor) for r in self.cursor.fetchall()]) def var(self, *args): return VariableWrapper(self.cursor.var(*args)) def arrayvar(self, *args): return VariableWrapper(self.cursor.arrayvar(*args)) def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] else: return getattr(self.cursor, attr) def __iter__(self): return CursorIterator(self.cursor) class CursorIterator(object): """Cursor iterator wrapper that invokes our custom row factory.""" def __init__(self, cursor): self.cursor = cursor self.iter = iter(cursor) def __iter__(self): return self def next(self): return _rowfactory(self.iter.next(), self.cursor) def _rowfactory(row, cursor): # Cast numeric values as the appropriate Python type based upon the # cursor description, and convert strings to unicode. 
casted = [] for value, desc in zip(row, cursor.description): if value is not None and desc[1] is Database.NUMBER: precision, scale = desc[4:6] if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point # This will normally be an integer from a sequence, # but it could be a decimal value. if '.' in value: value = Decimal(value) else: value = int(value) else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. value = float(value) elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntField and DecimalField columns. if scale == 0: value = int(value) else: value = Decimal(value) elif '.' in value: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. value = Decimal(value) else: value = int(value) elif desc[1] in (Database.STRING, Database.FIXED_CHAR, Database.LONG_STRING): value = to_unicode(value) casted.append(value) return tuple(casted) def to_unicode(s): """ Convert strings to Unicode objects (and return all other data types unchanged). """ if isinstance(s, basestring): return force_unicode(s) return s def _get_sequence_reset_sql(): # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. return """ DECLARE table_value integer; seq_value integer; BEGIN SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = '%(sequence)s'; WHILE table_value > seq_value LOOP SELECT "%(sequence)s".nextval INTO seq_value FROM dual; END LOOP; END; /""" def get_sequence_name(table): name_length = DatabaseOperations().max_name_length() - 3 return '%s_SQ' % util.truncate_name(table, name_length).upper() def get_trigger_name(table): name_length = DatabaseOperations().max_name_length() - 3 return '%s_TR' % util.truncate_name(table, name_length).upper()
mit
mohclips/k5-ansible-modules
k5_server_console_output.py
1
8174
#!/usr/bin/python ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: k5_server_console_output short_description: Display the URL to the NoVNC Console version_added: "1.0" description: - returns the openstack console output. options: server_name: description: - Name of the server. required: true default: None console_length: description: - Number of lines to tail from the console output required: true default: None requirements: - "python >= 2.6" ''' EXAMPLES = ''' # Get server console output - k5_server_console_output: server_name: test01 length: 50 k5_auth: "{{ k5_auth_facts }}" ''' RETURN = ''' k5_server_console_output description: List of the output details. returned: On success when the server is found type: list sample: "Generation complete.", "", "Ubuntu 14.04.3 LTS nx-demo01-1a-jumpserver ttyS0", "", "nx-demo01-1a-jumpserver login: Cloud-init v. 0.7.5 running 'modules:final' at Fri, 06 Jan 2017 23:08:22 +0000. 
Up 31.10 seconds.", "ci-info: ++++++++++Authorized keys from /home/ubuntu/.ssh/authorized_keys for user ubuntu+++++++++++", "ci-info: +---------+-------------------------------------------------+---------+-------------------+", "ci-info: | Keytype | Fingerprint (md5) | Options | Comment |", "ci-info: +---------+-------------------------------------------------+---------+-------------------+", "ci-info: | ssh-rsa | ce:b7:7c:b9:48:98:d5:9a:1c:5e:8e:4e:56:81:ee:02 | - | Generated by Nova |", "ci-info: +---------+-------------------------------------------------+---------+-------------------+", "ec2: ", "ec2: #############################################################", "ec2: -----BEGIN SSH HOST KEY FINGERPRINTS-----", "ec2: 1024 39:46:9c:ef:af:91:bf:be:a4:a4:3e:a2:0c:16:34:ed root@nx-demo01-1a-jumpserver (DSA)", "ec2: 256 43:b4:59:1b:e8:ff:94:46:64:f8:76:15:37:ed:45:2c root@nx-demo01-1a-jumpserver (ECDSA)", "ec2: 2048 6f:2a:0e:bb:ab:fe:78:a7:01:bc:b6:4c:5c:ca:90:1a root@nx-demo01-1a-jumpserver (RSA)", "ec2: -----END SSH HOST KEY FINGERPRINTS-----", "ec2: #############################################################", "-----BEGIN SSH HOST KEY KEYS-----", "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPshjqQvI/QzWzuMBvg5H6lRQljJaBiOnsdrISbz8W/poNWKjloRbp6MT9XF7xU1GCJoJsp3KM/NjWI85tUa4Yc= root@nx-demo01-1a-jumpserver", "ssh-rsa AAAAB3NzaC1yc2EAAAA5AQABAAABAQDHA7Th3EZSOr6Kji2gaFDcgsO+jBETHGbJiPKPanvlYOrmsyKney0ugGcpt1cOC+OsthS968eLJQWjIKjyQHSW8/5NjissrNX6fNyE8UAKyfgcvR9jYa/nrNld7Y5cucZI6tkBRl4Z2wB5hLfEQQdCCt7uGv9s6gAMGOk5hSiXtcbLmGs/jH9KCYnNsVc6IehQ/Svf+7wJj7o721KAvlWLrP8DDJyfa6Oic481z2nMsCRxJf2MupAHzee1v9MrHdJPh191eV6d9Bsruit+64YK6KslsNSVlBb7zadDIYMme8L2YWKf0Y1VX5wRNbPyoVQzaMQCoIwxI9bsbJ7sn9h/ root@nx-demo01-1a-jumpserver", "-----END SSH HOST KEY KEYS-----", "Cloud-init v. 0.7.5 finished at Fri, 06 Jan 2017 23:08:22 +0000. Datasource DataSourceOpenStack [net,ver=2]. 
Up 31.28 seconds", "" ], "msg": "Get Output Successful" ''' import requests import os import json from ansible.module_utils.basic import * ############## Common debug ############### k5_debug = False k5_debug_out = [] def k5_debug_get(): """Return our debug list""" return k5_debug_out def k5_debug_clear(): """Clear our debug list""" k5_debug_out = [] def k5_debug_add(s): """Add string to debug list if env K5_DEBUG is defined""" if k5_debug: k5_debug_out.append(s) ############## router functions ############# def k5_get_endpoint(e,name): """Pull particular endpoint name from dict""" return e['endpoints'][name] def k5_get_server_facts(module, k5_facts): """Get server facts""" endpoint = k5_facts['endpoints']['compute'] auth_token = k5_facts['auth_token'] session = requests.Session() headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token } url = endpoint + '/servers/detail' k5_debug_add('endpoint: {0}'.format(endpoint)) k5_debug_add('REQ: {0}'.format(url)) k5_debug_add('headers: {0}'.format(headers)) try: response = session.request('GET', url, headers=headers) except requests.exceptions.RequestException as e: module.fail_json(msg=e) # we failed to get data if response.status_code not in (200,): module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out) if 'servers' in response.json(): return response.json() else: module.fail_json(msg="Missing servers in response to server details request") def k5_get_server_console_output(module): """Get console output""" global k5_debug k5_debug_clear() if 'K5_DEBUG' in os.environ: k5_debug = True if 'auth_spec' in module.params['k5_auth']: k5_facts = module.params['k5_auth'] else: module.fail_json(msg="k5_auth_facts not found, have you run k5_auth?") endpoint = k5_facts['endpoints']['compute'] auth_token = k5_facts['auth_token'] tenant_id = k5_facts['auth_spec']['os_project_id'] server_name = module.params['server_name'] 
console_length = module.params['console_length'] # we need the server_id not server_name, so grab it server_facts = k5_get_server_facts(module, k5_facts) server_id = '' for s in server_facts['servers']: if s['name'] == server_name: server_id = s['id'] break if server_id == '': if k5_debug: module.exit_json(changed=False, msg="Server " + server_name + " not found", debug=k5_debug_out) else: module.exit_json(changed=False, msg="Server " + server_name + " not found") k5_debug_add('auth_token: {0}'.format(auth_token)) k5_debug_add('server_name: {0}'.format(server_name)) k5_debug_add('server_id: {0}'.format(server_id)) session = requests.Session() headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token } url = endpoint + '/servers/' + server_id + '/action' query_json = { "os-getConsoleOutput": { "length": console_length } } k5_debug_add('endpoint: {0}'.format(endpoint)) k5_debug_add('REQ: {0}'.format(url)) k5_debug_add('headers: {0}'.format(headers)) k5_debug_add('json: {0}'.format(query_json)) try: response = session.request('POST', url, headers=headers, json=query_json) except requests.exceptions.RequestException as e: module.fail_json(msg=e) # we failed to make a change if response.status_code not in (200,): module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out) output = response.json()['output'].split('\n') # because ansible cannot diplay multiline strings if k5_debug: module.exit_json(changed=True, msg="Get Output Successful", k5_server_console_output=output, debug=k5_debug_out ) module.exit_json(changed=True, msg="Get Output Successful", k5_server_console_output=output ) ###################################################################################### def main(): module = AnsibleModule( argument_spec=dict( server_name = dict(required=True, default=None, type='str'), console_length = dict(required=True, default=None, type='int'), k5_auth = 
dict(required=True, default=None, type='dict') ) ) k5_get_server_console_output(module) ###################################################################################### if __name__ == '__main__': main()
gpl-3.0
ferranti/watchdog
vendor/BeautifulSoup.py
112
77863
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html Here, have some legalese: Copyright (c) 2004-2008, Leonard Richardson All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the the Beautiful Soup Consortium and All Night Kosher Bakery nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. 
""" from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.0.7a" __copyright__ = "Copyright (c) 2004-2008 Leonard Richardson" __license__ = "New-style BSD" from sgmllib import SGMLParser, SGMLParseError import codecs import markupbase import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} try: set except NameError: from sets import Set as set #These hacks make Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match DEFAULT_OUTPUT_ENCODING = "utf-8" # First, the classes that represent markup elements. class PageElement: """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.contents.index(self) if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: # We're replacing this element with one of its siblings. index = self.parent.contents.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. 
myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: self.parent.contents.remove(self) except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None return self def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if (isinstance(newChild, basestring) or isinstance(newChild, unicode)) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent != None: # We're 'inserting' an element that's already one # of this object's children. if newChild.parent == self: index = self.find(newChild) if index and index < position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. 
position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def append(self, tag): """Appends the given tag to the contents of this tag.""" self.insert(len(self.contents), tag) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator, **kwargs) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, 
attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. 
r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name else: # Build a SoupStrainer strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i: i = i.next yield i def nextSiblingGenerator(self): i = self while i: i = i.nextSibling yield i def previousGenerator(self): i = self while i: i = i.previous yield i def previousSiblingGenerator(self): i = self while i: i = i.previousSibling yield i def parentGenerator(self): i = self while i: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. 
.""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __new__(cls, value): """Create a new NavigableString. When unpickling a NavigableString, this method is called with the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be passed in to the superclass's __new__ or the superclass won't know how to handle non-ASCII characters. """ if isinstance(value, unicode): return unicode.__new__(cls, value) return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) def __getnewargs__(self): return (NavigableString.__str__(self),) def __getattr__(self, attr): """text.string gives you text. This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return str(self).decode(DEFAULT_OUTPUT_ENCODING) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents 
a found HTML tag with its attributes and contents.""" def _invert(h): "Cheap function to invert a hash." i = {} for k,v in h.items(): i[v] = k return i XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", "quot" : '"', "amp" : "&", "lt" : "<", "gt" : ">" } XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) def _convertEntities(self, match): """Used in a call to re.sub to replace HTML, XML, and numeric entities with the appropriate Unicode characters. If HTML entities are being converted, any unrecognized entities are escaped.""" x = match.group(1) if self.convertHTMLEntities and x in name2codepoint: return unichr(name2codepoint[x]) elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: if self.convertXMLEntities: return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] else: return u'&%s;' % x elif len(x) > 0 and x[0] == '#': # Handle numeric entities if len(x) > 1 and x[1] == 'x': return unichr(int(x[2:], 16)) else: return unichr(int(x[1:])) elif self.escapeUnrecognizedEntities: return u'&amp;%s;' % x else: return u'&%s;' % x def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs == None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False self.convertHTMLEntities = parser.convertHTMLEntities self.convertXMLEntities = parser.convertXMLEntities self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities # Convert any HTML, XML, or numeric entities in the attribute values. 
convert = lambda(k, val): (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", self._convertEntities, val)) self.attrs = map(convert, self.attrs) def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. 
tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. Should this be fixed?""" if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + ")") def _sub_entity(self, x): """Used with a regular expression to substitute the appropriate XML entity for an XML special character.""" return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. 
NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isString(val): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" if "'" in val: # TODO: replace with apos when # appropriate. val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. 
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def decompose(self): """Recursively destroys the contents of this tree.""" contents = [i for i in self.contents] for i in contents: if isinstance(i, Tag): i.decompose() else: i.extract() self.extract() def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. 
If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. 
The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): for i in range(0, len(self.contents)): yield self.contents[i] raise StopIteration def recursiveChildGenerator(self): stack = [(self, 0)] while stack: tag, start = stack.pop() if isinstance(tag, Tag): for i in range(start, len(tag.contents)): a = tag.contents[i] yield a if isinstance(a, Tag) and tag.contents: if i < len(tag.contents) - 1: stack.append((tag, i+1)) stack.append((a, 0)) break raise StopIteration # Next, a couple classes to represent queries and their results. 
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element
    (tag or text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for 'attrs' is shorthand for a CSS class match.
        if isString(attrs):
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                # Copy before merging so the caller's dict (and the
                # shared {} default) are never mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Match this strainer against a tag, given either a Tag object
        or a (name, attrs) pair.  Returns the matched object or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            # A Tag supports dict-style .get(), so it can stand in for
            # its own attribute map below.
            markupAttrs = markup
        # A callable name is given the raw (name, attrs) data, but only
        # when we were not handed a full Tag.
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                # Build the attribute map lazily, on the first attr test.
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k, v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Match this strainer against an arbitrary piece of markup:
        a list of elements, a Tag, or a string-like text node."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            # Python 2 raise syntax; runtime message kept verbatim.
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        """Core matching rule: matchAgainst may be True (match any
        non-None), a callable, a regexp object, a list, a dict, or a
        string; markup is a Tag, a string, or None."""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif isList(matchAgainst):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): this passes the entire dict as a single
                # key to has_key, which can only be True if the dict
                # itself was stored as a key — looks suspect; confirm
                # intent before relying on dict-valued matchAgainst.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Compare in matching string types (unicode vs. str).
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result


class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # NOTE(review): list.__init__([]) initializes a throwaway list,
        # not self; it only "works" because list.__new__ already made
        # self an empty list.  Should be list.__init__(self).
        list.__init__([])
        self.source = source

# Now, some helper functions.

def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    return hasattr(l, '__iter__') \
           or (type(l) in (types.ListType, types.TupleType))

def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    # NOTE(review): unicode is a subclass of basestring, so the first
    # isinstance test is redundant where basestring exists; the
    # NameError fallback covers interpreters without unicode/basestring.
    try:
        return isinstance(s, unicode) or isinstance(s, basestring)
    except NameError:
        return isinstance(s, str)

def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps.

    Note: with no *args this returns an empty map regardless of
    'default'."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            #It's a map. Merge it.
            for k, v in portion.items():
                built[k] = v
        elif isList(portion):
            #It's a list. Map each item to the default.
            for k in portion:
                built[k] = default
        else:
            #It's a scalar. Map it to the default.
            built[portion] = default
    return built

# Now, the parser classes.

class BeautifulStoneSoup(Tag, SGMLParser):

    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    # Subclasses override these to describe tag behavior.
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # Regex fixups applied to the raw markup before parsing (see
    # __init__ docstring for the two cases these handle).
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    ROOT_TAG_NAME = u'[document]'

    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            # Raised by start_meta when the document is re-parsed with
            # a newly discovered encoding; the re-parse has completed.
            pass
        self.markup = None                 # The markup can now be GCed

    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)

    def _feed(self, inDocumentEncoding=None, isHTML=False):
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            # UnicodeDammit (defined outside this chunk) sniffs the
            # encoding and converts to unicode.
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                if not isList(self.markupMassage):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def __getattr__(self, methodName):
        """This method routes method call requests to either the
        SGMLParser superclass or the Tag superclass, depending on the
        method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)

        if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
               or methodName.find('do_') == 0:
            return SGMLParser.__getattr__(self, methodName)
        elif methodName.find('__') != 0:
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        # Re-initialize both base classes; the soup object itself is
        # the root Tag and the bottom of the tag stack.
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)

    def popTag(self):
        tag = self.tagStack.pop()
        # Tags with just one string-owning child get the child as a
        # 'string' property, so that soup.tag.string is shorthand for
        # soup.tag.contents[0]
        if len(self.currentTag.contents) == 1 and \
           isinstance(self.currentTag.contents[0], NavigableString):
            self.currentTag.string = self.currentTag.contents[0]

        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flush accumulated character data into the tree as a
        containerClass node, collapsing all-whitespace runs to a single
        space or newline unless inside a PRESERVE_WHITESPACE_TAGS tag."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            if (currentData.translate(self.STRIP_ASCII_SPACES) == ''
                and not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            return

        numPops = 0
        mostRecentTag = None
        # Scan from the top of the stack (index 0 is the root and is
        # deliberately excluded).
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurrence.
                popTo = name
                break
            if (nestingResetTriggers != None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers == None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            # NOTE(review): this rebind of the loop variable has no
            # effect — p is reassigned from tagStack[i] on the next
            # iteration.  Looks vestigial; confirm before removing.
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag; we are inside a QUOTE_TAGS element
            #(e.g. <script>) whose content is treated as literal text.
            #print "<%s> is not real!" % name
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        # Character data is buffered until endData() flushes it.
        self.currentData.append(data)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        # 'ref' is the decimal codepoint string from sgmllib's charref
        # pattern (decimal only, so int() is safe here).
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.convertXMLEntities:
                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
                # TODO: We've got a problem here. We're told this is
                # an entity reference, but it's not an XML entity
                # reference or an HTML entity reference. Nonetheless,
                # the logical thing to do is to pass it through as an
                # unrecognized entity reference.
                #
                # Except: when the input is "&carol;" this function
                # will be called with input "carol". When the input is
                # "AT&T", this function will be called with input
                # "T". We have no way of knowing whether a semicolon
                # was present originally, so we don't know whether
                # this is an unknown entity or just a misplaced
                # ampersand.
                #
                # The more common case is a misplaced ampersand, so I
                # escape the ampersand and omit the trailing semicolon.
                data = "&amp;%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA: consume to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j


class BeautifulSoup(BeautifulStoneSoup):

    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurrence of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurrence
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable, try
    ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br', 'hr', 'input', 'img', 'meta',
                                     'spacer', 'link', 'frame', 'base'])

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    QUOTE_TAGS = {'script': None, 'textarea': None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub',
                            'sup', 'center']

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = {'ol': [],
                          'ul': [],
                          'li': ['ul', 'ol'],
                          'dl': [],
                          'dd': ['dl'],
                          'dt': ['dl']}

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table': [],
                           'tr': ['table', 'tbody', 'tfoot', 'thead'],
                           'td': ['tr'],
                           'th': ['tr'],
                           'thead': ['table'],
                           'tbody': ['table'],
                           'tfoot': ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS,
                                NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        # Aborts the current parse; __init__ catches
                        # StopParsing after the nested _feed completes.
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True


class StopParsing(Exception):
    # Control-flow exception used by start_meta to abandon a parse when
    # a better document encoding is discovered mid-stream.
    pass


class ICantBelieveItsBeautifulSoup(BeautifulSoup):

    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']

    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)


class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): buildTagMap('noscript') passes no portions, so this
    # always evaluates to {} and 'noscript' never resets nesting here.
    # Probably intended: buildTagMap(None, ['noscript']).  Confirm
    # before changing — downstream code may rely on the empty map.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}


class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do.
Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisiness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except ImportError: chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. 
try: import cjkcodecs.aliases except ImportError: pass try: import iconv_codec except ImportError: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml', isHTML=False): self.declaredHTMLEncoding = None self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup, isHTML) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if type(sub) == types.TupleType: if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in 
self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data, isHTML=False): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE 
sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass except: xml_encoding_match = None xml_encoding_match = re.compile( '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) if not xml_encoding_match and isHTML: regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) xml_encoding_match = regexp.search(xml_data) if xml_encoding_match is not None: xml_encoding = xml_encoding_match.groups()[0].lower() if isHTML: self.declaredHTMLEncoding = xml_encoding if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def 
_codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except (LookupError, ValueError): pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : 
('tilde', '2DC'), '\x99' : ('trade', '2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print soup.prettify()
agpl-3.0
google/makerspace-auth
software/authbox/tests/test_config.py
2
3709
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for authbox.config""" import unittest import authbox.config class ConfigTest(unittest.TestCase): def test_parse_time(self): cfg = authbox.config.Config # ints self.assertEqual(61, cfg.parse_time("61")) self.assertEqual(61, cfg.parse_time("61s")) self.assertEqual(90, cfg.parse_time("1m30s")) self.assertEqual(3660, cfg.parse_time("61m")) self.assertEqual(7200, cfg.parse_time("2h")) self.assertEqual(86400, cfg.parse_time("1d")) self.assertEqual(1.5, cfg.parse_time("1.5s")) self.assertEqual(4320, cfg.parse_time("1.2h")) self.assertRaises(Exception, cfg.parse_time, "") self.assertRaises(Exception, cfg.parse_time, "30x") self.assertEqual(5, cfg.parse_time(5)) def test_get_int_seconds(self): c = authbox.config.Config(None) c._config.add_section("section") c._config.set("section", "a", "15m") self.assertEqual(15 * 60, c.get_int_seconds("section", "a", 999)) def test_get_int_seconds_absent(self): c = authbox.config.Config(None) self.assertEqual(999, c.get_int_seconds("section", "a", 999)) def test_get(self): c = authbox.config.Config(None) c._config.add_section("section") c._config.set("section", "a", "1{b}2") c._config.set("section", "b", "x") self.assertEqual("1x2", c.get("section", "a")) class OneSectionConfig(object): def __init__(self, contents): self.contents = contents def get(self, section_name, key): if section_name == "section": return self.contents[key] class 
RecursiveConfigParamLookupTest(unittest.TestCase): def test_simple(self): c = OneSectionConfig({"a": "b"}) self.assertEqual( "abc", authbox.config.recursive_config_lookup("abc", c, "section") ) self.assertEqual( "b", authbox.config.recursive_config_lookup("{a}", c, "section") ) self.assertRaises( KeyError, authbox.config.recursive_config_lookup, "{x}", c, "section" ) def test_simple_left_alone(self): c = OneSectionConfig({"a": "b"}) self.assertEqual( "{0} b", authbox.config.recursive_config_lookup("{0} {a}", c, "section") ) self.assertEqual( "{} b", authbox.config.recursive_config_lookup("{} {a}", c, "section") ) def test_recursive(self): c = OneSectionConfig( {"a": "{b}{b}", "b": "{c}2", "c": "d", "broken": "{b2}", "b2": "{missing}"} ) self.assertEqual( "d2d2", authbox.config.recursive_config_lookup("{a}", c, "section") ) self.assertRaises( KeyError, authbox.config.recursive_config_lookup, "{broken}", c, "section" ) def test_recursive_fail(self): c = OneSectionConfig({"a": "{b}", "b": "{a}"}) self.assertRaises( authbox.config.CycleError, authbox.config.recursive_config_lookup, "{a}", c, "section", )
apache-2.0
polypapps/RandomRango_Web
vendor/doctrine/orm/docs/en/conf.py
2448
6497
# -*- coding: utf-8 -*- # # Doctrine 2 ORM documentation build configuration file, created by # sphinx-quickstart on Fri Dec 3 18:10:24 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('_exts')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['configurationblock'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Doctrine 2 ORM' copyright = u'2010-12, Doctrine Project Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2' # The full version, including alpha/beta/rc tags. release = '2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'doctrine' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
#html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'Doctrine2ORMdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ ('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation', u'Doctrine Project Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True primary_domain = "dcorm" def linkcode_resolve(domain, info): if domain == 'dcorm': return 'http://' return None
gpl-2.0
metsarono/dotfiles
linux/.config/sublime-text-2/Packages/CSSLint/CSSLint.py
2
11828
import os import re import sublime import sublime_plugin import subprocess import zipfile try: # ST2 from version_file_checker import check_file_match except ImportError: # ST3 from .version_file_checker import check_file_match RESULT_VIEW_NAME = 'csslint_result_view' RESULT_REGION_NAME = 'csslint_highlighted_region' SETTINGS_FILE = "CSSLint.sublime-settings" PLUGIN_PATH = os.path.abspath(os.path.dirname(__file__)) PACKAGE_PATH = os.path.abspath(os.path.join(sublime.packages_path(), 'CSSLint')) SCRIPT_PATH = os.path.join(PACKAGE_PATH, 'scripts') MANIFEST = '' def plugin_loaded(): """ ST3: If run from a zip file, this method verifies the checksums of the CSSLint and Rhino scripts and extracts them to the packages folder if necessary. """ global PLUGIN_PATH global PACKAGE_PATH global SCRIPT_PATH PLUGIN_PATH = os.path.abspath(os.path.dirname(__file__)) PACKAGE_PATH = os.path.abspath(os.path.join(sublime.packages_path(), 'CSSLint')) SCRIPT_PATH = os.path.join(PACKAGE_PATH, 'scripts') manifest = [ { 'file_path': 'scripts/csslint/csslint-rhino.js', 'checksum': '71d63e12a904771978ddf06f22933e1f3e3812155545b844769703926f9dc027' }, { 'file_path': 'scripts/rhino/js.jar', 'checksum': '7ade44513268f7cfe08579f8ef69e4ea663b8613d3923491000784c650c67c8b' } ] file_matches = check_file_match(manifest, path_prefix=PACKAGE_PATH) if '.sublime-package' in PLUGIN_PATH: print('CSSLint is a .sublime-package; Verifying required files.') package = zipfile.ZipFile(PLUGIN_PATH, 'r') for match in file_matches: if match['isMatch'] is False: print('CSSLint: extracting {0} into {1}'.format(match['file_path'], PACKAGE_PATH)) package.extract(match['file_path'], PACKAGE_PATH) class CsslintCommand(sublime_plugin.TextCommand): def run(self, edit, paths=False, quiet=False): settings = sublime.load_settings(SETTINGS_FILE) self.edit = edit self.file_path = None file_paths = None self.file_paths = None cssFiles = [] self.use_console = True def add_css_to_list(path): if path.endswith('.css'): 
cssFiles.append('"' + path + '"') # Make new document for lint results - we're linting multiple files. if paths is not False: self.use_console = False # Walk through any directories and make a list of css files for path in paths: if os.path.isdir(path) is True: for path, subdirs, files in os.walk(path): for name in files: add_css_to_list(os.path.join(path, name)) else: add_css_to_list(path) # Generate the command line paths argument if len(cssFiles) < 1: sublime.error_message("No CSS files selected.") return else: self.file_paths = cssFiles file_paths = ' '.join(cssFiles) # set up new file for lint results self.current_document = sublime.active_window().new_file() self.current_document.insert(self.edit, self.current_document.size(), 'CSSLint Results\n\n') # Invoke console - we're linting a single file. else: if self.view.window().active_view().file_name() is None: if quiet is False: sublime.error_message("CSSLint: Please save your file before linting.") return if self.view.window().active_view().file_name().endswith('css') is not True: if quiet is False: sublime.error_message("CSSLint: This is not a css file.") return self.file_path = '"' + self.view.window().active_view().file_name() + '"' # init_tests_panel(self) show_tests_panel(self) # Begin linting. 
file_name = os.path.basename(self.file_path) if self.file_path else ', '.join(self.file_paths) self.buffered_data = '' self.file_name = file_name path_argument = file_paths if file_paths else self.file_path self.is_running = True rhino_path = settings.get('rhino_path') if settings.has('rhino_path') and settings.get('rhino_path') != False else '"{0}"'.format(os.path.join(SCRIPT_PATH, 'rhino', 'js.jar')) csslint_rhino_js = settings.get('csslint_rhino_js') if settings.has('csslint_rhino_js') and settings.get('csslint_rhino_js') != False else '"{0}"'.format(os.path.join(SCRIPT_PATH, 'csslint', 'csslint-rhino.js')) errors = ' --errors=' + ','.join(settings.get('errors')) if isinstance(settings.get('errors'), list) and len(settings.get('errors')) > 0 else '' warnings = ' --warnings=' + ','.join(settings.get('warnings')) if isinstance(settings.get('warnings'), list) and len(settings.get('warnings')) > 0 else '' ignores = ' --ignore=' + ','.join(settings.get('ignore')) if isinstance(settings.get('ignore'), list) and len(settings.get('ignore')) > 0 else '' options = '--format=compact' + errors + warnings + ignores cmd = 'java -jar ' + rhino_path + ' ' + csslint_rhino_js + ' ' + options + ' ' + path_argument self.run_linter(cmd) def update_status(self, msg, progress): sublime.status_message(msg + " " + progress) def process_data(self, data, end=False): # truncate file paths but save them in an array. # add error number to each line - needed for finding full path. def munge_errors(data): data_all_lines = data.split('\n') data_nonempty_lines = [] self.errors = [] # remove empty lines for line in data_all_lines: if len(line) > 0: data_nonempty_lines.append(line) # truncate path for display, save full path in array. 
for line in data_nonempty_lines: full_path_string = line[0:line.find('css:') + 3] path_to_remove = full_path_string + ': ' cleaned_error_item = line.replace(path_to_remove, '') found_error = False def add_new_error(): new_error_stylesheet = { 'full_path': full_path_string, 'items': [cleaned_error_item] } self.errors.append(new_error_stylesheet) for error in self.errors: if error['full_path'] == full_path_string: found_error = True error['items'].append(cleaned_error_item) break if found_error is False: add_new_error() # Concatenate buffered data but prevent duplicates. self.buffered_data = self.buffered_data + data.decode("utf-8") data = self.buffered_data.replace('\r\n', '\n').replace('\r', '\n') if end is False: rsep_pos = data.rfind('\n') if rsep_pos == -1: # not found full line. return self.buffered_data = data[rsep_pos+1:] data = data[:rsep_pos+1] munge_errors(data) # Push to display. if self.use_console is True: self.output_to_console() else: self.output_to_document() def output_to_console(self): self.output_view.set_read_only(False) for error_section in self.errors: self.output_view.insert(self.edit, self.output_view.size(), '\n'.join(error_section['items'])) self.output_view.set_read_only(True) CsslintEventListener.disabled = False def output_to_document(self): for error_section in self.errors: error_output = error_section['full_path'] + '\n\t' + '\n\t'.join(error_section['items']) + '\n\n' self.current_document.insert(self.edit, self.current_document.size(), error_output) def run_linter(self, cmd): self.proc = subprocess.Popen(cmd, env={"PATH": os.environ['PATH']}, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) result = self.proc.communicate()[0] if result is not None: sublime.set_timeout(self.process_data(result), 0) class CsslintSelectionCommand(sublime_plugin.WindowCommand): def run(self, paths=[]): self.view.window().run_command('csslint', {"paths": paths}) class CsslintEventListener(sublime_plugin.EventListener): disabled = False def 
__init__(self): self.previous_region = None self.file_view = None def on_post_save(self, view): settings = sublime.load_settings(SETTINGS_FILE) if settings.get('run_on_save') is True: view.window().run_command("csslint", {'quiet': True}) # for some reason on_selection_modified_async does not fire any events, # but this one does. def on_selection_modified(self, view): if CsslintEventListener.disabled: return if view.name() != RESULT_VIEW_NAME: return region = view.line(view.sel()[0]) # make sure call once. if self.previous_region == region: return self.previous_region = region # extract line from csslint result. text = view.substr(region) if len(text) < 1: return line = re.search('(?<=line\s)[0-9]+', text).group(0) # hightlight view line. view.add_regions(RESULT_VIEW_NAME, [region], "comment") # highlight the selected line in the active view. file_view = sublime.active_window().active_view() # if self.file_view is None else self.file_view file_view.run_command("goto_line", {"line": line}) file_region = file_view.line(file_view.sel()[0]) # highlight file_view line region_settings = sublime.DRAW_NO_FILL if hasattr(sublime, 'DRAW_NOFILL') else sublime.DRAW_OUTLINED file_view.add_regions(RESULT_REGION_NAME, [file_region], "string", "", region_settings) if hasattr(self, 'file_view') is True: self.file_view = file_view def on_deactivated(self, view): if view.name() == RESULT_VIEW_NAME: if hasattr(self, 'file_view'): self.file_view.erase_regions(RESULT_REGION_NAME) def show_tests_panel(self): """Initializes (if not already initialized) and shows the results output panel.""" if not hasattr(self, 'output_view'): try: # ST3 self.output_view = self.view.window().create_output_panel(RESULT_VIEW_NAME) except AttributeError: # ST2 self.output_view = self.view.window().get_output_panel(RESULT_VIEW_NAME) self.output_view.set_name(RESULT_VIEW_NAME) # self.output_view.settings().set("file_path", self.file_path) clear_test_view(self) self.view.window().run_command("show_panel", 
{"panel": "output." + RESULT_VIEW_NAME}) def clear_test_view(self): self.output_view.set_read_only(False) self.output_view.erase(self.edit, sublime.Region(0, self.output_view.size())) self.output_view.set_read_only(True)
gpl-3.0
wroersma/volatility
volatility/plugins/mac/arp.py
58
1398
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility.  If not, see <http://www.gnu.org/licenses/>.
#

"""
@author:       Andrew Case
@license:      GNU General Public License 2.0
@contact:      atcuno@gmail.com
@organization:
"""

import volatility.obj as obj
import volatility.plugins.mac.common as common
import volatility.plugins.mac.route as route


class mac_arp(route.mac_route):
    """ Prints the arp table """

    def calculate(self):
        common.set_plugin_members(self)

        # Resolve the kernel symbol holding the head of the ARP
        # (llinfo_arp) linked list.
        head_addr = self.addr_space.profile.get_symbol("_llinfo_arp")
        head_ptr = obj.Object("Pointer", offset = head_addr, vm = self.addr_space)
        entry = head_ptr.dereference_as("llinfo_arp")

        # Walk the list, yielding each entry's route (rendered by the
        # inherited mac_route renderer).
        while entry:
            yield entry.la_rt
            entry = entry.la_le.le_next
gpl-2.0
kaku289/paparazzi
sw/tools/airframe_editor/paparazzi.py
72
3597
#!/usr/bin/env python from __future__ import print_function import glob from collections import namedtuple from os import path, getenv #from subprocess import call import commands import lxml.etree as ET # if PAPARAZZI_HOME not set, then assume the tree containing this # file is a reasonable substitute home_dir = getenv("PAPARAZZI_HOME", path.normpath(path.join( path.dirname(path.abspath(__file__)), '../../../'))) # Directories firmwares_dir = path.join(home_dir, "conf/firmwares/") modules_dir = path.join(home_dir, "conf/modules/") airframes_dir = path.join(home_dir, "conf/airframes/") boards_dir = path.join(home_dir, "conf/boards/") # Structures PprzModule = namedtuple("PprzModule", "description defines configures") # List Of Stuff def get_list_of_files(directory, extension): mylist = glob.glob(path.join(directory, "*" + extension)) mylist.sort() ret = [] for it in mylist: ret.append( it.replace(directory, "").replace(extension, "")) return ret def get_list_of_modules(): return get_list_of_files( modules_dir, ".xml") def get_list_of_firmwares(): return get_list_of_files( firmwares_dir, ".makefile") def get_list_of_boards(): return get_list_of_files( boards_dir, ".makefile") def get_list_of_subsystems(firmware): subsys_dir = path.join( firmwares_dir, "subsystems/" + firmware + "/") # \todo how about shared #subsys_dir = path.join( firmwares_dir, "subsystems/shared/" ) return get_list_of_files(subsys_dir, ".makefile") def get_list_of_servo_drivers(): # \todo where do we know this? 
return ["Ppm", "Asctec", "Scilab"] def get_module_information(module_name): str_desc = "" lst_def = [] lst_conf = [] try: xml = ET.parse(path.join(modules_dir, module_name + ".xml")) root = xml.getroot().find("doc") str_desc = root.find("description").text for block in root.iter("define"): lst_def.append([block.get("name"), block.get("value"), block.get("unit"), block.get("description")]) for block in root.iter("configure"): lst_conf.append([block.get("name"), block.get("value"), block.get("unit"), block.get("description")]) except (IOError, ET.XMLSyntaxError) as e: print(e.__str__()) return PprzModule(description=str_desc, defines=lst_def, configures=lst_conf) def search(string): #return call(["grep", "-r", string , home_dir + "/sw/airborne/"]) #return system("grep -r " + string + " " + home_dir + "/sw/airborne/") cmd = "grep -r " + string + " " + home_dir + "/sw/airborne/" status, output = commands.getstatusoutput(cmd) return output.replace(home_dir + "/sw/airborne/", "") if __name__ == '__main__': print("\nPAPARAZZI\n=========\n\nContent listing of current branch\n") print("\nBOARDS\n------\n") boards = get_list_of_boards() for b in boards: print(" - ```" + b + "```" ) print("\nFIRMWARES - SUBSYSTEMS\n---------\n") firmwares = get_list_of_firmwares() firmwares.append("shared") for f in firmwares: print(" - " + f) subsystems = get_list_of_subsystems(f) for s in subsystems: print(" - ```", s, "```") print("\nMODULES\n-------\n") modules = get_list_of_modules() for m in modules: info = get_module_information(m) d = info.description if ((d is None) or (len(d) == 0)): d = " " print(" - ```" + m + "``` " + d.split('\n', 1)[0]) # for mod in get_list_of_modules(): # print(mod, " ---> ", get_module_information(mod))
gpl-2.0
nortikin/sverchok
node_scripts/SNLite_templates/utils/splitting_pols.py
2
1548
""" in vers_in v d=[[]] n=0 in pols_in s d=[[]] n=0 in value s d=0.5 n=2 in direction s d=0 n=2 in shift s d=0 n=2 in ignoresecond s d=0 n=2 out vers_out v out pols_out s """ from mathutils import Vector as V pols_out, vers_out = [], [] sh = True igs = ignoresecond for ver, pol in zip(vers_in,pols_in): nex = len(ver) np,nv = [],ver for p in pol: igs = not igs if ignoresecond and igs: np.append(p) continue nex += 2 sh = not sh if not direction: if shift and not sh: np.append([p[0],nex-2,nex-1,p[3]]) else: np.append([p[3],nex-1,nex-2,p[0]]) if shift and sh: np.append([nex-1,p[2],p[1],nex-2]) else: np.append([nex-2,p[1],p[2],nex-1]) nv.append(list((V(ver[p[1]])+value*(V(ver[p[0]])-V(ver[p[1]]))).to_tuple())) nv.append(list((V(ver[p[2]])+value*(V(ver[p[3]])-V(ver[p[2]]))).to_tuple())) else: if shift and not sh: np.append([nex-1,p[3],p[2],nex-2]) else: np.append([nex-2,p[2],p[3],nex-1]) if shift and sh: np.append([nex-2,p[1],p[0],nex-1]) else: np.append([nex-1,p[0],p[1],nex-2]) nv.append(list((V(ver[p[2]])+value*(V(ver[p[1]])-V(ver[p[2]]))).to_tuple())) nv.append(list((V(ver[p[3]])+value*(V(ver[p[0]])-V(ver[p[3]]))).to_tuple())) pols_out.append(np) vers_out.append(nv)
gpl-3.0
magreiner/base
lib/python/openstack_get_cluster_nodes.py
1
2900
"""Emit an Ansible inventory for an OpenStack-hosted Docker Swarm cluster.

Queries `openstack server list` for manager and worker nodes and prints
host lines plus the [docker_engine], [docker_swarm_manager] and
[docker_swarm_worker] groups to stdout.
"""
import json
import subprocess
import sys  # required: parse_arguments() calls sys.exit() on bad usage
from optparse import OptionParser


def parse_arguments():
    """Parse command line options; exit with usage on stray arguments."""
    parser = OptionParser(
        prog="openstack_get_cluster_nodes",
        version="0",
        usage="%prog [options]")
    parser.add_option(
        "-k", "--key_file", type="string", default="swarm_key_1.pem",
        help="Name of ssh key file (default: %default)")
    parser.add_option(
        "-m", "--manager", type="string", default="manager",
        help="Name of the manager node (default: %default)")
    parser.add_option(
        "-n", "--name", type="string", default="swarm-1",
        help="Name of the cluster stack (default: %default)")
    parser.add_option(
        "-w", "--worker", type="string", default="worker",
        help="Name of the worker node (default: %default)")
    (opts, args) = parser.parse_args()
    if len(args) != 0:
        parser.print_help()
        # Previously raised NameError: `sys` was used without being imported.
        sys.exit(1)
    return opts


# get the commandline arguments
opts = parse_arguments()

# Alternative approach via heat stack outputs, kept for reference:
#manager_ips = subprocess.run("openstack stack output show -c output_value -f json drupal-1 manager_public_ip", shell=True, stdout=subprocess.PIPE)
#manager_ips = json.loads(str(manager_ips.stdout, encoding='utf-8'))
#manager_ips = json.loads(manager_ips['output_value'])
#worker_ips = subprocess.run("openstack stack output show -c output_value -f json drupal-1 worker_public_ip", shell=True, stdout=subprocess.PIPE)
#worker_ips = json.loads(str(worker_ips.stdout, encoding='utf-8'))
#worker_ips = json.loads(worker_ips['output_value'])

# Ask the OpenStack CLI for the Networks column of all matching servers.
m_ips = subprocess.run("openstack server list --name {} -c Networks -f json".format(opts.manager),
                       shell=True, stdout=subprocess.PIPE)
m_ips = json.loads(str(m_ips.stdout, encoding='utf-8'))
w_ips = subprocess.run("openstack server list --name {} -c Networks -f json".format(opts.worker),
                       shell=True, stdout=subprocess.PIPE)
w_ips = json.loads(str(w_ips.stdout, encoding='utf-8'))

# Each Networks cell looks like "<net-name>=<ip>"; keep only the address.
manager_ips = []
for ip in m_ips:
    manager_ips.append(ip['Networks'].split('=')[1])

worker_ips = []
for ip in w_ips:
    worker_ips.append(ip['Networks'].split('=')[1])

# Host declarations with connection details.
for i, ip in enumerate(manager_ips, start=1):
    print("manager-{} ansible_ssh_host={} ansible_user=ubuntu ansible_ssh_private_key_file={}".format(i, ip, opts.key_file))
for i, ip in enumerate(worker_ips, start=1):
    print("worker-{} ansible_ssh_host={} ansible_user=ubuntu ansible_ssh_private_key_file={}".format(i, ip, opts.key_file))
print("localhost ansible_connection=local")
print()

# Group sections.
print("[docker_engine]")
for i, ip in enumerate(manager_ips, start=1):
    print("manager-{}".format(i))
for i, ip in enumerate(worker_ips, start=1):
    print("worker-{}".format(i))
print()
print("[docker_swarm_manager]")
for i, ip in enumerate(manager_ips, start=1):
    print("manager-{}".format(i))
print()
print("[docker_swarm_worker]")
for i, ip in enumerate(worker_ips, start=1):
    print("worker-{}".format(i))
bsd-2-clause
samfpetersen/gnuradio
docs/exploring-gnuradio/fm_demod.py
53
3367
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

"""Example broadcast-FM demodulator flowgraph.

Reads complex baseband samples from a file source, quadrature-demodulates
them, resamples the audio down to the sound card rate and plays it.
"""

from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import audio
from gnuradio.filter import firdes
import sys, math

# Create a top_block
class build_graph(gr.top_block):
    """Top block wiring file source -> FM demod -> resampler -> audio sink."""

    def __init__(self):
        gr.top_block.__init__(self)

        input_rate = 200e3  # rate of a broadcast FM station
        audio_rate = 44.1e3 # Rate we send the signal to the speaker

        # resample from the output of the demodulator to the rate of
        # the audio sink.
        resamp_rate = audio_rate / input_rate

        # use a file as a dummy source. Replace this with a real radio
        # receiver to capture signals over-the-air.
        src = blocks.file_source(gr.sizeof_gr_complex, "dummy.dat", True)

        # Set the demodulator using the same deviation as the receiver.
        max_dev = 75e3
        # Standard quadrature-demod gain formula: fs / (2*pi*max_dev),
        # with the extra /8.0 factor carried over from the receiver side.
        fm_demod_gain = input_rate/(2*math.pi*max_dev/8.0)
        fm_demod = analog.quadrature_demod_cf(fm_demod_gain)

        # Create a filter for the resampler and filter the audio
        # signal to 15 kHz. The nfilts is the number of filters in the
        # arbitrary resampler. It logically operates at a rate of
        # nfilts*input_rate, so we make those adjustments when
        # building the filter.
        volume = 0.20
        nfilts = 32
        resamp_taps = firdes.low_pass_2(volume*nfilts,     # gain
                                        nfilts*input_rate, # sampling rate
                                        15e3,              # low pass cutoff freq
                                        1e3,               # width of trans. band
                                        60,                # stop band attenuaton
                                        firdes.WIN_KAISER)

        # Build the resampler and filter
        resamp_filter = filter.pfb_arb_resampler_fff(resamp_rate,
                                                     resamp_taps,
                                                     nfilts)

        # sound card as final sink You may have to add a specific
        # device name as a second argument here, something like
        # "pulse" if using pulse audio or "plughw:0,0".
        audio_sink = audio.sink(int(audio_rate))

        # now wire it all together
        self.connect(src, fm_demod)
        self.connect(fm_demod, resamp_filter)
        self.connect(resamp_filter, (audio_sink,0))

def main(args):
    """Run the flowgraph until the user presses Enter."""
    tb = build_graph()
    tb.start() # fork thread and return
    raw_input('Press Enter to quit: ')
    tb.stop()

if __name__ == '__main__':
    main(sys.argv[1:])
gpl-3.0
hyperized/ansible
contrib/inventory/lxd.py
37
3723
#!/usr/bin/env python

# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2014, Hiroaki Nakamura <hnakamur@gmail.com>
# (c) 2016, Andew Clarke <andrew@oscailte.org>
#
# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible,
# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py
#
# NOTE, this file has some obvious limitations, improvements welcome
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Dynamic Ansible inventory script: lists LXD containers (via `lxc list`)
# and prints inventory JSON for --list / --host, as Ansible expects.

import os
from subprocess import Popen, PIPE
import distutils.spawn
import sys
import json

from ansible.module_utils.six.moves import configparser

# Set up defaults
resource = 'local:'
group = 'lxd'
connection = 'lxd'
hosts = {}   # container name -> IPv4 address
result = {}  # inventory structure printed for --list

# Read the settings from the lxd.ini file
# NOTE(review): SafeConfigParser is deprecated in Python 3 in favour of
# ConfigParser -- kept for compatibility with the six.moves shim.
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini')
if config.has_option('lxd', 'resource'):
    resource = config.get('lxd', 'resource')
if config.has_option('lxd', 'group'):
    group = config.get('lxd', 'group')
if config.has_option('lxd', 'connection'):
    connection = config.get('lxd', 'connection')

# Ensure executable exists
if distutils.spawn.find_executable('lxc'):

    # Set up containers result and hosts array
    result[group] = {}
    result[group]['hosts'] = []

    # Run the command and load json result
    pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True)
    lxdjson = json.load(pipe.stdout)

    # Iterate the json lxd output
    for item in lxdjson:

        # Check state and network
        if 'state' in item and item['state'] is not None and 'network' in item['state']:
            network = item['state']['network']

            # Check for eth0 and addresses
            if 'eth0' in network and 'addresses' in network['eth0']:
                addresses = network['eth0']['addresses']

                # Iterate addresses
                for address in addresses:

                    # Only return inet family addresses
                    if 'family' in address and address['family'] == 'inet':
                        if 'address' in address:
                            ip = address['address']
                            name = item['name']

                            # Add the host to the results and the host array
                            result[group]['hosts'].append(name)
                            hosts[name] = ip

    # Set the other containers result values
    result[group]['vars'] = {}
    result[group]['vars']['ansible_connection'] = connection

# Process arguments
if len(sys.argv) == 2 and sys.argv[1] == '--list':
    print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    if sys.argv[2] == 'localhost':
        print(json.dumps({'ansible_connection': 'local'}))
    else:
        # For the lxd connection plugin no address is needed; otherwise
        # report the container's IP as ansible_host.
        # NOTE(review): an unknown host name raises KeyError here.
        if connection == 'lxd':
            print(json.dumps({'ansible_connection': connection}))
        else:
            print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]}))
else:
    print("Need an argument, either --list or --host <host>")
gpl-3.0
vrsys/avangong
avango-utils/python/_converter.py
1
7630
# -*- Mode:Python -*-
##########################################################################
#
# This file is part of AVANGO.
#
# Copyright 1997 - 2010 Fraunhofer-Gesellschaft zur Foerderung der
# angewandten Forschung (FhG), Munich, Germany.
#
# AVANGO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, version 3.
#
# AVANGO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################

# Field-container utility nodes that convert and combine AVANGO field
# values (floats, vectors, matrices).  Python 2 syntax (print statement).

import avango
import avango.script
from avango.script import field_has_changed
import avango.osg


class FloatXBase(avango.script.Script):
    "Base for sequential operations on floats"

    # Prefix for the dynamically created input fields ("Input0", "Input1", ...)
    BaseFieldName = avango.SFString()
    # Number of input fields created so far
    NumFieldsOut = avango.SFInt()
    # Aggregated result computed by on_calculate()
    Output = avango.SFFloat()

    def __init__(self):
        self.super(FloatXBase).__init__()
        self.Name.value = "FloatXBase"
        self.Output.value = 0
        self.BaseFieldName.value = "Input"
        self.NumFieldsOut.value = 0
        self._actual_id = 0

    def add_and_connect_float_field(self,field):
        """Create a new numbered input field and connect *field* to it."""
        field_name = self.BaseFieldName.value + str(self._actual_id)
        if self.has_field(field_name):
            return
        #create and add the new field
        self.add_and_init_field(avango.SFFloat(), field_name, 0)
        #connect the field with the given field
        getattr(self, field_name).connect_from(field)
        self._actual_id += 1
        self.NumFieldsOut.value = self._actual_id

    def evaluate(self):
        # Delegate the actual aggregation to subclasses.
        self.on_calculate()

    def on_calculate(self):
        # Hook overridden by subclasses (sum, min, ...).
        pass


class FloatXSum(FloatXBase):
    """Sums all connected input fields into Output."""

    def __init__(self):
        self.super(FloatXSum).__init__()
        self.Name.value = "FloatXSum"

    def on_calculate(self):
        sum = 0
        for field_id in range(0,self._actual_id):
            field_name = self.BaseFieldName.value + str(field_id)
            field = self.get_field(field_name)
            if not field:
                continue
            sum += field.value
        self.Output.value = sum


class FloatXMin(FloatXBase):
    """Writes the minimum of all connected input fields to Output."""

    def __init__(self):
        self.super(FloatXMin).__init__()
        self.Name.value = "FloatXMin"

    def on_calculate(self):
        min = 1e+100 # TODO: Find a way to get the real maximum
        for field_id in range(0,self._actual_id):
            field_name = self.BaseFieldName.value + str(field_id)
            field = self.get_field(field_name)
            if not field:
                continue
            if field.value < min:
                min = field.value
        self.Output.value = min


class Float2Add(avango.script.Script):
    "Adds two float values"

    Value0 = avango.SFFloat()
    Value1 = avango.SFFloat()
    Output = avango.SFFloat()

    def __init__(self):
        self.super(Float2Add).__init__()
        self.Name.value = "Float2Add"

    def evaluate(self):
        self.Output.value = self.Value0.value + self.Value1.value


class Float4AddVec2Converter(avango.script.Script):
    "Converts four Floats into a Vec2, where vec.x = Value00 + Value01 and vec.y = Value10 + Value11"

    Value00 = avango.SFFloat()
    Value01 = avango.SFFloat()
    Value10 = avango.SFFloat()
    Value11 = avango.SFFloat()
    Output = avango.osg.SFVec2()

    def __init__(self):
        self.super(Float4AddVec2Converter).__init__()
        self.Name.value = "Float4AddVec2Converter"

    def evaluate(self):
        self.Output.value = avango.osg.Vec2(self.Value00.value+self.Value01.value,
                                            self.Value10.value+self.Value11.value)


class SFNode2MFContainerConverter(avango.script.Script):
    "Converts a SFNode to a MFNode"

    Input = avango.osg.SFNode()
    Output = avango.MFContainer()

    def __init__(self):
        self.super(SFNode2MFContainerConverter).__init__()
        self.Name.value = "SFNode2MFContainerConverter"

    def evaluate(self):
        # Wrap the single node into a one-element multi-field.
        self.Output.value = [ self.Input.value ]


class Float2Vec2Converter(avango.script.Script):
    "Converts two Floats into on Vec2"

    Value0 = avango.SFFloat()
    Value1 = avango.SFFloat()
    Output = avango.osg.SFVec2()

    def __init__(self):
        self.super(Float2Vec2Converter).__init__()
        self.Name.value = "Float2Vec2Converter"

    def evaluate(self):
        self.Output.value = avango.osg.Vec2(self.Value0.value, self.Value1.value)


class Vec3ToTransMatrix(avango.script.Script):
    """Builds a translation matrix from TransVec + TransOffset."""

    TransVec = avango.osg.SFVec3()
    TransOffset = avango.osg.SFVec3()
    Matrix = avango.osg.SFMatrix()

    def __init__(self):
        self.super(Vec3ToTransMatrix).__init__()
        self.Name.value = "Vec3ToTransMatrix"

    def evaluate(self):
        self.Matrix.value = avango.osg.make_trans_mat(self.TransVec.value + self.TransOffset.value)


def make_vec3_to_trans_matrix(vec3_field, trans_offset=avango.osg.Vec3(0,0,0)):
    """Convenience factory: connect *vec3_field* through a Vec3ToTransMatrix
    and return its Matrix output field."""
    converter = Vec3ToTransMatrix()
    converter.TransVec.connect_from(vec3_field)
    converter.TransOffset.value = trans_offset
    return converter.Matrix


class TranslationMatrixCalculator(avango.script.Script):
    """Outputs the translation-difference matrix between two input matrices."""

    MatrixFrom = avango.osg.SFMatrix()
    MatrixTo = avango.osg.SFMatrix()
    MatrixTransDif = avango.osg.SFMatrix()

    def __init__(self):
        self.super(TranslationMatrixCalculator).__init__()
        self.Name.value = "TranslationMatrixCalculator"

    def evaluate(self):
        # Skip until both inputs are set.
        if not self.MatrixFrom.value or not self.MatrixTo.value:
            return
        self.MatrixTransDif.value = avango.osg.make_trans_mat(
            self.MatrixFrom.value.get_translate() - self.MatrixTo.value.get_translate() )


class FloatToAlphaConverter(avango.script.Script):
    """Replaces the alpha component of ColorIn with the Alpha float.

    Color is only rewritten when Alpha changed since the last evaluate.
    """

    ColorIn = avango.osg.SFVec4()
    Alpha = avango.SFFloat()
    Color = avango.osg.SFVec4()

    def __init__(self):
        self.super(FloatToAlphaConverter).__init__()
        self.__alpha_changed = False
        self.__color_in = avango.osg.Vec4(1,1,1,1)
        self.Name.value = "FloatToAlphaConverter"

    @field_has_changed(ColorIn)
    def color_in_changed(self):
        self.__color_in = self.ColorIn.value

    @field_has_changed(Alpha)
    def alpha_changed(self):
        self.__alpha_changed = True

    def evaluate(self):
        # NOTE(review): debug print left in the evaluate path -- fires on
        # every traversal; consider removing.
        print "eval: " + str(self.Alpha.value)
        if self.__alpha_changed:
            self.Color.value = avango.osg.Vec4(self.__color_in.x,
                                               self.__color_in.y,
                                               self.__color_in.z,
                                               self.Alpha.value)
            self.__alpha_changed = False
lgpl-3.0