code: stringlengths 3 – 1.05M
repo_name: stringlengths 5 – 104
path: stringlengths 4 – 251
language: stringclasses, 1 value
license: stringclasses, 15 values
size: int64, 3 – 1.05M
#!/usr/bin/python

#
# Parameters of the memory system
#

# log2 of the memory capacity, in bytes
param_capacity = 27
# log2 of the number of DRAM banks
param_banks = 2
# log2 of the size of a DRAM row, in 32-bit words
param_rowsize = 10
# burst length
param_burstlength = 4
# dead time, per transaction
param_deadtime = 2
# page miss penalty
param_misspenalty = 6

#

import sys

def extract_bits(n, start, count):
    mask = 2**count - 1
    return (n & (mask << start)) >> start

def split_adr(adr):
    return extract_bits(adr, param_rowsize+param_banks, param_capacity-2-param_rowsize-param_banks+1), \
        extract_bits(adr, param_rowsize, param_banks), \
        extract_bits(adr, 0, param_rowsize)

def count_page_hits(adrs):
    openrows = [0]*(2**param_banks)
    page_hits = 0
    for adr in adrs:
        row, bank, col = split_adr(adr)
        if openrows[bank] == row:
            page_hits += 1
        openrows[bank] = row
    return page_hits

def reorder(adrs, window_size):
    window = []
    openrows = [0]*(2**param_banks)
    def fetch_transaction():
        for candidate in window:
            row, bank, col = split_adr(candidate)
            if openrows[bank] == row:
                window.remove(candidate)
                return candidate
        candidate = window[0]
        row, bank, col = split_adr(candidate)
        openrows[bank] = row
        window.remove(candidate)
        return candidate
    out = []
    for adr in adrs:
        if len(window) == window_size:
            out.append(fetch_transaction())
        window.append(adr)
    for i in range(0, window_size):
        out.append(fetch_transaction())
    return out

def print_page_hits(p, adrs):
    hit_rate = float(count_page_hits(adrs))/float(transaction_count)
    utilization = hit_rate*float(param_burstlength)/float(param_burstlength+param_deadtime) \
        + (1.0-hit_rate)*float(param_burstlength)/float(param_deadtime+param_misspenalty+param_burstlength)
    print "%d, %d, %d" % (p, 100*hit_rate, 100*utilization)

print "======= DRAM model parameters ======="
print "Memory capacity: %dMB" % ((2**param_capacity)/(1024*1024))
print "DRAM banks: %d" % (2**param_banks)
print "Row size: %d words (32-bit)" % (2**param_rowsize)
print "Burst length: %d" % param_burstlength
print "Dead time: %d" % param_deadtime
print "Miss penalty: %d" % param_misspenalty

print "======= Reading input... ======="
transaction_count = 0
write_count = 0
unordered_adrs = []
for line in sys.stdin:
    adr = int(line, 16) >> 2 # express address in 32-bit words
    unordered_adrs.append(adr)
    if extract_bits(adr, param_capacity-2, 1) == 1:
        write_count = write_count + 1
    transaction_count += 1
print "...done."
print "Transaction count: %d" % transaction_count
print "Writes: %d%%" % (100*write_count/transaction_count)
print "Reads: %d%%" % (100*(transaction_count-write_count)/transaction_count)
if write_count != 0:
    print "Reads/Writes: %.2f" % (float(transaction_count-write_count)/float(write_count))

print "======= Full reordering ======="
print "(window size, page hit rate, utilization)"
print_page_hits(1, unordered_adrs)
for window_size in [2, 3, 4, 5, 6, 7, 8, 9, 10, 25, 50, 100]:
    reordered = reorder(unordered_adrs, window_size)
    print_page_hits(window_size, reordered)
fallen/milkymist-mmu
tools/memadr_stats.py
Python
lgpl-3.0
3,130
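A quick, hedged illustration of the address arithmetic in the memadr_stats.py record above (not part of the repo): split_adr slices a word address into column, bank, and row fields, and count_page_hits treats an access as a page hit when its row matches the currently open row of its bank. Parameters are copied from the script; the example address is arbitrary.

# Hypothetical sanity check mirroring split_adr() in memadr_stats.py above.
param_capacity = 27   # log2 capacity in bytes
param_banks = 2       # log2 number of banks
param_rowsize = 10    # log2 row size in 32-bit words

def extract_bits(n, start, count):
    mask = 2**count - 1
    return (n & (mask << start)) >> start

adr = 0x12345678 >> 2  # byte address expressed in 32-bit words, as in the script
col = extract_bits(adr, 0, param_rowsize)             # low 10 bits select the column
bank = extract_bits(adr, param_rowsize, param_banks)  # next 2 bits select the bank
row = extract_bits(adr, param_rowsize + param_banks,
                   param_capacity - 2 - param_rowsize - param_banks + 1)
print(row, bank, col)  # accesses sharing the same (bank, row) pair count as page hits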
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
Parameter file for the BD_trajectory.py program
"""

# Integer, Number of simulations for the mean trajectory
nb_simul=100
# Integer, Sample size
n=216
# Float, Starting frequency for the fixation trajectory
x0=0.01
# Float, time step for the fixation trajectory
dt=0.0001
# Float, time parameter for the conditioning
tau=1.14
# String, path and name of the output file
path="../Output_files/BD_trajectory.txt"
lapierreM/Yoruba_demography
Programs/BD_trajectory_parameters.py
Python
lgpl-2.1
464
'''@file leaky_dblstm.py
contains the DBLSTM with leak class'''

import tensorflow as tf
import model
from nabu.neuralnetworks.components import layer

class LeakyDBLSTM(model.Model):
    '''A deep bidirectional LSTM classifier with memory leakage'''

    def _get_outputs(self, inputs, input_seq_length, is_training):
        '''
        Create the variables and do the forward computation

        Args:
            inputs: the inputs to the neural network, this is a list of
                [batch_size x time x ...] tensors
            input_seq_length: The sequence lengths of the input utterances,
                this is a [batch_size] vector
            is_training: whether or not the network is in training mode

        Returns:
            - output, which is a [batch_size x time x ...] tensors
        '''

        #the blstm layer
        blstm = layer.LeakyBLSTMLayer(
            num_units=int(self.conf['num_units']),
            layer_norm=self.conf['layer_norm'] == 'True',
            recurrent_dropout=float(self.conf['recurrent_dropout']),
            leak_factor=float(self.conf['leak_factor']))

        #code not available for multiple inputs!!
        #(raising a plain string is invalid Python, so raise an Exception)
        if len(inputs) > 1:
            raise Exception(
                'The implementation of DBLSTM expects 1 input and not %d'
                % len(inputs))
        else:
            inputs = inputs[0]

        with tf.variable_scope(self.scope):
            if is_training and float(self.conf['input_noise']) > 0:
                inputs = inputs + tf.random_normal(
                    tf.shape(inputs),
                    stddev=float(self.conf['input_noise']))

            logits = inputs

            for l in range(int(self.conf['num_layers'])):
                logits = blstm(logits, input_seq_length, 'layer' + str(l))

                if is_training and float(self.conf['dropout']) < 1:
                    logits = tf.nn.dropout(logits, float(self.conf['dropout']))

            output = logits

        return output
JeroenZegers/Nabu-MSSS
nabu/neuralnetworks/models/leaky_dblstm.py
Python
mit
1,833
import RPi.GPIO as GPIO
import time
import sys

#pin=32 #12 on board
#pin=12 #18 on board, default hardware pwm port. in our case, it does not work
pin=16 #23 on board. does not support pwm

try:
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup([pin], GPIO.OUT)
    print("Setup Done !!!!! yay Woho")

    #GPIO.output(pin, 1)
    #time.sleep(20)
    #GPIO.output(pin, 0)
    #print("set to low now")
    #time.sleep(20)

    print("start pwm")
    ##The servos position is controlled by the pulsewidth
    ##of a 50 Hz PWM signal.
    ##Hence, we need to turn the PWM sequence on at 50 Hz
    pwm=GPIO.PWM(pin, 50)

    ##Typically, the servo will go to the
    ## full left position when it sees a pulse width of 1 millisecond,
    ## middle position when it sees a pulse width of 1.5 millisecond,
    ## full right position when it sees a pulse width of 2 millisecond.
    ## DutyCycle = PulseWidth/(1/frequency) = PulseWidth * frequency
    ## So in short
    ## full left: 1ms, 5% duty cycle, pwm.start(5)
    ## middle position: 1.5ms 7.5% duty cycle, pwm.ChangeDutyCycle(7.5)
    ## full right position: 2ms 10% duty cycle, pwm.ChangeDutyCycle(10)
    time.sleep(10)
    print("start 5 ...")
    pwm.start(5)
    time.sleep(10)
    print("7.5 ...")
    pwm.ChangeDutyCycle(7.5)
    time.sleep(10)
    print("10 ...")
    pwm.ChangeDutyCycle(10)
    time.sleep(10)
    print("6 ...")
    pwm.ChangeDutyCycle(6)
    time.sleep(10)
    print("0 ...")
    pwm.ChangeDutyCycle(0)
    time.sleep(10)
    #pwm.ChangeDutyCycle(20)
    #pwm.ChangeDutyCycle(40)
    #pwm.ChangeDutyCycle(70)
    print("stop ...")
    pwm.stop()
    time.sleep(10)
    print("HALT")
except:
    print("ohhh nooooooooooooo", sys.exc_info()[0])
finally:
    GPIO.cleanup()
melvinma/funbots
src/python-src/examples/motor.py
Python
apache-2.0
1,779
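The comment block in motor.py above derives servo duty cycles from pulse widths (DutyCycle = PulseWidth * frequency). A minimal sketch of that arithmetic; the helper name is invented here, not part of the repo:

# Hypothetical helper expressing the comment's formula:
# duty cycle (%) = pulse width (s) * frequency (Hz) * 100
def duty_cycle_percent(pulse_width_ms, frequency_hz=50):
    return pulse_width_ms / 1000.0 * frequency_hz * 100

print(duty_cycle_percent(1.0))  # full left  -> 5.0
print(duty_cycle_percent(1.5))  # middle     -> 7.5
print(duty_cycle_percent(2.0))  # full right -> 10.0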
#!/usr/bin/python
#
# Bootstrap script for a new device. This uploads a configuration and installs
# the Bringup sketch.
#

import sys
import os
import argparse
import subprocess

import hmtl.portscan as portscan

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", dest="config", required=True,
                        help="JSON configuration file")
    parser.add_argument("-d", "--device", dest="device",
                        help="Arduino USB device")
    parser.add_argument("-i", "--deviceid", dest="deviceid", required=True,
                        help="Device ID to configure")
    parser.add_argument("-a", "--address", dest="address",
                        help="Address to configure (defaults to device ID)")
    parser.add_argument("-t", "--type", dest="type", default="nano",
                        help="Device type for platformio scripts (nano, mini, uno, moteinomega, etc) [%(default)s)]")
    parser.add_argument("-s", "--stages", dest="stages", default="1,2,3",
                        help="Stages to execute [%(default)s]")
    parser.add_argument("--module", dest="module", default=False,
                        action='store_true',
                        help="Initially load module code")

    options = parser.parse_args()

    if options.device == None:
        options.device = portscan.choose_port()

    return options

def main():
    options = parse_args()

    if not os.path.exists(options.config):
        print("Config file %s does not exist" % options.config)
        sys.exit(1)
    config_path = os.path.abspath(options.config)

    if not options.deviceid:
        print("Must specify a device ID")
        sys.exit(1)

    if not options.address:
        options.address = options.deviceid

    stages = [int(x) for x in options.stages.split(",")]

    platformio_cmd = ["platformio", "run", "-t", "upload",
                      "-e", "%s" % options.type]

    if 1 in stages:
        # Upload the python configuration sketch
        os.chdir("/Users/amp/Dropbox/Arduino/HMTL/platformio/HMTLPythonConfig")
        print("Executing: %s cwd:%s" % (platformio_cmd, os.getcwd()))
        ret = subprocess.call(platformio_cmd)
        if ret != 0:
            print("Uploading configuration sketch failed: %s" % ret)
            sys.exit(1)

    if 2 in stages:
        # Upload a configuration
        command = "HMTLConfig -f %s -i %s -a %s -v -w -d %s" % \
                  (config_path, options.deviceid, options.address, options.device)
        print("Executing: %s" % command)
        ret = os.system(command)
        #ret = subprocess.call(command)
        if ret != 0:
            print("HMTLConfig call failed: %s" % ret)
            sys.exit(1)

    if 3 in stages:
        # Upload the initial sketch
        if options.module:
            sketch = "/Users/amp/Dropbox/Arduino/HMTL/platformio/HMTL_Module"
        else:
            sketch = "/Users/amp/Dropbox/Arduino/HMTL/platformio/HMTL_Bringup"
        os.chdir(sketch)
        print("Executing: %s cwd:%s" % (platformio_cmd, os.getcwd()))
        ret = subprocess.call(platformio_cmd)
        if ret != 0:
            print("Uploading bringup sketch failed: %s" % ret)
            sys.exit(1)

main()
aphelps/HMTL
python/Bootstrap.py
Python
mit
3,352
""" TranSPHIRE is supposed to help with the cryo-EM data collection Copyright (C) 2017 Markus Stabrin This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import shutil import os import pexpect as pe try: from PyQt4.QtCore import QObject, pyqtSlot, pyqtSignal except ImportError: from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal class MountCalculator(QObject): """ MountCalculator object. Inherits from: QObject Buttons: None Signals: sig_finished - Signal emitted, if process finished (str, str, str) """ sig_finished = pyqtSignal(str, str, str) def __init__(self, name, parent=None): """ Initialize object variables. Arguments: name - Name of the mount point parent - Parent widget (default None) Return: None """ super(MountCalculator, self).__init__(parent) self.name = name self.ssh_dict = None self.quota_command_dict = None self.password_dict = None self.kill_thread = False self.running = False @pyqtSlot(str, str) def calculate_df_quota(self, key, mount_folder): """ Calculate the quota with the help of the df command. Arguments: key - Mount point key mount_folder - Mount folder Return: None """ if key.startswith('HDD') and len(key) == 5: old_key = key key = key[:-2] else: old_key = key key = key if key != self.name.replace(' ', '_'): return None else: pass if old_key.startswith('HDD') and len(old_key) == 5: key = old_key else: pass self.running = True try: total_quota = shutil.disk_usage(mount_folder).total / 1e12 used_quota = shutil.disk_usage(mount_folder).used / 1e12 except FileNotFoundError: self.sig_finished.emit( 'Needs remount', key, 'red' ) else: if total_quota == used_quota and total_quota == 0: self.sig_finished.emit( 'Needs remount', key, 'red' ) else: self.sig_finished.emit( '{0:.1f}TB / {1:.1f}TB'.format(used_quota, total_quota), key, 'green' ) self.running = False @pyqtSlot(str, str, str, str, object, object, object) def calculate_ssh_quota( self, user, folder, device, mount_folder, ssh_dict, quota_command_dict, password_dict ): """ Calculate the quota via ssh. 
Arguments: mount_folder - Mount folder user - Username folder - Folder to mount device - Device name ssh_dict - ssh_dict containing ssh information quota_command_dict - Dictionary containing the quota commands password_dict - Dictionary containing passwords Return: None """ if device != self.name.replace(' ', '_'): return None else: pass self.running = True self.ssh_dict = ssh_dict self.quota_command_dict = quota_command_dict self.password_dict = password_dict try: used_quota, total_quota = self.get_ssh_quota( user=user, folder=folder, device=device ) except KeyError: total_quota = shutil.disk_usage(mount_folder).total / 1e12 used_quota = shutil.disk_usage(mount_folder).used / 1e12 except pe.exceptions.TIMEOUT: total_quota = shutil.disk_usage(mount_folder).total / 1e12 used_quota = shutil.disk_usage(mount_folder).used / 1e12 self.sig_finished.emit( '{0:.1f}TB / {1:.1f}TB'.format(used_quota, total_quota), device, 'green' ) self.running = False @pyqtSlot(str, str, str) def calculate_get_quota(self, key, quota, mount_folder): """ Calculate the quota by calculating the size of every file. Arguments: key - Mount point key mount_folder - Mount folder quota - User provided maximum quota Return: None """ if key != self.name.replace(' ', '_'): return None else: pass self.running = True total_quota = float(quota) try: used_quota = self.get_folder_size(mount_folder, 0) / 1024 ** 4 except PermissionError: self.sig_finished.emit( 'DENIED', key, 'red' ) except FileNotFoundError: print(mount_folder, 'Directory changed during quota estimation! Wait for next run!') self.sig_finished.emit( 'CHANGED', key, 'red' ) except OSError: print(mount_folder, 'Please remount!') self.sig_finished.emit( 'Needs remount', key, 'red' ) else: self.sig_finished.emit( '{0:.1f}TB / {1:.1f}TB'.format(used_quota, total_quota), key, 'green' ) self.running = False def get_ssh_quota(self, user, folder, device): """ Get the quota via ssh command. Arguments: user - User name folder - Mounted folder device - Device name Return: None """ command = 'ssh {0}@{1} {2}'.format( user, self.ssh_dict[device], self.quota_command_dict[device] ) child = pe.spawnu(command) try: idx = child.expect( [ "{0}@{1}'s password:".format(user, self.ssh_dict[device]), 'RSA.*' ], timeout=4 ) except pe.exceptions.TIMEOUT: print('SSH quota command failed!') raise if idx == 0: child.sendline(self.password_dict[device]) elif idx == 1: child.sendline('yes') child.expect([ "{0}@{1}'s password:".format(user, self.ssh_dict[device]), 'RSA.*' ]) child.sendline(self.password_dict[device]) else: print('SSH quota command failed!') raise pe.exceptions.TIMEOUT child.expect(pe.EOF) if self.quota_command_dict[device].startswith('quota'): used_quota, total_quota = self.get_quota_quota_command( text=child.before.split('\n'), folder=folder ) else: print('To get the quota via SSH failed, do not know how to handle {0}'.format( self.quota_command_dict[device] )) print('Command:\n{0}'.format(child.before)) print('Please write a wrapper for this case or write the content to the author of TranSPHIRE') raise pe.exceptions.TIMEOUT return used_quota, total_quota @staticmethod def get_quota_quota_command(text, folder): """ Extract the quota from the quota command. Arguments: text - Text returned by quota command. 
folder - Mounted folder Return: None """ write_value = False value_line = None for line in text: if write_value: value_line = line write_value = False else: pass if '{0}/'.format(folder.split('/')[0]) in line: write_value = True else: pass if value_line is None: raise KeyError size_list = [] for value in value_line.split()[:2]: unit = value[-1] try: int(unit) except ValueError: size = value[:-1] if unit.startswith('M'): adjust = 1024**2 elif unit.startswith('G'): adjust = 1024 elif unit.startswith('T'): adjust = 1 elif unit.startswith('P'): adjust = 1/1024 else: print( unit, 'unit of quota command not known!' ) print( 'Please contact the author of TranSPHIRE to fix this issue' ) raise KeyError else: adjust = 1024**3 size = value size_list.append(float(size) / adjust) return size_list[0], size_list[1] def get_folder_size(self, folder, size): """ Get the size of the folder recursively Arguments: folder - Folder to check contents size - Current caclulated size Return: Calculated size """ for entry in os.scandir(folder): if self.kill_thread: return size elif entry.is_dir(): size = self.get_folder_size(entry.path, size) else: size += entry.stat().st_size return size
mstabrin/transphire
transphire/mountcalculator.py
Python
gpl-3.0
10,332
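In the transphire/mountcalculator.py record above, get_quota_quota_command normalizes quota sizes to terabytes by dividing by a per-unit factor (M -> 1024**2, G -> 1024, T -> 1, P -> 1/1024; unsuffixed values are treated as kilobytes, judging by the 1024**3 divisor). A standalone sketch of that table, with a function name of my own:

# Hypothetical standalone version of the unit normalization used above;
# everything is expressed in TB, so a 'T' suffix needs no adjustment.
ADJUST = {'M': 1024**2, 'G': 1024, 'T': 1, 'P': 1/1024}

def to_terabytes(value):
    unit = value[-1]
    if unit.isdigit():  # bare number: kilobytes, per the original's else branch
        return float(value) / 1024**3
    return float(value[:-1]) / ADJUST[unit]

print(to_terabytes('512G'))  # 0.5
print(to_terabytes('2T'))    # 2.0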
# coding: utf-8

"""
    Server API

    Reference for Server API (REST/Json)

    OpenAPI spec version: 2.0.6

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import os
import sys
import unittest

import kinow_client
from kinow_client.rest import ApiException
from kinow_client.apis.groups_api import GroupsApi


class TestGroupsApi(unittest.TestCase):
    """ GroupsApi unit test stubs """

    def setUp(self):
        self.api = kinow_client.apis.groups_api.GroupsApi()

    def tearDown(self):
        pass

    def test_attach_customer_to_group(self):
        """
        Test case for attach_customer_to_group
        """
        pass

    def test_create_group(self):
        """
        Test case for create_group
        """
        pass

    def test_detach_customer_from_group(self):
        """
        Test case for detach_customer_from_group
        """
        pass

    def test_get_group(self):
        """
        Test case for get_group
        """
        pass

    def test_get_groups(self):
        """
        Test case for get_groups
        """
        pass

    def test_get_product_groups(self):
        """
        Test case for get_product_groups
        """
        pass


if __name__ == '__main__':
    unittest.main()
kinow-io/kinow-python-sdk
test/test_groups_api.py
Python
apache-2.0
1,382
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#

import logging

import deluge.component as component

log = logging.getLogger(__name__)


class EventManager(component.Component):
    def __init__(self):
        component.Component.__init__(self, "EventManager")
        self.handlers = {}

    def emit(self, event):
        """
        Emits the event to interested clients.

        :param event: DelugeEvent
        """
        # Emit the event to the interested clients
        component.get("RPCServer").emit_event(event)
        # Call any handlers for the event
        if event.name in self.handlers:
            for handler in self.handlers[event.name]:
                # log.debug("Running handler %s for event %s with args: %s", event.name, handler, event.args)
                try:
                    handler(*event.args)
                except Exception as ex:
                    log.error("Event handler %s failed in %s with exception %s",
                              event.name, handler, ex)

    def register_event_handler(self, event, handler):
        """
        Registers a function to be called when a `:param:event` is emitted.

        :param event: str, the event name
        :param handler: function, to be called when `:param:event` is emitted
        """
        if event not in self.handlers:
            self.handlers[event] = []

        if handler not in self.handlers[event]:
            self.handlers[event].append(handler)

    def deregister_event_handler(self, event, handler):
        """
        Deregisters an event handler function.

        :param event: str, the event name
        :param handler: function, currently registered to handle `:param:event`
        """
        if event in self.handlers and handler in self.handlers[event]:
            self.handlers[event].remove(handler)
bendykst/deluge
deluge/core/eventmanager.py
Python
gpl-3.0
2,071
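EventManager in the deluge record above is a plain handler registry: one list of callbacks per event name, each invoked with the event's args on emit. A minimal, self-contained sketch of that pattern (the class and sample event name here are illustrative; the real emit also forwards every event to the RPCServer component):

# Hypothetical standalone sketch of the handler-registry pattern used above.
class MiniEventManager(object):
    def __init__(self):
        self.handlers = {}

    def register_event_handler(self, event, handler):
        # one list of callbacks per event name; no duplicate registrations
        self.handlers.setdefault(event, [])
        if handler not in self.handlers[event]:
            self.handlers[event].append(handler)

    def emit(self, name, *args):
        for handler in self.handlers.get(name, []):
            handler(*args)

em = MiniEventManager()
em.register_event_handler("TorrentAddedEvent", lambda tid: print("added", tid))
em.emit("TorrentAddedEvent", "abc123")  # -> added abc123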
import unittest
from unittest.mock import Mock, patch

from spresso.model.authentication.request import IdpInfoRequest


class IdpInfoRequestTestCase(unittest.TestCase):
    @patch("spresso.model.authentication.request.GetRequest")
    def test_get_content(self, request_mock):
        netloc = "netloc"
        settings = Mock()
        select = Mock()
        endpoint = Mock()
        endpoint.path = "path"
        select.get.return_value = endpoint
        settings.endpoints_ext.select.return_value = select
        settings.scheme_well_known_info = "scheme"
        settings.verify = "verify"
        settings.proxies = "proxies"
        request = Mock()
        request_mock.return_value = request

        idp_info_request = IdpInfoRequest(netloc, settings=settings)

        settings.endpoints_ext.select.assert_called_once_with("netloc")
        select.get.assert_called_once_with("info")
        request_mock.assert_called_once_with("scheme", "netloc", "path",
                                             "verify", "proxies")

        cache = Mock()
        cache.get.return_value = "cache"
        settings.cache = cache

        res = idp_info_request.get_content()
        self.assertEqual(res, "cache")

        cache.get.return_value = None
        response = Mock()
        response.text = "response"
        request.request.return_value = response
        settings.caching_settings.select.return_value = "config"
        cache.reset_mock()
        request.reset_mock()
        settings.reset_mock()

        res = idp_info_request.get_content()

        cache.get.assert_called_once_with("netloc")
        self.assertEqual(request.request.call_count, 1)
        settings.caching_settings.select.assert_called_once_with("netloc")
        cache.set.assert_called_once_with("netloc", "config", "response")
        self.assertEqual(res, "response")
lujung/python-spresso
tests/model/authentication/test_request.py
Python
mit
1,865
from hashlib import md5
from re import sub

from counts import Counts
from google.appengine.ext import db
from google.appengine.api import users, memcache

from emend.const import DATE_SHORT
from pretty_timedelta import pretty_datetime_from_now


class User(db.Model, Counts):
    user = db.UserProperty(required=True)
    nickname = db.StringProperty()
    open = db.IntegerProperty(required=True, default=0)
    closed = db.IntegerProperty(required=True, default=0)
    created = db.DateTimeProperty(auto_now_add=True)
    banned = db.BooleanProperty(default=False)

    created_short = property(fget=lambda self: self.created.strftime(DATE_SHORT))

    def __str__(self):
        """Returns the users nickname."""
        user_nickname = sub('@.*$', '', self.user.nickname()) # obscure domain
        return self.nickname or user_nickname

    def can_edit(self):
        user = users.get_current_user()
        if users.is_current_user_admin():
            return True
        if self.user == user:
            return True

    def permalink(self, shareable=False):
        if self.can_edit() and not shareable:
            key = self.user.email()
        else:
            key = self.key()
        return "/users/%s" % key

    def shareable_permalink(self):
        return self.permalink(shareable=True)

    def put(self):
        super(User, self).put()
        self.invalidate()

    def invalidate(self):
        memcache.set(self.key().name(), self)

    def gravatar_url(self):
        gravatar = "http://www.gravatar.com/avatar"
        email_hash = md5(self.user.email())
        return "%s/%s" % (gravatar, email_hash.hexdigest())

    def created_pretty_timedelta(self):
        return pretty_datetime_from_now(self.created)

    @staticmethod
    def key_name_from_email(email, prefix="user"):
        """Accepts google.appengine.api.users.User object."""
        return '%s:%s' % (prefix, email)

    def sanitize(self, urlize):
        return dict(
            nickname=unicode(self),
            permalink=urlize(self.permalink()),
        )
tantalor/emend
app/emend/model/user.py
Python
mit
1,928
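User.gravatar_url in the emend record above addresses Gravatar images by the hex MD5 digest of the account email. The same construction in isolation, written for Python 3 (the original passes a Python 2 str directly to md5); the email is a placeholder:

# Hypothetical standalone version of the Gravatar URL construction above.
from hashlib import md5

def gravatar_url(email):
    # Gravatar addresses avatar images by the hex MD5 digest of the email
    email_hash = md5(email.encode("utf-8")).hexdigest()
    return "http://www.gravatar.com/avatar/%s" % email_hash

print(gravatar_url("someone@example.com"))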
import os

def copyrecursively(src_hdr, cpy_fldr, dest_fldr_list):
    src_fldr = os.path.join(src_hdr, cpy_fldr)
    err = 0
    for root, dirs, files in os.walk(src_hdr):
        for src_d in dirs:
            src_d_full = os.path.join(root, src_d)
            if len(src_d_full) >= len(src_fldr) and src_d_full.find(src_fldr) >= 0: # ie found a cpy_fldr & its subfolders
                for dest_fldr in dest_fldr_list:
                    dest_path = os.path.join(dest_fldr +'/', src_d_full.replace(src_hdr, ""))
                    if not os.path.exists(dest_path):
                        os.mkdir(dest_path)
        for item in files:
            if root.find(src_fldr) >= 0:
                if item.find(" ") >= 0:
                    item_cln = item.replace(" ", "_")
                    os.rename(os.path.join(root, item), os.path.join(root, item_cln))
                else:
                    item_cln = item
                src_file = os.path.join(root, item_cln)
                cmdText = "dc3dd if=" + src_file
                for dest_fldr in dest_fldr_list:
                    dest_file = os.path.join(dest_fldr, src_file.replace(src_hdr, ""))
                    cmdText = cmdText + " of=" + dest_file
                rtn_val = os.system(cmdText)
                if rtn_val == 0:
                    print(src_file + " --Ok-- ")
                else:
                    print(src_file + " --Error-- ")
                    err += 1
    print("\n\n FILE COPY COMPLETE with " + str(err) + " errors")

## MAIN ##
#source_header = "/media/JUKEBOX_C/"
#folder_to_copy = "blah"
source_header = "/media/JUKEBOX_C/"
folder_to_copy = "ALL"
destination_folders = ["/media/JUKEBOX_A", "/media/JUKEBOX_B"]
copyrecursively(source_header, folder_to_copy, destination_folders)
daisyfox/RPi_dc3dd_Duplicator
test_save.py
Python
gpl-2.0
1,794
import cloudpassage
import datetime
import os
import pytest
from cloudpassage.utility import Utility as utility

config_file_name = "portal.yaml.local"
tests_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
config_file = os.path.join(tests_dir, "configs/", config_file_name)

session_info = cloudpassage.ApiKeyManager(config_file=config_file)
key_id = session_info.key_id
secret_key = session_info.secret_key
api_hostname = session_info.api_hostname
api_port = session_info.api_port


class TestIntegrationScan:
    def get_fim_scan_with_findings(self):
        scan_type = "fim"
        scan_status = "completed_clean"
        scanner = self.build_scan_object()
        report = scanner.scan_history(module=scan_type, status=scan_status)
        for item in report:
            if (item["critical_findings_count"] >= 0 or
                    item["non_critical_findings_count"] >= 0):
                return item["id"]
        return None

    def build_scan_object(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port,
                                           integration_string="SDK-Smoke")
        return_obj = cloudpassage.Scan(session)
        return(return_obj)

    def build_server_group_object(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port,
                                           integration_string="SDK-Smoke")
        return_obj = cloudpassage.ServerGroup(session)
        return(return_obj)

    def build_server_object(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port,
                                           integration_string="SDK-Smoke")
        return_obj = cloudpassage.Server(session)
        return(return_obj)

    def get_svm_target(self):
        target_id = None
        s_group = self.build_server_group_object()
        list_of_groups = s_group.list_all()
        num_members = 0
        for g in list_of_groups:
            target_group_id = g["id"]
            num_members = g["server_counts"]["active"]
            if num_members > 0:
                members = s_group.list_members(target_group_id)
                target_id = members[0]["id"]
                break
        return(target_id)

    def get_csm_target(self):
        target_id = None
        s_group = self.build_server_group_object()
        list_of_groups = s_group.list_all()
        num_members = 0
        for g in list_of_groups:
            csm_policies = g["policy_ids"]
            num_members = g["server_counts"]["active"]
            if num_members > 0 and len(csm_policies) > 0:
                members = s_group.list_members(g["id"])
                target_id = members[0]["id"]
                break
        return(target_id)

    def get_fim_target(self):
        target_id = None
        s_group = self.build_server_group_object()
        list_of_groups = s_group.list_all()
        num_members = 0
        for g in list_of_groups:
            fim_policies = g["fim_policy_ids"]
            num_members = g["server_counts"]["active"]
            if num_members > 0 and len(fim_policies) > 0:
                members = s_group.list_members(g["id"])
                target_id = members[0]["id"]
                break
        return(target_id)

    def get_sam_target(self):
        target_id = None
        s_group = self.build_server_group_object()
        list_of_groups = s_group.list_all()
        num_members = 0
        for g in list_of_groups:
            num_members = g["server_counts"]["active"]
            if num_members > 0:
                members = s_group.list_members(g["id"])
                for member in members:
                    if member["platform"] != "windows":
                        target_id = members[0]["id"]
                        break
        return(target_id)

    def test_instantiation(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        assert cloudpassage.Scan(session)

    def test_bad_scan_type(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        s_group = cloudpassage.ServerGroup(session)
        scan_type = "barfola"
        server_id = s_group.list_all()[0]["id"]
        with pytest.raises(cloudpassage.CloudPassageValidation) as e:
            scanner.initiate_scan(server_id, scan_type)
        assert 'Unsupported scan type: barfola' in str(e)

    def test_bad_server_id(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        scan_type = "svm"
        server_id = "ABC123"
        with pytest.raises(cloudpassage.CloudPassageResourceExistence) as e:
            scanner.initiate_scan(server_id, scan_type)
        assert server_id in str(e)

    def test_sam_historical_is_unsupported(self):
        rejected = False
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        server = cloudpassage.Server(session)
        scan_type = "sam"
        server_id = server.list_all()[0]["id"]
        try:
            scanner.last_scan_results(server_id, scan_type)
        except cloudpassage.CloudPassageValidation:
            rejected = True
        assert rejected

    def test_scan_type_valid(self):
        valid_types = ["svm", "sva", "csm", "sca", "fim", "sam", "sv"]
        invalid_types = ["death_stare", "lids"]
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port)
        scanner = cloudpassage.Scan(session)
        for v in valid_types:
            assert scanner.scan_type_supported(v)
        for i in invalid_types:
            assert not scanner.scan_type_supported(i)

    def test_sv_initiate(self):
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        command = scanner.initiate_scan(target_id, "sv")
        assert command["id"]

    def test_sca_initiate(self):
        sca_aliases = ["sca", "csm"]
        scanner = self.build_scan_object()
        target_id = self.get_csm_target()
        for alias in sca_aliases:
            command = scanner.initiate_scan(target_id, alias)
            assert command["id"]

    def test_sca_retrieve(self):
        sca_aliases = ["sca", "csm"]
        scanner = self.build_scan_object()
        target_id = self.get_csm_target()
        for alias in sca_aliases:
            report = scanner.last_scan_results(target_id, alias)
            assert report["id"]

    def test_fim_initiate(self):
        scanner = self.build_scan_object()
        target_id = self.get_fim_target()
        command = scanner.initiate_scan(target_id, 'fim')
        assert command["id"]

    def test_fim_retrieve(self):
        scanner = self.build_scan_object()
        target_id = self.get_fim_target()
        report = scanner.last_scan_results(target_id, 'fim')
        assert report["id"]

    def test_svm_initiate(self):
        svm_aliases = ["svm", "sva"]
        scanner = self.build_scan_object()
        target_id = self.get_svm_target()
        for alias in svm_aliases:
            command = scanner.initiate_scan(target_id, alias)
            assert command["id"]

    def test_svm_retrieve(self):
        svm_aliases = ["svm", "sva"]
        scanner = self.build_scan_object()
        target_id = self.get_svm_target()
        for alias in svm_aliases:
            report = scanner.last_scan_results(target_id, alias)
            assert report["id"]

    def test_sam_initiate(self):
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        command = scanner.initiate_scan(target_id, "sam")
        assert command["id"]

    def test_scan_history(self):
        scanner = self.build_scan_object()
        report = scanner.scan_history()
        assert report[0]["id"]

    """
    def test_scan_history_by_serverid(self):
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        report = scanner.scan_history(server_id=target_id)
        assert report[0]["server_id"] == target_id
    """

    def test_scan_history_by_single_scan_type(self):
        """This test requires a completed SAM scan.  If you don't have one
        in your account, this scan will fail.
        """
        scan_type = "sam"
        scanner = self.build_scan_object()
        report = scanner.scan_history(module=scan_type, max_pages=2)
        assert report[0]["module"] == scan_type

    def test_scan_history_by_multi_scan_type(self):
        """This test requires a completed SAM and SVM scan.
        If your account doesn't have results from both of these scan types,
        the test will fail.
        """
        scan_types = ["sam", "svm"]
        scanner = self.build_scan_object()
        report = scanner.scan_history(module=scan_types, max_pages=2)
        assert report[0]["module"] in scan_types

    def test_scan_history_by_single_status(self):
        """This test requires scan results in your account with a status
        of completed_clean.  If you don't have any scan results with this
        status, this test will fail.
        """
        scan_status = "completed_clean"
        scanner = self.build_scan_object()
        report = scanner.scan_history(status=scan_status, max_pages=2)
        assert report[0]["status"] == scan_status

    """
    def test_scan_history_by_multi_status(self):
        scan_status = ["completed_clean", "completed_with_errors"]
        scanner = self.build_scan_object()
        target_id = self.get_sam_target()
        report = scanner.scan_history(status=scan_status, max_pages=2)
        assert report[0]["status"] in scan_status

    def test_scan_details(self):
        scanner = self.build_scan_object()
        target_id = self.get_fim_target()
        report = scanner.scan_history(server_id=target_id)
        details = scanner.scan_details(report[0]["id"])
        assert "id" in details
    """

    def test_fim_findings_details(self):
        """This test requires a FIM scan with findings.  If you don't have
        a FIM scan with resulting findings, this test will fail.
        """
        target_fim_scan_id = self.get_fim_scan_with_findings()
        scanner = self.build_scan_object()
        details = scanner.scan_details(target_fim_scan_id)
        findings = details["findings"]
        target_finding = findings[0]["id"]
        target_findings_body = scanner.findings(target_fim_scan_id,
                                                target_finding)
        assert "id" in target_findings_body

    def test_scan_history_by_date(self):
        """This test requires scan results in your account, produced in the
        last week.  If no such records exist, this test will fail.
        """
        scan = self.build_scan_object()
        until = utility.time_string_now()
        since = datetime.datetime.utcnow() - datetime.timedelta(weeks=1)
        scan_list = scan.scan_history(max_pages=2, since=since, until=until)
        assert "id" in scan_list[0]


class TestIntegrationCveException:
    def create_cve_exception_object(self):
        session = cloudpassage.HaloSession(key_id, secret_key,
                                           api_host=api_hostname,
                                           api_port=api_port,
                                           integration_string="SDK-Smoke")
        return_obj = cloudpassage.CveException(session)
        return(return_obj)

    def test_instantiation(self):
        assert self.create_cve_exception_object()

    def test_get_list(self):
        """Your account must have at least one CVE exception set.
        If you haven't set any CVE exceptions in your account, this
        test will fail.
        """
        cve_exc = self.create_cve_exception_object()
        list_of_exceptions = cve_exc.list_all()
        assert "id" in list_of_exceptions[0]

    def test_get_details(self):
        """Your account must have at least one CVE exception set.
        If you haven't set any CVE exceptions in your account, this
        test will fail.
        """
        cve_exc = self.create_cve_exception_object()
        list_of_exceptions = cve_exc.list_all()
        details = cve_exc.describe(list_of_exceptions[0]["id"])
        assert "id" in details
cloudpassage/cloudpassage-halo-python-sdk
tests/integration/test_integration_scan.py
Python
bsd-3-clause
13,263
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: J.Y Han

# start
# spawn-fcgi -d /users/hanjiyun/project/geeksoho -f /users/hanjiyun/project/geeksoho/application.py -a 127.0.0.1 -p 9001

# stop
# kill `pgrep -f "/users/hanjiyun/project/geeksoho/application.py"`

import os
import web
import rediswebpy
from web.contrib.template import render_jinja
import misc

db = web.database(dbn='mysql', db='geeksoho', user='geeksoho', passwd='geeksoho')

urls = (
    '/', 'index',
    '/test', 'test'
)


# controllers
# ===============

class index:
    """Home"""
    def GET(self):
        # return pjax('jobs.html')
        jobsList = GetJobs()
        return render.jobs(jobsList=jobsList)

    def POST(self):
        data = web.input(title='', link='', company='', company_weibo='',
                         company_website='', city='', salary='', intro='')
        CreatNewJob(data)
        raise web.seeother('/')


class test:
    """test"""
    def GET(self):
        # return pjax('test.html')
        return render.test()


# models
# =============

def CreatNewJob(data):
    db.insert(
        'jobs',
        title = data.title,
        link = data.link,
        company = data.company,
        company_weibo = data.company_weibo,
        company_website = data.company_website,
        city = data.city,
        salary = data.salary,
        intro = data.intro)

def GetJobs():
    return db.select('jobs', limit = 100, order='id DESC')

# globals = get_all_functions(misc)

app = web.application(urls, globals())
web.config.debug = True
cache = False

session = web.session.Session(app, rediswebpy.RedisStore(), initializer={'count': 0})

render = render_jinja(
    'templates',         # set the template path.
    encoding = 'utf-8',  # encoding.
)
myFilters = {'filter_tags': misc.filter_tags,}
render._lookup.filters.update(myFilters)

if __name__ == "__main__":
    web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
    app.run()
naoyeye/geeksoho
application.py
Python
mit
1,969
from .alignment import *
SunghanKim/toga
toga/constants/__init__.py
Python
bsd-3-clause
25
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
# Copyright (C) 2005 XenSource Ltd
#============================================================================

"""Handler for domain operations.
Nothing here is persistent (across reboots).
Needs to be persistent for one uptime.
"""

import os
import stat
import shutil
import socket
import tempfile
import threading

import xen.lowlevel.xc

from xen.xend import XendOptions, XendCheckpoint, XendDomainInfo
from xen.xend.PrettyPrint import prettyprint
from xen.xend import XendConfig
from xen.xend.XendError import XendError, XendInvalidDomain, VmError
from xen.xend.XendError import VMBadState
from xen.xend.XendLogging import log
from xen.xend.XendAPIConstants import XEN_API_VM_POWER_STATE
from xen.xend.XendConstants import XS_VMROOT
from xen.xend.XendConstants import DOM_STATE_HALTED, DOM_STATE_PAUSED
from xen.xend.XendConstants import DOM_STATE_RUNNING, DOM_STATE_SUSPENDED
from xen.xend.XendConstants import DOM_STATE_SHUTDOWN, DOM_STATE_UNKNOWN
from xen.xend.XendConstants import TRIGGER_TYPE
from xen.xend.XendDevices import XendDevices
from xen.xend.XendAPIConstants import *

from xen.xend.xenstore.xstransact import xstransact
from xen.xend.xenstore.xswatch import xswatch
from xen.util import mkdir
from xen.xend import uuid

xc = xen.lowlevel.xc.xc()
xoptions = XendOptions.instance()

__all__ = [ "XendDomain" ]

CACHED_CONFIG_FILE = 'config.sxp'
CHECK_POINT_FILE = 'checkpoint.chk'
DOM0_UUID = "00000000-0000-0000-0000-000000000000"
DOM0_NAME = "Domain-0"
DOM0_ID = 0

POWER_STATE_NAMES = dict([(x, XEN_API_VM_POWER_STATE[x])
                          for x in [DOM_STATE_HALTED,
                                    DOM_STATE_PAUSED,
                                    DOM_STATE_RUNNING,
                                    DOM_STATE_SUSPENDED,
                                    DOM_STATE_SHUTDOWN,
                                    DOM_STATE_UNKNOWN]])
POWER_STATE_ALL = 'all'


class XendDomain:
    """Index of all domains. Singleton.

    @ivar domains: map of domains indexed by domid
    @type domains: dict of XendDomainInfo
    @ivar managed_domains: domains that are not running and managed by Xend
    @type managed_domains: dict of XendDomainInfo indexed by uuid
    @ivar domains_lock: lock that must be held when manipulating self.domains
    @type domains_lock: threading.RLock
    @ivar _allow_new_domains: Flag to set that allows creating of new domains.
    @type _allow_new_domains: boolean
    """

    def __init__(self):
        self.domains = {}
        self.managed_domains = {}
        self.domains_lock = threading.RLock()

        # xen api instance vars
        # TODO: nothing uses this at the moment
        self._allow_new_domains = True

    # This must be called only the once, by instance() below.  It is separate
    # from the constructor because XendDomainInfo calls back into this class
    # in order to check the uniqueness of domain names.  This means that
    # instance() must be able to return a valid instance of this class even
    # during this initialisation.
    def init(self):
        """Singleton initialisation function."""

        dom_path = self._managed_path()
        mkdir.parents(dom_path, stat.S_IRWXU)

        xstransact.Mkdir(XS_VMROOT)
        xstransact.SetPermissions(XS_VMROOT, {'dom': DOM0_ID})

        self.domains_lock.acquire()
        try:
            try:
                dom0info = [d for d in self._running_domains() \
                            if d.get('domid') == DOM0_ID][0]

                dom0info['name'] = DOM0_NAME
                dom0 = XendDomainInfo.recreate(dom0info, True)
            except IndexError:
                raise XendError('Unable to find Domain 0')

            self._setDom0CPUCount()

            # This watch registration needs to be before the refresh call, so
            # that we're sure that we haven't missed any releases, but inside
            # the domains_lock, as we don't want the watch to fire until after
            # the refresh call has completed.
            xswatch("@introduceDomain", self._on_domains_changed)
            xswatch("@releaseDomain", self._on_domains_changed)

            self._init_domains()
        finally:
            self.domains_lock.release()

    def _on_domains_changed(self, _):
        """ Callback method when xenstore changes.

        Calls refresh which will keep the local cache of domains
        in sync.

        @rtype: int
        @return: 1
        """
        self.domains_lock.acquire()
        try:
            self._refresh()
        finally:
            self.domains_lock.release()
        return 1

    def _init_domains(self):
        """Does the initial scan of managed and active domains to
        populate self.domains.

        Note: L{XendDomainInfo._checkName} will call back into XendDomain
        to make sure domain name is not a duplicate.
        """
        self.domains_lock.acquire()
        try:
            running = self._running_domains()
            managed = self._managed_domains()

            # add all active domains
            for dom in running:
                if dom['dying'] == 1:
                    log.warn('Ignoring dying domain %d from now on' %
                             dom['domid'])
                    continue

                if dom['domid'] != DOM0_ID:
                    try:
                        new_dom = XendDomainInfo.recreate(dom, False)
                    except Exception:
                        log.exception("Failed to create reference to running "
                                      "domain id: %d" % dom['domid'])

            # add all managed domains as dormant domains.
            for dom in managed:
                dom_uuid = dom.get('uuid')
                if not dom_uuid:
                    continue

                dom_name = dom.get('name_label', 'Domain-%s' % dom_uuid)
                try:
                    running_dom = self.domain_lookup_nr(dom_name)
                    if not running_dom:
                        # instantiate domain if not started.
                        new_dom = XendDomainInfo.createDormant(dom)
                        self._managed_domain_register(new_dom)
                    else:
                        self._managed_domain_register(running_dom)
                        for key in XendConfig.XENAPI_CFG_TYPES.keys():
                            if key not in XendConfig.LEGACY_XENSTORE_VM_PARAMS and \
                               key in dom:
                                running_dom.info[key] = dom[key]
                except Exception:
                    log.exception("Failed to create reference to managed "
                                  "domain: %s" % dom_name)

        finally:
            self.domains_lock.release()


    # -----------------------------------------------------------------
    # Getting managed domains storage path names

    def _managed_path(self, domuuid = None):
        """Returns the path of the directory where managed domain
        information is stored.

        @keyword domuuid: If not None, will return the path to the domain
                          otherwise, will return the path containing
                          the directories which represent each domain.
        @type: None or String.
        @rtype: String
        @return: Path.
        """
        dom_path = xoptions.get_xend_domains_path()
        if domuuid:
            dom_path = os.path.join(dom_path, domuuid)
        return dom_path

    def _managed_config_path(self, domuuid):
        """Returns the path to the configuration file of a managed domain.

        @param domuuid: Domain uuid
        @type domuuid: String
        @rtype: String
        @return: path to config file.
        """
        return os.path.join(self._managed_path(domuuid), CACHED_CONFIG_FILE)

    def _managed_check_point_path(self, domuuid):
        """Returns absolute path to check point file for managed domain.

        @param domuuid: Name of managed domain
        @type domuuid: String
        @rtype: String
        @return: Path
        """
        return os.path.join(self._managed_path(domuuid), CHECK_POINT_FILE)

    def _managed_config_remove(self, domuuid):
        """Removes a domain configuration from managed list

        @param domuuid: Name of managed domain
        @type domuuid: String
        @raise XendError: fails to remove the domain.
        """
        config_path = self._managed_path(domuuid)
        try:
            if os.path.exists(config_path) and os.path.isdir(config_path):
                shutil.rmtree(config_path)
        except IOError:
            log.exception('managed_config_remove failed removing conf')
            raise XendError("Unable to remove managed configuration"
                            " for domain: %s" % domuuid)

    def managed_config_save(self, dominfo):
        """Save a domain's configuration to disk

        @param dominfo: Managed domain to save.
        @type dominfo: XendDomainInfo
        @raise XendError: fails to save configuration.
        @rtype: None
        """
        if not self.is_domain_managed(dominfo):
            return # refuse to save configuration this domain isn't managed

        if dominfo:
            domains_dir = self._managed_path()
            dom_uuid = dominfo.get_uuid()
            domain_config_dir = self._managed_path(dom_uuid)

            def make_or_raise(path):
                try:
                    mkdir.parents(path, stat.S_IRWXU)
                except:
                    log.exception("%s could not be created." % path)
                    raise XendError("%s could not be created." % path)

            make_or_raise(domains_dir)
            make_or_raise(domain_config_dir)

            try:
                fd, fn = tempfile.mkstemp()
                f = os.fdopen(fd, 'w+b')
                try:
                    prettyprint(dominfo.sxpr(legacy_only = False), f,
                                width = 78)
                finally:
                    f.close()

                try:
                    shutil.move(fn, self._managed_config_path(dom_uuid))
                except:
                    log.exception("Renaming %s to %s", fn,
                                  self._managed_config_path(dom_uuid))
                    os.remove(fn)
            except:
                log.exception("Error occurred saving configuration file " +
                              "to %s" % domain_config_dir)
                raise XendError("Failed to save configuration file to: %s" %
                                domain_config_dir)
        else:
            log.warn("Trying to save configuration for invalid domain")

    def _managed_domains(self):
        """ Returns list of domains that are managed.

        Expects to be protected by domains_lock.

        @rtype: list of XendConfig
        @return: List of domain configurations that are managed.
        """
        dom_path = self._managed_path()
        dom_uuids = os.listdir(dom_path)
        doms = []
        for dom_uuid in dom_uuids:
            try:
                cfg_file = self._managed_config_path(dom_uuid)
                cfg = XendConfig.XendConfig(filename = cfg_file)
                if cfg.get('uuid') != dom_uuid:
                    # something is wrong with the SXP
                    log.error("UUID mismatch in stored configuration: %s" %
                              cfg_file)
                    continue
                doms.append(cfg)
            except Exception:
                log.exception('Unable to open or parse config.sxp: %s' % \
                              cfg_file)
        return doms

    def _managed_domain_unregister(self, dom):
        try:
            if self.is_domain_managed(dom):
                self._managed_config_remove(dom.get_uuid())
                del self.managed_domains[dom.get_uuid()]
        except ValueError:
            log.warn("Domain is not registered: %s" % dom.get_uuid())

    def _managed_domain_register(self, dom):
        self.managed_domains[dom.get_uuid()] = dom

    def is_domain_managed(self, dom = None):
        return (dom.get_uuid() in self.managed_domains)

    # End of Managed Domain Access
    # --------------------------------------------------------------------

    def _running_domains(self):
        """Get table of domains indexed by id from xc.

        @requires: Expects to be protected by domains_lock.
        @rtype: list of dicts
        @return: A list of dicts representing the running domains.
        """
        try:
            return xc.domain_getinfo()
        except RuntimeError, e:
            log.exception("Unable to get domain information.")
            return {}

    def _setDom0CPUCount(self):
        """Sets the number of VCPUs dom0 has. Retrieved from the
        Xend configuration, L{XendOptions}.

        @requires: Expects to be protected by domains_lock.
        @rtype: None
        """
        dom0 = self.privilegedDomain()

        # get max number of vcpus to use for dom0 from config
        target = int(xoptions.get_dom0_vcpus())
        log.debug("number of vcpus to use is %d", target)

        # target == 0 means use all processors
        if target > 0:
            dom0.setVCpuCount(target)

    def _refresh(self, refresh_shutdown = True):
        """Refresh the domain list. Needs to be called when either
        xenstore has changed or when a method requires up to date
        information (like uptime, cputime stats).

        Expects to be protected by the domains_lock.

        @rtype: None
        """
        txn = xstransact()
        try:
            self._refreshTxn(txn, refresh_shutdown)
            txn.commit()
        except:
            txn.abort()
            raise

    def _refreshTxn(self, transaction, refresh_shutdown):
        running = self._running_domains()
        # Add domains that are not already tracked but running in Xen,
        # and update domain state for those that are running and tracked.
        for dom in running:
            domid = dom['domid']
            if domid in self.domains:
                self.domains[domid].update(dom, refresh_shutdown, transaction)
            elif domid not in self.domains and dom['dying'] != 1:
                try:
                    new_dom = XendDomainInfo.recreate(dom, False)
                except VmError:
                    log.exception("Unable to recreate domain")
                    try:
                        xc.domain_destroy(domid)
                    except:
                        log.exception("Hard destruction of domain failed: %d" %
                                      domid)

        # update information for all running domains
        # - like cpu_time, status, dying, etc.
        # remove domains that are not running from active domain list.
        # The list might have changed by now, because the update call may
        # cause new domains to be added, if the domain has rebooted.  We get
        # the list again.
        running = self._running_domains()
        running_domids = [d['domid'] for d in running if d['dying'] != 1]
        for domid, dom in self.domains.items():
            if domid not in running_domids and domid != DOM0_ID:
                self._remove_domain(dom, domid)

    def add_domain(self, info):
        """Add a domain to the list of running domains

        @requires: Expects to be protected by the domains_lock.
        @param info: XendDomainInfo of a domain to be added.
        @type info: XendDomainInfo
        """
        log.debug("Adding Domain: %s" % info.getDomid())
        self.domains[info.getDomid()] = info

        # update the managed domains with a new XendDomainInfo object
        # if we are keeping track of it.
        if info.get_uuid() in self.managed_domains:
            self._managed_domain_register(info)

    def remove_domain(self, info, domid = None):
        """Remove the domain from the list of running domains, taking the
        domains_lock first.
        """
        self.domains_lock.acquire()
        try:
            self._remove_domain(info, domid)
        finally:
            self.domains_lock.release()

    def _remove_domain(self, info, domid = None):
        """Remove the domain from the list of running domains

        @requires: Expects to be protected by the domains_lock.
        @param info: XendDomainInfo of a domain to be removed.
        @type info: XendDomainInfo
        """
        if info:
            if domid == None:
                domid = info.getDomid()

            if info._stateGet() != DOM_STATE_HALTED:
                info.cleanupDomain()

            if domid in self.domains:
                del self.domains[domid]
        else:
            log.warning("Attempted to remove non-existent domain.")

    def restore_(self, config):
        """Create a domain as part of the restore process.  This is called
        only from L{XendCheckpoint}.

        A restore request comes into XendDomain through L{domain_restore}
        or L{domain_restore_fd}.  That request is forwarded immediately to
        XendCheckpoint which, when it is ready, will call this method.  It
        is necessary to come through here rather than go directly to
        L{XendDomainInfo.restore} because we need to serialise the domain
        creation process, but cannot lock domain_restore_fd as a whole,
        otherwise we will deadlock waiting for the old domain to die.

        @param config: Configuration of domain to restore
        @type config: SXP Object (eg. list of lists)
        """
        self.domains_lock.acquire()
        try:
            dominfo = XendDomainInfo.restore(config)
            return dominfo
        finally:
            self.domains_lock.release()

    def domain_lookup(self, domid):
        """Look up given I{domid} in the list of managed and running
        domains.

        @note: Will cause a refresh before lookup up domains, for
               a version that does not need to re-read xenstore
               use L{domain_lookup_nr}.

        @param domid: Domain ID or Domain Name.
        @type domid: int or string
        @return: Found domain.
        @rtype: XendDomainInfo
        @raise XendInvalidDomain: If domain is not found.
        """
        self.domains_lock.acquire()
        try:
            self._refresh(refresh_shutdown = False)
            dom = self.domain_lookup_nr(domid)
            if not dom:
                raise XendInvalidDomain(str(domid))
            return dom
        finally:
            self.domains_lock.release()

    def domain_lookup_nr(self, domid):
        """Look up given I{domid} in the list of managed and running
        domains.

        @param domid: Domain ID or Domain Name.
        @type domid: int or string
        @return: Found domain.
        @rtype: XendDomainInfo or None
        """
        self.domains_lock.acquire()
        try:
            # lookup by name
            match = [dom for dom in self.domains.values() \
                     if dom.getName() == domid]
            if match:
                return match[0]

            match = [dom for dom in self.managed_domains.values() \
                     if dom.getName() == domid]
            if match:
                return match[0]

            # lookup by id
            try:
                if int(domid) in self.domains:
                    return self.domains[int(domid)]
            except ValueError:
                pass

            # lookup by uuid for running domains
            match = [dom for dom in self.domains.values() \
                     if dom.get_uuid() == domid]
            if match:
                return match[0]

            # lookup by uuid for inactive managed domains
            if domid in self.managed_domains:
                return self.managed_domains[domid]

            return None
        finally:
            self.domains_lock.release()

    def privilegedDomain(self):
        """ Get the XendDomainInfo of a dom0

        @rtype: XendDomainInfo
        """
        self.domains_lock.acquire()
        try:
            return self.domains[DOM0_ID]
        finally:
            self.domains_lock.release()

    def autostart_domains(self):
        """ Autostart managed domains that are marked as such. """

        need_starting = []

        self.domains_lock.acquire()
        try:
            for dom_uuid, dom in self.managed_domains.items():
                if dom and dom._stateGet() == DOM_STATE_HALTED:
                    on_xend_start = dom.info.get('on_xend_start', 'ignore')
                    auto_power_on = dom.info.get('auto_power_on', False)
                    should_start = (on_xend_start == 'start') or auto_power_on
                    if should_start:
                        need_starting.append(dom_uuid)
        finally:
            self.domains_lock.release()

        for dom_uuid in need_starting:
            self.domain_start(dom_uuid, False)

    def cleanup_domains(self):
        """Clean up domains that are marked as autostop.
        Should be called when Xend goes down. This is currently
        called from L{xen.xend.servers.XMLRPCServer}.
        """
        log.debug('cleanup_domains')
        self.domains_lock.acquire()
        try:
            for dom in self.domains.values():
                if dom.getName() == DOM0_NAME:
                    continue

                try:
                    if dom._stateGet() == DOM_STATE_RUNNING:
                        shutdownAction = dom.info.get('on_xend_stop', 'ignore')
                        if shutdownAction == 'shutdown':
                            log.debug('Shutting down domain: %s' %
                                      dom.getName())
                            dom.shutdown("poweroff")
                        elif shutdownAction == 'suspend':
                            self.domain_suspend(dom.getName())
                        else:
                            log.debug('Domain %s continues to run.' %
                                      dom.getName())
                except:
                    log.exception('Domain %s failed to %s.' % \
                                  (dom.getName(), shutdownAction))
        finally:
            self.domains_lock.release()

    # ----------------------------------------------------------------
    # Xen API

    def set_allow_new_domains(self, allow_new_domains):
        self._allow_new_domains = allow_new_domains

    def allow_new_domains(self):
        return self._allow_new_domains

    def get_domain_refs(self):
        result = []
        try:
            self.domains_lock.acquire()
            result = [d.get_uuid() for d in self.domains.values()]
            for d in self.managed_domains.keys():
                if d not in result:
                    result.append(d)
            return result
        finally:
            self.domains_lock.release()

    def get_all_vms(self):
        self.domains_lock.acquire()
        try:
            result = self.domains.values()
            result += [x for x in self.managed_domains.values()
                       if x not in result]
            return result
        finally:
            self.domains_lock.release()

    def get_vm_by_uuid(self, vm_uuid):
        self.domains_lock.acquire()
        try:
            for dom in self.domains.values():
                if dom.get_uuid() == vm_uuid:
                    return dom

            if vm_uuid in self.managed_domains:
                return self.managed_domains[vm_uuid]

            return None
        finally:
            self.domains_lock.release()

    def get_vm_with_dev_uuid(self, klass, dev_uuid):
        self.domains_lock.acquire()
        try:
            for dom in self.domains.values() + self.managed_domains.values():
                if dom.has_device(klass, dev_uuid):
                    return dom
            return None
        finally:
            self.domains_lock.release()

    def get_dev_property_by_uuid(self, klass, dev_uuid, field):
        value = None
        self.domains_lock.acquire()
        try:
            try:
                dom = self.get_vm_with_dev_uuid(klass, dev_uuid)
                if dom:
                    value = dom.get_dev_property(klass, dev_uuid, field)
            except ValueError, e:
                pass
        finally:
            self.domains_lock.release()

        return value

    def set_dev_property_by_uuid(self, klass, dev_uuid, field, value,
                                 old_val = None):
        rc = True
        self.domains_lock.acquire()
        try:
            try:
                dom = self.get_vm_with_dev_uuid(klass, dev_uuid)
                if dom:
                    o_val = dom.get_dev_property(klass, dev_uuid, field)
                    log.info("o_val=%s, old_val=%s" % (o_val, old_val))
                    if old_val and old_val != o_val:
                        return False

                    dom.set_dev_property(klass, dev_uuid, field, value)
                    self.managed_config_save(dom)
            except ValueError, e:
                pass
        finally:
            self.domains_lock.release()

        return rc

    def is_valid_vm(self, vm_ref):
        return (self.get_vm_by_uuid(vm_ref) != None)

    def is_valid_dev(self, klass, dev_uuid):
        return (self.get_vm_with_dev_uuid(klass, dev_uuid) != None)

    def do_legacy_api_with_uuid(self, fn, vm_uuid, *args, **kwargs):
        dom = self.uuid_to_dom(vm_uuid)
        fn(dom, *args, **kwargs)

    def uuid_to_dom(self, vm_uuid):
        self.domains_lock.acquire()
        try:
            for domid, dom in self.domains.items():
                if dom.get_uuid() == vm_uuid:
                    return domid

            if vm_uuid in self.managed_domains:
                domid = self.managed_domains[vm_uuid].getDomid()
                if domid is None:
                    return self.managed_domains[vm_uuid].getName()
                else:
                    return domid

            raise XendInvalidDomain(vm_uuid)
        finally:
            self.domains_lock.release()

    def create_domain(self, xenapi_vm):
        self.domains_lock.acquire()
        try:
            try:
                xeninfo = XendConfig.XendConfig(xapi = xenapi_vm)
                dominfo = XendDomainInfo.createDormant(xeninfo)
                log.debug("Creating new managed domain: %s: %s" %
                          (dominfo.getName(), dominfo.get_uuid()))
                self._managed_domain_register(dominfo)
                self.managed_config_save(dominfo)
                return dominfo.get_uuid()
            except XendError, e:
                raise
            except Exception, e:
                raise XendError(str(e))
        finally:
            self.domains_lock.release()

    def rename_domain(self, dom, new_name):
        self.domains_lock.acquire()
        try:
            old_name = dom.getName()
            dom.setName(new_name)
        finally:
            self.domains_lock.release()

    #
    # End of Xen API
    # ----------------------------------------------------------------

    # ------------------------------------------------------------
    # Xen Legacy API

    def list(self, state = DOM_STATE_RUNNING):
        """Get list of domain objects.

        @param: the state in which the VMs should be -- one of the
                DOM_STATE_XYZ constants, or the corresponding name,
                or 'all'.
        @return: domains
        @rtype: list of XendDomainInfo
        """
        if type(state) == int:
            state = POWER_STATE_NAMES[state]
        state = state.lower()

        self.domains_lock.acquire()
        try:
            self._refresh(refresh_shutdown = False)

            # active domains
            active_domains = self.domains.values()
            active_uuids = [d.get_uuid() for d in active_domains]

            # inactive domains
            inactive_domains = []
            for dom_uuid, dom in self.managed_domains.items():
                if dom_uuid not in active_uuids:
                    inactive_domains.append(dom)

            if state == POWER_STATE_ALL:
                return active_domains + inactive_domains
            else:
                return filter(
                    lambda x: POWER_STATE_NAMES[x._stateGet()].lower() == state,
                    active_domains + inactive_domains)
        finally:
            self.domains_lock.release()

    def list_sorted(self, state = DOM_STATE_RUNNING):
        """Get list of domain objects, sorted by name.

        @param: the state in which the VMs should be -- one of the
                DOM_STATE_XYZ constants, or the corresponding name,
                or 'all'.
        @return: domain objects
        @rtype: list of XendDomainInfo
        """
        doms = self.list(state)
        doms.sort(lambda x, y: cmp(x.getName(), y.getName()))
        return doms

    def list_names(self, state = DOM_STATE_RUNNING):
        """Get list of domain names.

        @param: the state in which the VMs should be -- one of the
                DOM_STATE_XYZ constants, or the corresponding name,
                or 'all'.
        @return: domain names
        @rtype: list of strings.
        """
        return [d.getName() for d in self.list_sorted(state)]

    def domain_suspend(self, domname):
        """Suspends a domain that is persistently managed by Xend

        @param domname: Domain Name
        @type domname: string
        @rtype: None
        @raise XendError: Failure during checkpointing.
        """
        try:
            dominfo = self.domain_lookup_nr(domname)
            if not dominfo:
                raise XendInvalidDomain(domname)

            if dominfo.getDomid() == DOM0_ID:
                raise XendError("Cannot suspend privileged domain %s" % domname)

            if dominfo._stateGet() != DOM_STATE_RUNNING:
                raise VMBadState("Domain is not running",
                                 POWER_STATE_NAMES[DOM_STATE_RUNNING],
                                 POWER_STATE_NAMES[dominfo._stateGet()])

            dom_uuid = dominfo.get_uuid()

            if not os.path.exists(self._managed_config_path(dom_uuid)):
                raise XendError("Domain is not managed by Xend lifecycle " +
                                "support.")

            path = self._managed_check_point_path(dom_uuid)
            oflags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
            if hasattr(os, "O_LARGEFILE"):
                oflags |= os.O_LARGEFILE
            fd = os.open(path, oflags)
            try:
                # For now we don't support 'live checkpoint'
                XendCheckpoint.save(fd, dominfo, False, False, path)
            finally:
                os.close(fd)
        except OSError, ex:
            raise XendError("can't write guest state file %s: %s" %
                            (path, ex[1]))

    def domain_resume(self, domname, start_paused = False):
        """Resumes a domain that is persistently managed by Xend.

        @param domname: Domain Name
        @type domname: string
        @rtype: None
        @raise XendError: If failed to restore.
        """
        self.domains_lock.acquire()
        try:
            try:
                fd = None
                dominfo = self.domain_lookup_nr(domname)

                if not dominfo:
                    raise XendInvalidDomain(domname)

                if dominfo.getDomid() == DOM0_ID:
                    raise XendError("Cannot resume privileged domain %s" % domname)

                if dominfo._stateGet() != XEN_API_VM_POWER_STATE_SUSPENDED:
                    raise XendError("Cannot resume domain that is not suspended.")

                dominfo.setResume(True)

                dom_uuid = dominfo.get_uuid()
                chkpath = self._managed_check_point_path(dom_uuid)
                if not os.path.exists(chkpath):
                    raise XendError("Domain was not suspended by Xend")

                # Restore that replaces the existing XendDomainInfo
                try:
                    log.debug('Current DomainInfo state: %d' %
                              dominfo._stateGet())
                    oflags = os.O_RDONLY
                    if hasattr(os, "O_LARGEFILE"):
                        oflags |= os.O_LARGEFILE
                    fd = os.open(chkpath, oflags)
                    XendCheckpoint.restore(self,
                                           fd,
                                           dominfo,
                                           paused = start_paused)
                    os.unlink(chkpath)
                except OSError, ex:
                    raise XendError("Failed to read stored checkpoint file")
                except IOError, ex:
                    raise XendError("Failed to delete checkpoint file")
            except Exception, ex:
                log.exception("Exception occurred when resuming")
                raise XendError("Error occurred when resuming: %s" % str(ex))
        finally:
            if fd is not None:
                os.close(fd)
            self.domains_lock.release()

    def domain_create(self, config):
        """Create a domain from a configuration.

        @param config: configuration
        @type config: SXP Object (list of lists)
        @rtype: XendDomainInfo
        """
        self.domains_lock.acquire()
        try:
            self._refresh()

            dominfo = XendDomainInfo.create(config)
            return dominfo
        finally:
            self.domains_lock.release()

    def domain_create_from_dict(self, config_dict):
        """Create a domain from a configuration dictionary.

        @param config_dict: configuration
        @rtype: XendDomainInfo
        """
        self.domains_lock.acquire()
        try:
            self._refresh()

            dominfo = XendDomainInfo.create_from_dict(config_dict)
            return dominfo
        finally:
            self.domains_lock.release()

    def domain_new(self, config):
        """Create a domain from a configuration but do not start it.

        @param config: configuration
        @type config: SXP Object (list of lists)
        @rtype: XendDomainInfo
        """
        self.domains_lock.acquire()
        try:
            try:
                domconfig = XendConfig.XendConfig(sxp_obj = config)
                dominfo = XendDomainInfo.createDormant(domconfig)
                log.debug("Creating new managed domain: %s" %
                          dominfo.getName())
                self._managed_domain_register(dominfo)
                self.managed_config_save(dominfo)
                # no return value because it isn't meaningful for client
            except XendError, e:
                raise
            except Exception, e:
                raise XendError(str(e))
        finally:
            self.domains_lock.release()

    def domain_start(self, domid, start_paused = True):
        """Start a managed domain

        @require: Domain must not be running.
        @param domid: Domain name or domain ID.
        @type domid: string or int
        @rtype: None
        @raise XendError: If domain is still running
        @rtype: None
        """
        self.domains_lock.acquire()
        try:
            self._refresh()

            dominfo = self.domain_lookup_nr(domid)
            if not dominfo:
                raise XendInvalidDomain(str(domid))

            if dominfo._stateGet() != DOM_STATE_HALTED:
                raise VMBadState("Domain is already running",
                                 POWER_STATE_NAMES[DOM_STATE_HALTED],
                                 POWER_STATE_NAMES[dominfo._stateGet()])

            dominfo.start(is_managed = True)
        finally:
            self.domains_lock.release()

        try:
            dominfo.waitForDevices()
        except Exception, ex:
            log.warn("Failed to setup devices for " + str(dominfo) +
                     ": " + str(ex))
            dominfo.destroy()
            raise

        if not start_paused:
            dominfo.unpause()

    def domain_delete(self, domid):
        """Remove a managed domain from database

        @require: Domain must not be running.
        @param domid: Domain name or domain ID.
@type domid: string or int @rtype: None @raise XendError: If domain is still running """ self.domains_lock.acquire() try: try: dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo._stateGet() != XEN_API_VM_POWER_STATE_HALTED: raise VMBadState("Domain is not halted.", POWER_STATE_NAMES[DOM_STATE_HALTED], POWER_STATE_NAMES[dominfo._stateGet()]) self._domain_delete_by_info(dominfo) except Exception, ex: raise XendError(str(ex)) finally: self.domains_lock.release() def domain_delete_by_dominfo(self, dominfo): """Only for use by XendDomainInfo. """ self.domains_lock.acquire() try: self._domain_delete_by_info(dominfo) finally: self.domains_lock.release() def _domain_delete_by_info(self, dominfo): """Expects to be protected by domains_lock. """ log.info("Domain %s (%s) deleted." % (dominfo.getName(), dominfo.info.get('uuid'))) dominfo.metrics.destroy() self._managed_domain_unregister(dominfo) self._remove_domain(dominfo) XendDevices.destroy_device_state(dominfo) def domain_configure(self, config): """Configure an existing domain. @param vmconfig: vm configuration @type vmconfig: SXP Object (list of lists) @todo: Not implemented """ # !!! raise XendError("Unsupported") def domain_restore(self, src, paused=False): """Restore a domain from file. @param src: filename of checkpoint file to restore from @type src: string @return: Restored domain @rtype: XendDomainInfo @raise XendError: Failure to restore domain """ try: oflags = os.O_RDONLY if hasattr(os, "O_LARGEFILE"): oflags |= os.O_LARGEFILE fd = os.open(src, oflags) try: return self.domain_restore_fd(fd, paused=paused) finally: os.close(fd) except OSError, ex: raise XendError("can't read guest state file %s: %s" % (src, ex[1])) def domain_restore_fd(self, fd, paused=False): """Restore a domain from the given file descriptor. @param fd: file descriptor of the checkpoint file @type fd: File object @rtype: XendDomainInfo @raise XendError: if failed to restore """ try: return XendCheckpoint.restore(self, fd, paused=paused) except XendError, e: log.exception("Restore failed") raise except: # I don't really want to log this exception here, but the error # handling in the relocation-socket handling code (relocate.py) is # poor, so we need to log this for debugging. log.exception("Restore failed") raise XendError("Restore failed") def domain_unpause(self, domid): """Unpause domain execution. @param domid: Domain ID or Name @type domid: int or string. @rtype: None @raise XendError: Failed to unpause @raise XendInvalidDomain: Domain is not valid """ try: dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo.getDomid() == DOM0_ID: raise XendError("Cannot unpause privileged domain %s" % domid) if dominfo._stateGet() not in (DOM_STATE_PAUSED, DOM_STATE_RUNNING): raise VMBadState("Domain '%s' is not started" % domid, POWER_STATE_NAMES[DOM_STATE_PAUSED], POWER_STATE_NAMES[dominfo._stateGet()]) log.info("Domain %s (%d) unpaused.", dominfo.getName(), int(dominfo.getDomid())) dominfo.unpause() except XendInvalidDomain: log.exception("domain_unpause") raise except Exception, ex: log.exception("domain_unpause") raise XendError(str(ex)) def domain_pause(self, domid, state=False): """Pause domain execution. @param domid: Domain ID or Name @type domid: int or string. 
@keyword state: If True, will return the domain state before pause @type state: bool @rtype: int if state is True @return: Domain state (DOM_STATE_*) @rtype: None if state is False @raise XendError: Failed to pause @raise XendInvalidDomain: Domain is not valid """ try: dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo.getDomid() == DOM0_ID: raise XendError("Cannot pause privileged domain %s" % domid) ds = dominfo._stateGet() if ds not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED): raise VMBadState("Domain '%s' is not started" % domid, POWER_STATE_NAMES[DOM_STATE_RUNNING], POWER_STATE_NAMES[ds]) log.info("Domain %s (%d) paused.", dominfo.getName(), int(dominfo.getDomid())) dominfo.pause() if state: return ds except XendInvalidDomain: log.exception("domain_pause") raise except Exception, ex: log.exception("domain_pause") raise XendError(str(ex)) def domain_dump(self, domid, filename, live, crash): """Dump domain core.""" dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo.getDomid() == DOM0_ID: raise XendError("Cannot dump core for privileged domain %s" % domid) if dominfo._stateGet() not in (DOM_STATE_PAUSED, DOM_STATE_RUNNING): raise VMBadState("Domain '%s' is not started" % domid, POWER_STATE_NAMES[DOM_STATE_PAUSED], POWER_STATE_NAMES[dominfo._stateGet()]) try: log.info("Domain core dump requested for domain %s (%d) " "live=%d crash=%d.", dominfo.getName(), dominfo.getDomid(), live, crash) return dominfo.dumpCore(filename) except Exception, ex: raise XendError(str(ex)) def domain_destroy(self, domid): """Terminate domain immediately. @param domid: Domain ID or Name @type domid: int or string. @rtype: None @raise XendError: Failed to destroy @raise XendInvalidDomain: Domain is not valid """ dominfo = self.domain_lookup_nr(domid) if dominfo and dominfo.getDomid() == DOM0_ID: raise XendError("Cannot destroy privileged domain %s" % domid) if dominfo: val = dominfo.destroy() else: try: val = xc.domain_destroy(int(domid)) except ValueError: raise XendInvalidDomain(domid) except Exception, e: raise XendError(str(e)) return val def domain_migrate(self, domid, dst, live=False, resource=0, port=0): """Start domain migration. @param domid: Domain ID or Name @type domid: int or string. @param dst: Destination IP address @type dst: string @keyword port: relocation port on destination @type port: int @keyword live: Live migration @type live: bool @keyword resource: not used?? 
@rtype: None @raise XendError: Failed to migrate @raise XendInvalidDomain: Domain is not valid """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo.getDomid() == DOM0_ID: raise XendError("Cannot migrate privileged domain %s" % domid) if dominfo._stateGet() != DOM_STATE_RUNNING: raise VMBadState("Domain is not running", POWER_STATE_NAMES[DOM_STATE_RUNNING], POWER_STATE_NAMES[dominfo._stateGet()]) """ The following call may raise a XendError exception """ dominfo.testMigrateDevices(True, dst) if live: """ Make sure there's memory free for enabling shadow mode """ dominfo.checkLiveMigrateMemory() if port == 0: port = xoptions.get_xend_relocation_port() try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((dst, port)) except socket.error, err: raise XendError("can't connect: %s" % err[1]) sock.send("receive\n") sock.recv(80) try: XendCheckpoint.save(sock.fileno(), dominfo, True, live, dst) finally: sock.close() def domain_save(self, domid, dst, checkpoint=False): """Start saving a domain to file. @param domid: Domain ID or Name @type domid: int or string. @param dst: Destination filename @type dst: string @rtype: None @raise XendError: Failed to save domain @raise XendInvalidDomain: Domain is not valid """ try: dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo.getDomid() == DOM0_ID: raise XendError("Cannot save privileged domain %s" % str(domid)) if dominfo._stateGet() != DOM_STATE_RUNNING: raise VMBadState("Domain is not running", POWER_STATE_NAMES[DOM_STATE_RUNNING], POWER_STATE_NAMES[dominfo._stateGet()]) oflags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC if hasattr(os, "O_LARGEFILE"): oflags |= os.O_LARGEFILE fd = os.open(dst, oflags) try: XendCheckpoint.save(fd, dominfo, False, False, dst, checkpoint=checkpoint) except Exception, e: os.close(fd) raise e os.close(fd) except OSError, ex: raise XendError("can't write guest state file %s: %s" % (dst, ex[1])) def domain_pincpu(self, domid, vcpu, cpumap): """Set which cpus vcpu can use @param domid: Domain ID or Name @type domid: int or string. @param vcpu: vcpu to pin to @type vcpu: int @param cpumap: string repr of usable cpus @type cpumap: string @rtype: 0 """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) # if vcpu is keyword 'all', apply the cpumap to all vcpus vcpus = [ vcpu ] if str(vcpu).lower() == "all": vcpus = range(0, int(dominfo.getVCpuCount())) # set the same cpumask for all vcpus rc = 0 if dominfo._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED): for v in vcpus: try: rc = xc.vcpu_setaffinity(dominfo.getDomid(), int(v), cpumap) except Exception, ex: log.exception(ex) raise XendError("Cannot pin vcpu: %s to cpu: %s - %s" % \ (v, cpumap, str(ex))) else: # FIXME: if we could define cpu affinity definitions to # each vcpu, reprogram the following processing. if str(vcpu).lower() != "all": raise XendError("Must specify 'all' to VCPU " "for inactive managed domains") dominfo.setCpus(cpumap) self.managed_config_save(dominfo) return rc def domain_cpu_sedf_set(self, domid, period, slice_, latency, extratime, weight): """Set Simple EDF scheduler parameters for a domain. @param domid: Domain ID or Name @type domid: int or string. 
@rtype: 0 """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) try: return xc.sedf_domain_set(dominfo.getDomid(), period, slice_, latency, extratime, weight) except Exception, ex: raise XendError(str(ex)) def domain_cpu_sedf_get(self, domid): """Get Simple EDF scheduler parameters for a domain. @param domid: Domain ID or Name @type domid: int or string. @rtype: SXP object @return: The parameters for Simple EDF schedule for a domain. """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) try: sedf_info = xc.sedf_domain_get(dominfo.getDomid()) # return sxpr return ['sedf', ['domid', sedf_info['domid']], ['period', sedf_info['period']], ['slice', sedf_info['slice']], ['latency', sedf_info['latency']], ['extratime', sedf_info['extratime']], ['weight', sedf_info['weight']]] except Exception, ex: raise XendError(str(ex)) def domain_shadow_control(self, domid, op): """Shadow page control. @param domid: Domain ID or Name @type domid: int or string. @param op: operation @type op: int @rtype: 0 """ dominfo = self.domain_lookup(domid) try: return xc.shadow_control(dominfo.getDomid(), op) except Exception, ex: raise XendError(str(ex)) def domain_shadow_mem_get(self, domid): """Get shadow pagetable memory allocation. @param domid: Domain ID or Name @type domid: int or string. @rtype: int @return: shadow memory in MB """ dominfo = self.domain_lookup(domid) try: return xc.shadow_mem_control(dominfo.getDomid()) except Exception, ex: raise XendError(str(ex)) def domain_shadow_mem_set(self, domid, mb): """Set shadow pagetable memory allocation. @param domid: Domain ID or Name @type domid: int or string. @param mb: shadow memory to set in MB @type: mb: int @rtype: int @return: shadow memory in MB """ dominfo = self.domain_lookup(domid) try: return xc.shadow_mem_control(dominfo.getDomid(), mb=mb) except Exception, ex: raise XendError(str(ex)) def domain_sched_credit_get(self, domid): """Get credit scheduler parameters for a domain. @param domid: Domain ID or Name @type domid: int or string. @rtype: dict with keys 'weight' and 'cap' @return: credit scheduler parameters """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED): try: return xc.sched_credit_domain_get(dominfo.getDomid()) except Exception, ex: raise XendError(str(ex)) else: return {'weight' : dominfo.getWeight(), 'cap' : dominfo.getCap()} def domain_sched_credit_set(self, domid, weight = None, cap = None): """Set credit scheduler parameters for a domain. @param domid: Domain ID or Name @type domid: int or string. 
@type weight: int @type cap: int @rtype: 0 """ set_weight = False set_cap = False dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) try: if weight is None: weight = int(0) elif weight < 1 or weight > 65535: raise XendError("weight is out of range") else: set_weight = True if cap is None: cap = int(~0) elif cap < 0 or cap > dominfo.getVCpuCount() * 100: raise XendError("cap is out of range") else: set_cap = True assert type(weight) == int assert type(cap) == int rc = 0 if dominfo._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED): rc = xc.sched_credit_domain_set(dominfo.getDomid(), weight, cap) if rc == 0: if set_weight: dominfo.setWeight(weight) if set_cap: dominfo.setCap(cap) self.managed_config_save(dominfo) return rc except Exception, ex: log.exception(ex) raise XendError(str(ex)) def domain_maxmem_set(self, domid, mem): """Set the memory limit for a domain. @param domid: Domain ID or Name @type domid: int or string. @param mem: memory limit (in MiB) @type mem: int @raise XendError: fail to set memory @rtype: 0 """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) dominfo.setMemoryMaximum(mem) def domain_ioport_range_enable(self, domid, first, last): """Enable access to a range of IO ports for a domain @param first: first IO port @param last: last IO port @raise XendError: failed to set range @rtype: 0 """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) nr_ports = last - first + 1 try: return xc.domain_ioport_permission(dominfo.getDomid(), first_port = first, nr_ports = nr_ports, allow_access = 1) except Exception, ex: raise XendError(str(ex)) def domain_ioport_range_disable(self, domid, first, last): """Disable access to a range of IO ports for a domain @param first: first IO port @param last: last IO port @raise XendError: failed to set range @rtype: 0 """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) nr_ports = last - first + 1 try: return xc.domain_ioport_permission(dominfo.getDomid(), first_port = first, nr_ports = nr_ports, allow_access = 0) except Exception, ex: raise XendError(str(ex)) def domain_send_trigger(self, domid, trigger_name, vcpu = 0): """Send trigger to a domain. @param domid: Domain ID or Name @type domid: int or string. @param trigger_name: trigger type name @type trigger_name: string @param vcpu: VCPU to send trigger (default is 0) @type vcpu: int @raise XendError: failed to send trigger @raise XendInvalidDomain: Domain is not valid @rtype: 0 """ dominfo = self.domain_lookup_nr(domid) if not dominfo: raise XendInvalidDomain(str(domid)) if dominfo._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED): raise VMBadState("Domain '%s' is not started" % domid, POWER_STATE_NAMES[DOM_STATE_RUNNING], POWER_STATE_NAMES[dominfo._stateGet()]) if trigger_name.lower() in TRIGGER_TYPE.keys(): trigger = TRIGGER_TYPE[trigger_name.lower()] else: raise XendError("Invalid trigger: %s" % trigger_name) try: return xc.domain_send_trigger(dominfo.getDomid(), trigger, vcpu) except Exception, ex: raise XendError(str(ex)) def instance(): """Singleton constructor. Use this instead of the class constructor. """ global inst try: inst except: inst = XendDomain() inst.init() return inst
mikesun/xen-cow-checkpointing
tools/python/xen/xend/XendDomain.py
Python
gpl-2.0
58,761
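# A note on the pattern used throughout XendDomain above: every public method
# guards self.domains/self.managed_domains by acquiring domains_lock and
# releasing it in a finally block. A minimal standalone sketch of that locking
# pattern (hypothetical Registry class, not part of Xend):
import threading

class Registry(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._items = {}

    def lookup(self, key):
        # Acquire/release mirrors XendDomain.domain_lookup_nr.
        self._lock.acquire()
        try:
            return self._items.get(key)
        finally:
            self._lock.release()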
class Solution:
    def permuteUnique(self, nums):
        """Return all unique permutations of nums (insertion-based)."""
        if nums is None:
            return None
        if len(nums) == 0:
            return None
        if len(nums) == 1:
            return [nums]
        first = nums[0]
        result = []
        # Permute the remaining elements, then insert `first` at every
        # position of every sub-permutation.
        perm_result = self.permuteUnique(nums[1:])
        for perm in perm_result:
            for j in range(0, len(perm) + 1):
                # Skip inserting directly after an equal element, which
                # would generate the same permutation twice.
                if j - 1 >= 0 and first == perm[j - 1]:
                    continue
                candidate = list(perm)
                candidate.insert(j, first)
                if candidate not in result:
                    result.append(candidate)
        return result
saai/LeetcodePythonSolutions
permute.py
Python
mit
691
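# Quick sanity check for the Solution above (hypothetical driver, not part of
# the original file); output order may differ:
if __name__ == '__main__':
    print(Solution().permuteUnique([1, 1, 2]))
    # -> [[1, 1, 2], [1, 2, 1], [2, 1, 1]]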
#!/usr/bin/env python # encoding: utf-8 """Add coloring to default logging module.""" import logging class ColoredStreamHandler(logging.StreamHandler): def __init__(self): logging.StreamHandler.__init__(self) def emit(self, record): levelno = record.levelno if(levelno>=50): color = '\x1b[31m' # red elif(levelno>=40): color = '\x1b[31m' # red elif(levelno>=30): color = '\x1b[33m' # yellow elif(levelno>=20): color = '\x1b[32m' # green elif(levelno>=10): color = '\x1b[36m' # pink else: color = '\x1b[0m' # normal orig_msg = record.msg record.msg = color + record.msg + '\x1b[0m' # normal logging.StreamHandler.emit(self, record) record.msg = orig_msg # now we patch Python code to add color support to logging.StreamHandler def add_coloring_to_emit_windows(fn): # add methods we need to the class def _out_handle(self): import ctypes return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE) out_handle = property(_out_handle) def _set_color(self, code): import ctypes # Constants from the Windows API self.STD_OUTPUT_HANDLE = -11 hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE) ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code) setattr(ColoredStreamHandler, '_set_color', _set_color) def new(*args): FOREGROUND_BLUE = 0x0001 # text color contains blue. FOREGROUND_GREEN = 0x0002 # text color contains green. FOREGROUND_RED = 0x0004 # text color contains red. FOREGROUND_INTENSITY = 0x0008 # text color is intensified. FOREGROUND_WHITE = FOREGROUND_BLUE|FOREGROUND_GREEN |FOREGROUND_RED # winbase.h STD_INPUT_HANDLE = -10 STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 # wincon.h FOREGROUND_BLACK = 0x0000 FOREGROUND_BLUE = 0x0001 FOREGROUND_GREEN = 0x0002 FOREGROUND_CYAN = 0x0003 FOREGROUND_RED = 0x0004 FOREGROUND_MAGENTA = 0x0005 FOREGROUND_YELLOW = 0x0006 FOREGROUND_GREY = 0x0007 FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified. BACKGROUND_BLACK = 0x0000 BACKGROUND_BLUE = 0x0010 BACKGROUND_GREEN = 0x0020 BACKGROUND_CYAN = 0x0030 BACKGROUND_RED = 0x0040 BACKGROUND_MAGENTA = 0x0050 BACKGROUND_YELLOW = 0x0060 BACKGROUND_GREY = 0x0070 BACKGROUND_INTENSITY = 0x0080 # background color is intensified. levelno = args[1].levelno if(levelno>=50): color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY elif(levelno>=40): color = FOREGROUND_RED | FOREGROUND_INTENSITY elif(levelno>=30): color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY elif(levelno>=20): color = FOREGROUND_GREEN elif(levelno>=10): color = FOREGROUND_MAGENTA else: color = FOREGROUND_WHITE args[0]._set_color(color) ret = fn(*args) args[0]._set_color( FOREGROUND_WHITE ) #print "after" return ret return new import platform if platform.system()=='Windows': # Windows does not support ANSI escapes and we are using API calls to set the console color ColoredStreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
xarts19/Dugong
dugong/external/colorer.py
Python
gpl-3.0
3,669
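# Example wiring for ColoredStreamHandler above (assumes an ANSI-capable
# terminal, or the Windows emit patch applied as in the module):
#   log = logging.getLogger('demo')
#   log.addHandler(ColoredStreamHandler())
#   log.setLevel(logging.DEBUG)
#   log.warning('rendered in yellow')
#   log.error('rendered in red')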
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2004-2007 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. # """Module for parsing Qt .ts files for translation. Currently this module supports the old format of .ts files. Some applictaions use the newer .ts format which are documented here: `TS file format 4.3 <http://doc.trolltech.com/4.3/linguist-ts-file-format.html>`_, `Example <http://svn.ez.no/svn/ezcomponents/trunk/Translation/docs/linguist-format.txt>`_ `Specification of the valid variable entries <http://doc.trolltech.com/4.3/qstring.html#arg>`_, `2 <http://doc.trolltech.com/4.3/qstring.html#arg-2>`_ """ from translate.misc import ourdom class QtTsParser: contextancestors = dict.fromkeys(["TS"]) messageancestors = dict.fromkeys(["TS", "context"]) def __init__(self, inputfile=None): """make a new QtTsParser, reading from the given inputfile if required""" self.filename = getattr(inputfile, "filename", None) self.knowncontextnodes = {} self.indexcontextnodes = {} if inputfile is None: self.document = ourdom.parseString("<!DOCTYPE TS><TS></TS>") else: self.document = ourdom.parse(inputfile) assert self.document.documentElement.tagName == "TS" def addtranslation(self, contextname, source, translation, comment=None, transtype=None, createifmissing=False): """adds the given translation (will create the nodes required if asked). 
Returns success""" contextnode = self.getcontextnode(contextname) if contextnode is None: if not createifmissing: return False # construct a context node with the given name contextnode = self.document.createElement("context") namenode = self.document.createElement("name") nametext = self.document.createTextNode(contextname) namenode.appendChild(nametext) contextnode.appendChild(namenode) self.document.documentElement.appendChild(contextnode) if not createifmissing: return False messagenode = self.document.createElement("message") sourcenode = self.document.createElement("source") sourcetext = self.document.createTextNode(source) sourcenode.appendChild(sourcetext) messagenode.appendChild(sourcenode) if comment: commentnode = self.document.createElement("comment") commenttext = self.document.createTextNode(comment) commentnode.appendChild(commenttext) messagenode.appendChild(commentnode) translationnode = self.document.createElement("translation") translationtext = self.document.createTextNode(translation) translationnode.appendChild(translationtext) if transtype: translationnode.setAttribute("type", transtype) messagenode.appendChild(translationnode) contextnode.appendChild(messagenode) return True def getxml(self): """return the ts file as xml""" xml = self.document.toprettyxml(indent=" ", encoding="utf-8") #This line causes empty lines in the translation text to be removed (when there are two newlines) xml = "\n".join([line for line in xml.split("\n") if line.strip()]) return xml def getcontextname(self, contextnode): """returns the name of the given context""" namenode = ourdom.getFirstElementByTagName(contextnode, "name") return ourdom.getnodetext(namenode) def getcontextnode(self, contextname): """finds the contextnode with the given name""" contextnode = self.knowncontextnodes.get(contextname, None) if contextnode is not None: return contextnode contextnodes = self.document.searchElementsByTagName("context", self.contextancestors) for contextnode in contextnodes: if self.getcontextname(contextnode) == contextname: self.knowncontextnodes[contextname] = contextnode return contextnode return None def getmessagenodes(self, context=None): """returns all the messagenodes, limiting to the given context (name or node) if given""" if context is None: return self.document.searchElementsByTagName("message", self.messageancestors) else: if isinstance(context, (str, unicode)): # look up the context node by name context = self.getcontextnode(context) if context is None: return [] return context.searchElementsByTagName("message", self.messageancestors) def getmessagesource(self, message): """returns the message source for a given node""" sourcenode = ourdom.getFirstElementByTagName(message, "source") return ourdom.getnodetext(sourcenode) def getmessagetranslation(self, message): """returns the message translation for a given node""" translationnode = ourdom.getFirstElementByTagName(message, "translation") return ourdom.getnodetext(translationnode) def getmessagetype(self, message): """returns the message translation attributes for a given node""" translationnode = ourdom.getFirstElementByTagName(message, "translation") return translationnode.getAttribute("type") def getmessagecomment(self, message): """returns the message comment for a given node""" commentnode = ourdom.getFirstElementByTagName(message, "comment") # NOTE: handles only one comment per msgid (OK) # and only one-line comments (can be VERY wrong) TODO!!! 
return ourdom.getnodetext(commentnode) def iteritems(self): """iterates through (contextname, messages)""" for contextnode in self.document.searchElementsByTagName("context", self.contextancestors): yield self.getcontextname(contextnode), self.getmessagenodes(contextnode) def __del__(self): """clean up the document if required""" if hasattr(self, "document"): self.document.unlink()
mozilla/verbatim
vendor/lib/python/translate/storage/ts.py
Python
gpl-2.0
6,808
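# Minimal round-trip sketch for QtTsParser above (hypothetical file name,
# using the addtranslation/getxml signatures defined in the module):
#   parser = QtTsParser()
#   parser.addtranslation('MainWindow', 'Open', 'Ouvrir', createifmissing=True)
#   open('out.ts', 'w').write(parser.getxml())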
# 700-hPa Temperature Advection
tadv_700 = mpcalc.advection(tmpk_700s, (uwnd_700s, vwnd_700s), (dx, dy)).to_base_units()

# Laplacian of Temperature Advection
lap_tadv_700 = mpcalc.laplacian(tadv_700, deltas=(dy, dx))

# Final term B calculation with constants
term_B = (-Rd / (sigma * (700 * units.hPa)) * lap_tadv_700).to_base_units()
print(term_B.units)
julienchastang/unidata-python-workshop
notebooks/MetPy_Advanced/solutions/term_B_calc.py
Python
mit
355
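# For reference, the quantity computed above is the QG omega-equation
# differential temperature-advection forcing (term B), assuming its usual form
#   term_B = -(Rd / (sigma * p)) * laplacian(-V . grad(T))
# evaluated at p = 700 hPa; mpcalc.advection returns the -V . grad(T) part.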
# Copyright (C) 2013-2021 Roland Lutz # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import xorn.proxy import gaf.attrib as GA def throws(fun, *args): try: fun(*args) except Exception as e: return type(e) rev = xorn.proxy.RevisionProxy(xorn.storage.Revision()) ob00 = rev.add_object(xorn.storage.Text(text = 'foo0bar0')) ob01 = rev.add_object(xorn.storage.Text(text = 'foo0=bar0')) ob02 = rev.add_object(xorn.storage.Text(text = 'foo0baz0')) ob03 = rev.add_object(xorn.storage.Text(text = 'foo0=baz0')) ob10 = rev.add_object(xorn.storage.Text(text = 'foo1bar1')) ob11 = rev.add_object(xorn.storage.Text(text = 'foo1=bar1')) ob12 = rev.add_object(xorn.storage.Text(text = 'foo1baz1')) ob13 = rev.add_object(xorn.storage.Text(text = 'foo1=baz1')) ob20 = rev.add_object(xorn.storage.Text(text = 'foo2bar2')) ob21 = rev.add_object(xorn.storage.Text(text = 'foo2=bar2')) ob22 = rev.add_object(xorn.storage.Text(text = 'foo2baz2')) ob23 = rev.add_object(xorn.storage.Text(text = 'foo2=baz2')) net = rev.add_object(xorn.storage.Net()) ob10.relocate(net, None) ob11.relocate(net, None) ob12.relocate(net, None) ob13.relocate(net, None) component = rev.add_object(xorn.storage.Component()) ob20.relocate(component, None) ob21.relocate(component, None) ob22.relocate(component, None) ob23.relocate(component, None) class Symbol: pass component.symbol = Symbol() component.symbol.prim_objs = xorn.storage.Revision() srev = xorn.proxy.RevisionProxy(component.symbol.prim_objs) ob30 = srev.add_object(xorn.storage.Text(text = 'foo3bar3')) ob31 = srev.add_object(xorn.storage.Text(text = 'foo3=bar3')) ob32 = srev.add_object(xorn.storage.Text(text = 'foo3baz3')) ob33 = srev.add_object(xorn.storage.Text(text = 'foo3=baz3')) ob40 = srev.add_object(xorn.storage.Text(text = 'foo4bar4')) ob41 = srev.add_object(xorn.storage.Text(text = 'foo4=bar4')) ob42 = srev.add_object(xorn.storage.Text(text = 'foo4baz4')) ob43 = srev.add_object(xorn.storage.Text(text = 'foo4=baz4')) pin = srev.add_object(xorn.storage.Net(is_pin = True)) ob40.relocate(pin, None) ob41.relocate(pin, None) ob42.relocate(pin, None) ob43.relocate(pin, None) nonexisting = rev.add_object(xorn.storage.Circle()) rev.delete_object(nonexisting) # is_attribute assert GA.is_attribute(ob00) == False assert GA.is_attribute(ob01) == True assert GA.is_attribute(ob02) == False assert GA.is_attribute(ob03) == True assert GA.is_attribute(net) == False assert GA.is_attribute(component) == False assert GA.is_attribute(pin) == False assert throws(GA.is_attribute, nonexisting) == KeyError # inherited_objects assert GA.inherited_objects(component) == [ob30, ob31, ob32, ob33, pin] assert throws(GA.inherited_objects, ob00) == ValueError assert throws(GA.inherited_objects, ob01) == ValueError assert throws(GA.inherited_objects, net) == ValueError assert throws(GA.inherited_objects, pin) == ValueError assert throws(GA.inherited_objects, nonexisting) 
== KeyError # find_floating_attribs assert GA.find_floating_attribs(rev) == [ob01, ob03] assert GA.find_floating_attribs(srev) == [ob31, ob33] # find_attached_attribs assert GA.find_attached_attribs(ob00) == [] assert GA.find_attached_attribs(ob01) == [] assert GA.find_attached_attribs(net) == [ob11, ob13] assert GA.find_attached_attribs(component) == [ob21, ob23] assert GA.find_attached_attribs(pin) == [ob41, ob43] assert throws(GA.find_attached_attribs, nonexisting) == KeyError # find_inherited_attribs assert GA.find_inherited_attribs(component) == [ob31, ob33] assert throws(GA.find_inherited_attribs, ob00) == ValueError assert throws(GA.find_inherited_attribs, ob01) == ValueError assert throws(GA.find_inherited_attribs, net) == ValueError assert throws(GA.find_inherited_attribs, pin) == ValueError assert throws(GA.find_inherited_attribs, nonexisting) == KeyError # find_all_attribs assert GA.find_all_attribs(component) == [ob21, ob23, ob31, ob33] assert throws(GA.find_all_attribs, ob00) == ValueError assert throws(GA.find_all_attribs, ob01) == ValueError assert throws(GA.find_all_attribs, net) == ValueError assert throws(GA.find_all_attribs, pin) == ValueError assert throws(GA.find_all_attribs, nonexisting) == KeyError # search assert GA.search([ob00, ob01, ob02, ob03, ob10, ob11, ob12, ob13], 'foo0') == ['bar0', 'baz0'] assert GA.search([ob00, ob01, ob02, ob03, ob10, ob11, ob12, ob13], 'foo1') == ['bar1', 'baz1'] # search_floating assert GA.search_floating(rev, 'foo0') == ['bar0', 'baz0'] assert GA.search_floating(rev, 'foo1') == [] assert GA.search_floating(rev, 'foo2') == [] assert GA.search_floating(rev, 'foo3') == [] assert GA.search_floating(rev, 'foo4') == [] assert GA.search_floating(srev, 'foo0') == [] assert GA.search_floating(srev, 'foo1') == [] assert GA.search_floating(srev, 'foo2') == [] assert GA.search_floating(srev, 'foo3') == ['bar3', 'baz3'] assert GA.search_floating(srev, 'foo4') == [] # search_attached assert GA.search_attached(ob00, 'foo0') == [] assert GA.search_attached(ob00, 'foo1') == [] assert GA.search_attached(ob00, 'foo2') == [] assert GA.search_attached(ob00, 'foo3') == [] assert GA.search_attached(ob00, 'foo4') == [] assert GA.search_attached(ob01, 'foo0') == [] assert GA.search_attached(ob01, 'foo1') == [] assert GA.search_attached(ob01, 'foo2') == [] assert GA.search_attached(ob01, 'foo3') == [] assert GA.search_attached(ob01, 'foo4') == [] assert GA.search_attached(net, 'foo0') == [] assert GA.search_attached(net, 'foo1') == ['bar1', 'baz1'] assert GA.search_attached(net, 'foo2') == [] assert GA.search_attached(net, 'foo3') == [] assert GA.search_attached(net, 'foo4') == [] assert GA.search_attached(component, 'foo0') == [] assert GA.search_attached(component, 'foo1') == [] assert GA.search_attached(component, 'foo2') == ['bar2', 'baz2'] assert GA.search_attached(component, 'foo3') == [] assert GA.search_attached(component, 'foo4') == [] assert GA.search_attached(pin, 'foo0') == [] assert GA.search_attached(pin, 'foo1') == [] assert GA.search_attached(pin, 'foo2') == [] assert GA.search_attached(pin, 'foo3') == [] assert GA.search_attached(pin, 'foo4') == ['bar4', 'baz4'] assert throws(GA.search_attached, nonexisting, 'foo0') == KeyError # search_inherited assert GA.search_inherited(component, 'foo0') == [] assert GA.search_inherited(component, 'foo1') == [] assert GA.search_inherited(component, 'foo2') == [] assert GA.search_inherited(component, 'foo3') == ['bar3', 'baz3'] assert GA.search_inherited(component, 'foo4') == [] assert 
throws(GA.search_inherited, ob00, 'foo0') == ValueError assert throws(GA.search_inherited, ob01, 'foo0') == ValueError assert throws(GA.search_inherited, net, 'foo0') == ValueError assert throws(GA.search_inherited, pin, 'foo0') == ValueError assert throws(GA.search_inherited, nonexisting, 'foo0') == KeyError # search_all assert GA.search_all(component, 'foo0') == [] assert GA.search_all(component, 'foo1') == [] assert GA.search_all(component, 'foo2') == ['bar2', 'baz2'] assert GA.search_all(component, 'foo3') == ['bar3', 'baz3'] assert GA.search_all(component, 'foo4') == [] assert throws(GA.search_all, ob00, 'foo0') == ValueError assert throws(GA.search_all, ob01, 'foo0') == ValueError assert throws(GA.search_all, net, 'foo0') == ValueError assert throws(GA.search_all, pin, 'foo0') == ValueError assert throws(GA.search_all, nonexisting, 'foo0') == KeyError # find_pins_by_attribute assert GA.find_pins_by_attribute(component, 'foo3', 'bar3') == [] assert GA.find_pins_by_attribute(component, 'foo3', 'baz3') == [] assert GA.find_pins_by_attribute(component, 'foo3', 'bar4') == [] assert GA.find_pins_by_attribute(component, 'foo3', 'baz4') == [] assert GA.find_pins_by_attribute(component, 'foo4', 'bar3') == [] assert GA.find_pins_by_attribute(component, 'foo4', 'baz3') == [] assert GA.find_pins_by_attribute(component, 'foo4', 'bar4') == [pin] assert GA.find_pins_by_attribute(component, 'foo4', 'baz4') == [pin] pin.is_pin = False assert GA.find_pins_by_attribute(component, 'foo4', 'bar4') == [] assert GA.find_pins_by_attribute(component, 'foo4', 'baz4') == [] assert throws(GA.find_pins_by_attribute, ob00, 'foo3', 'bar3') == ValueError assert throws(GA.find_pins_by_attribute, ob01, 'foo3', 'bar3') == ValueError assert throws(GA.find_pins_by_attribute, net, 'foo3', 'bar3') == ValueError assert throws(GA.find_pins_by_attribute, pin, 'foo3', 'bar3') == ValueError assert throws(GA.find_pins_by_attribute, nonexisting, 'foo3', 'bar3') == KeyError
rlutz/xorn
tests/gaf/attrib.py
Python
gpl-2.0
9,184
#!/usr/bin/python
import codecs
import sys


def correct(scriptfile):
    """Replace too-short lines in a UTF-16 script file with a <STA/> tag."""
    script = codecs.open(scriptfile, 'r', 'utf-16')
    error = []
    index = 0
    scriptlines = script.readlines()
    for line in scriptlines:
        if len(line) < 4:
            print '%s line: %d \n' % (scriptfile, index)
            error.append(index)
        index += 1
    script.close()
    if len(error) != 0:
        for err in error:
            scriptlines[err] = '\t<STA/> \n'
        script = codecs.open(scriptfile, 'w+', 'utf-16')
        script.writelines(scriptlines)
        script.close()


if __name__ == '__main__':
    correct(sys.argv[1])
drawfish/VAD_HTK
pyscrp/noneContex.py
Python
gpl-2.0
628
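# Example invocation for the script above (hypothetical file name); it reports
# lines shorter than 4 characters and rewrites them in place as '\t<STA/> \n':
#   python noneContex.py transcript.scp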
# -*- coding: utf-8 -*- """The :program:`celery events` command. .. program:: celery events .. seealso:: See :ref:`preload-options` and :ref:`daemon-options`. .. cmdoption:: -d, --dump Dump events to stdout. .. cmdoption:: -c, --camera Take snapshots of events using this camera. .. cmdoption:: --detach Camera: Detach and run in the background as a daemon. .. cmdoption:: -F, --freq, --frequency Camera: Shutter frequency. Default is every 1.0 seconds. .. cmdoption:: -r, --maxrate Camera: Optional shutter rate limit (e.g., 10/m). .. cmdoption:: -l, --loglevel Logging level, choose between `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`, or `FATAL`. Default is INFO. .. cmdoption:: -f, --logfile Path to log file. If no logfile is specified, `stderr` is used. .. cmdoption:: --pidfile Optional file used to store the process pid. The program won't start if this file already exists and the pid is still alive. .. cmdoption:: --uid User id, or user name of the user to run as after detaching. .. cmdoption:: --gid Group id, or group name of the main group to change to after detaching. .. cmdoption:: --umask Effective umask (in octal) of the process after detaching. Inherits the umask of the parent process by default. .. cmdoption:: --workdir Optional directory to change to after detaching. .. cmdoption:: --executable Executable to use for the detached process. """ from __future__ import absolute_import, unicode_literals import sys from functools import partial from celery.bin.base import Command, daemon_options from celery.platforms import detached, set_process_title, strargv __all__ = ('events',) HELP = __doc__ class events(Command): """Event-stream utilities. Notes: .. code-block:: console # - Start graphical monitor (requires curses) $ celery events --app=proj $ celery events -d --app=proj # - Dump events to screen. $ celery events -b amqp:// # - Run snapshot camera. $ celery events -c <camera> [options] Examples: .. 
code-block:: console $ celery events $ celery events -d $ celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info """ doc = HELP supports_args = False def run(self, dump=False, camera=None, frequency=1.0, maxrate=None, loglevel='INFO', logfile=None, prog_name='celery events', pidfile=None, uid=None, gid=None, umask=None, workdir=None, detach=False, **kwargs): self.prog_name = prog_name if dump: return self.run_evdump() if camera: return self.run_evcam(camera, freq=frequency, maxrate=maxrate, loglevel=loglevel, logfile=logfile, pidfile=pidfile, uid=uid, gid=gid, umask=umask, workdir=workdir, detach=detach) return self.run_evtop() def run_evdump(self): from celery.events.dumper import evdump self.set_process_status('dump') return evdump(app=self.app) def run_evtop(self): from celery.events.cursesmon import evtop self.set_process_status('top') return evtop(app=self.app) def run_evcam(self, camera, logfile=None, pidfile=None, uid=None, gid=None, umask=None, workdir=None, detach=False, **kwargs): from celery.events.snapshot import evcam self.set_process_status('cam') kwargs['app'] = self.app cam = partial(evcam, camera, logfile=logfile, pidfile=pidfile, **kwargs) if detach: with detached(logfile, pidfile, uid, gid, umask, workdir): return cam() else: return cam() def set_process_status(self, prog, info=''): prog = '{0}:{1}'.format(self.prog_name, prog) info = '{0} {1}'.format(info, strargv(sys.argv)) return set_process_title(prog, info=info) def add_arguments(self, parser): dopts = parser.add_argument_group('Dumper') dopts.add_argument('-d', '--dump', action='store_true', default=False) copts = parser.add_argument_group('Snapshot') copts.add_argument('-c', '--camera') copts.add_argument('--detach', action='store_true', default=False) copts.add_argument('-F', '--frequency', '--freq', type=float, default=1.0) copts.add_argument('-r', '--maxrate') copts.add_argument('-l', '--loglevel', default='INFO') daemon_options(parser, default_pidfile='celeryev.pid') user_options = self.app.user_options['events'] if user_options: self.add_compat_options( parser.add_argument_group('User Options'), user_options) def main(): ev = events() ev.execute_from_commandline() if __name__ == '__main__': # pragma: no cover main()
kawamon/hue
desktop/core/ext-py/celery-4.2.1/celery/bin/events.py
Python
apache-2.0
5,172
import math
from copy import copy

from pyglet.gl import *
from euclid import Matrix4, Vector3, Point3

from renderer import Renderer


class Node(object):
    def __init__(self, parent=None):
        self.model = Matrix4()
        self.transform = Matrix4()
        self.renderables = []
        self.children = []
        self._parent = None
        self.parent = parent

    def _set_parent(self, parent):
        if self._parent:
            self._parent.children.remove(self)
        self._parent = parent
        if self._parent:
            self._parent.children.append(self)

    def _get_parent(self):
        return self._parent
    parent = property(_get_parent, _set_parent)

    def update(self, camera):
        self._update_matrices()
        self._update_children(camera)

    def _update_matrices(self):
        if self.parent:
            self.transform = self.parent.transform * self.model
        else:
            self.transform = copy(self.model)

    def _update_children(self, camera):
        for r in self.renderables:
            r.update(camera, self.transform)
        for c in self.children:
            c.update(camera)

    def render(self, camera):
        for r in self.renderables:
            Renderer.render(camera, self.transform, r, r.render_pass)
        for c in self.children:
            c.render(camera)


class BillboardNode(Node):
    def render(self, camera):
        m, v = self.transform, camera.view
        m.a, m.b, m.c = v.a, v.e, v.i
        m.e, m.f, m.g = v.b, v.f, v.j
        m.i, m.j, m.k = v.c, v.g, v.k
        Node.render(self, camera)


class ZAxisBillboardNode(Node):
    def render(self, camera):
        '''m, v = self.transform, camera.view
        m.a, m.b, m.c = v.a, v.e, 0
        m.e, m.f, m.g = v.b, v.f, 0
        m.i, m.j, m.k = 0, 0, 1'''
        cam = camera.view * Point3() - self.transform * Point3()
        z = self.transform * Vector3(0, 0, 1)
        y = cam.cross(z)
        self.transform = Matrix4.new_rotate_triple_axis(cam, y, z)
        Node.render(self, camera)


class SkyNode(Node):
    def render(self, camera):
        m = self.transform
        m.a, m.b, m.c = 1, 0, 0
        m.e, m.f, m.g = 0, 1, 0
        m.i, m.j, m.k = 0, 0, 1
        Node.render(self, camera)


class IntervalNode(Node):
    def __init__(self, interval=10, parent=None):
        Node.__init__(self, parent)
        self.interval = interval

    def render(self, camera):
        m = self.transform
        m.a, m.b, m.c = 1, 0, 0
        m.e, m.f, m.g = 0, 1, 0
        m.i, m.j, m.k = 0, 0, 1
        m.d -= math.fmod(m.d, self.interval)
        m.h -= math.fmod(m.h, self.interval)
        m.l -= math.fmod(m.l, self.interval)
        Node.render(self, camera)
swiftcoder/ashima-iv
src/node.py
Python
bsd-3-clause
2,391
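# Minimal scene-graph sketch for the Node class above (hypothetical names;
# Renderable objects and the camera come from the rest of the engine):
#   root = Node()
#   turret = Node(parent=root)   # registers itself in root.children
#   root.update(camera)          # propagates root.transform into turret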
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals """This file is part of the django ERP project. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>' __copyright__ = 'Copyright (c) 2013-2015, django ERP Team' __version__ = '0.0.1' from django.test import TestCase, RequestFactory from django.test.utils import override_settings from django.shortcuts import render_to_response from django.contrib.auth import get_user_model from django.contrib.auth.models import AnonymousUser from django.conf import settings from ..models import Permission, ObjectPermission from ..decorators import * def _get_user(request, *args, **kwargs): pk = kwargs.get("pk") user = None if pk: user = get_user_model().objects.get(pk=pk) return user def _get_perm_name(request, *args, **kwargs): return "core.view_user" @obj_permission_required("core.view_user", _get_user) def test_decorator_view(request, *args, **kwargs): return render_to_response('index.html') @obj_permission_required(_get_perm_name, _get_user) def test_decorator_view2(request, *args, **kwargs): return render_to_response('index.html') @override_settings( LOGIN_REQUIRED_URLS_EXCEPTIONS=(r'/(.*)$',), TEMPLATE_DIRS=("%s/core/tests/templates" % settings.PROJECT_PATH,), ) class ObjPermissionRequiredTestCase(TestCase): def setUp(self): self.factory = RequestFactory() self.user_model = get_user_model() # Please note that "User.objects.create_user" is different from # "User.objects.create" because it stores the hash of given password. self.u1 = self.user_model.objects.create_user("u1", "u1@u.it", "password") self.u2 = self.user_model.objects.create_user("u2", "u@u.it", "password") self.u3 = self.user_model.objects.create_user("u3", "u@u.it", "password") self.u4 = self.user_model.objects.create_user("u4", "u@u.it", "password") self.perm, n = Permission.objects.get_or_create_by_natural_key("view_user", "core", "user") self.obj_perm, n = ObjectPermission.objects.get_or_create_by_natural_key("view_user", "core", "user", self.u3.pk) self.u1.user_permissions.add(self.perm) self.u2.objectpermissions.add(self.obj_perm) def test_decorator_with_anonymous_user(self): """Tests an anonymous user should not pass the decorator's test. """ request = self.factory.get('/view_user/') request.user = AnonymousUser() response = test_decorator_view(request) self.assertEqual(response.status_code, 302) def test_get_perm_by_string_without_object(self): """Tests the decorator passing only the perm name by string (no obj). In this case it looks for a table-permission, instead of row-permission. """ request = self.factory.get('/view_user/') request.user = self.u1 response = test_decorator_view(request) self.assertEqual(response.status_code, 200) request.user = self.u2 response = test_decorator_view(request) self.assertEqual(response.status_code, 302) def test_get_perm_by_string_with_object(self): """Tests the decorator passing only the perm name by string (with obj). 
""" request = self.factory.get('/view_user/') request.user = self.u1 response = test_decorator_view(request, pk=self.u3.pk) self.assertEqual(response.status_code, 200) request.user = self.u4 response = test_decorator_view(request, pk=self.u3.pk) self.assertEqual(response.status_code, 302) def test_get_obj_perm_by_string_with_object(self): """Tests the decorator passing only the obj perm name by string (with obj). """ request = self.factory.get('/view_user/') request.user = self.u2 response = test_decorator_view(request, pk=self.u3.pk) self.assertEqual(response.status_code, 200) request.user = self.u4 response = test_decorator_view(request, pk=self.u3.pk) self.assertEqual(response.status_code, 302) def test_fail_perm_by_string_with_object(self): """Tests failure of decorator passing perm name by string (with obj). """ request = self.factory.get('/view_user/') request.user = self.u2 response = test_decorator_view(request, pk=self.u1.pk) self.assertEqual(response.status_code, 302) def test_get_obj_perm_by_callable_with_object(self): """Tests the decorator passing a callable for obj perm name (with obj). """ request = self.factory.get('/view_user/') request.user = self.u2 response = test_decorator_view2(request, pk=self.u3.pk) self.assertEqual(response.status_code, 200) def test_fail_obj_perm_by_callable_with_object(self): """Tests failure of decorator passing a callable for obj perm name (with obj). """ request = self.factory.get('/view_user/') request.user = self.u2 response = test_decorator_view2(request, pk=self.u1.pk) self.assertEqual(response.status_code, 302)
mobb-io/django-erp
djangoerp/core/tests/test_decorators.py
Python
mit
5,954
import platform
import subprocess
import sys

if platform.system() == 'Windows':
    import ctypes
    AdvAPI32 = ctypes.windll.Advapi32
    from ctypes import POINTER

    UNLEN = 256

    GetUserNameW = AdvAPI32.GetUserNameW
    GetUserNameW.argtypes = (
        ctypes.c_wchar_p,        # _In_Out_ lpBuffer
        POINTER(ctypes.c_uint)   # _In_out_ pcBuffer
    )
    GetUserNameW.restype = ctypes.c_uint

    buffer = ctypes.create_unicode_buffer(UNLEN + 1)
    size = ctypes.c_uint(len(buffer))

    GetUserNameW(buffer, ctypes.byref(size))

    # For NetworkService, Host$ is returned, so we have to turn it back
    # into something that icacls understands.
    if not buffer.value.endswith('$'):
        user_name = buffer.value
    else:
        user_name = 'NT AUTHORITY\\NetworkService'

    for path in sys.argv[1:]:
        subprocess.call(['icacls', path, '/deny', '{}:(R)'.format(user_name)])
else:
    for path in sys.argv[1:]:
        subprocess.call(['chmod', 'a-r', path])
apple/swift
test/ModuleInterface/Inputs/make-unreadable.py
Python
apache-2.0
1,032
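# Usage for the script above: pass one or more paths; read access is denied
# for the resolved user via icacls on Windows, or removed for everyone via
# chmod elsewhere, e.g. (hypothetical path):
#   python make-unreadable.py /tmp/module-cache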
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4

from django.conf.urls.defaults import *
from . import views

urlpatterns = patterns('',
    (r'^ajax/(?P<path>.*)$', views.proxy))
ken-muturi/rapidsms
lib/rapidsms/contrib/ajax/urls.py
Python
bsd-3-clause
185
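# With this urlconf mounted at the site root, a request such as
# GET /ajax/status/ is dispatched to views.proxy with path='status/'
# captured by the regex group.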
from pyramid.httpexceptions import HTTPFound # from pyramid.response import Response from collections import defaultdict from pyramid.renderers import get_renderer from ..models import ( ConcisionReport, ) import json from ..lib import ( html_f, consts, report_f, converters, filter_funcs, # graphing, # pretty_sql, # joins, report_display, ) from .. import config def list_reports(request): the_user = config['get_user_func'](request) layout = get_renderer(config['layout']).implementation() report_list = config['DBSession'].query(ConcisionReport).filter(ConcisionReport.creator == the_user.id).order_by(ConcisionReport.name.asc()) return dict( title = "Concision reports", layout = layout, the_user = the_user, report_list = report_list, ) def new(request): the_user = config['get_user_func'](request) layout = get_renderer(config['layout']).implementation() if "form.submitted" in request.params: the_report = ConcisionReport() the_report.name = request.params['name'].strip() the_report.creator = the_user.id the_report.data = json.dumps(report_f.check_report_data({})) config['DBSession'].add(the_report) q = config['DBSession'].query(ConcisionReport.id).filter(ConcisionReport.creator == the_report.creator).order_by(ConcisionReport.id.desc()).first()[0] return HTTPFound(location=request.route_url("concision.report.overview", report_id=q)) return dict( title = "Report name", layout = layout, the_user = the_user, ) def overview(request): layout = get_renderer(config['layout']).implementation() report_id = int(request.matchdict['report_id']) the_report = config['DBSession'].query(ConcisionReport).filter(ConcisionReport.id == report_id).first() data = the_report.extract_data() report_f.check_report_data(data) tablist = report_display.tablist(data) # seletable_columns = [] # for t in data['tables']: # the_source = config['sources'][t] # seletable_columns.extend([("%s.%s" % (t, c), "%s %s" % (the_source.label, the_source.column_labels.get(c, c))) for c in the_source.columns]) # filter_html = display.filter_html(data['filters']).replace("[report_id]", str(report_id)) # # Grouping by # selected_columns = defaultdict(list) # for s in data['tables']: # prelude = lambda c: '%s.%s' % (s, c) in data.get('columns', []) or '%s.%s' % (s, c) == data.get('key', "") # the_source = config['sources'][s] # selected_columns[s] = list(filter(prelude, the_source.columns)) return dict( title = "Concision query", layout = layout, # the_user = the_user, # the_report = the_report, # data = data, # tables = list(display.tables(data)), # columns = list(display.columns(data)), # filter_html = filter_html, # orderbys = list(display.orderbys(data)), # query_key = display.query_key(data), report_id = report_id, tablist = tablist, # seletable_columns = seletable_columns, # html_f = html_f, # consts = consts, )
Teifion/concision
views/report.py
Python
bsd-2-clause
3,385
# -*- coding: UTF-8 -*- ####################################################################### # ---------------------------------------------------------------------------- # "THE BEER-WARE LICENSE" (Revision 42): # @Daddy_Blamo wrote this file. As long as you retain this notice you # can do whatever you want with this stuff. If we meet some day, and you think # this stuff is worth it, you can buy me a beer in return. - Muad'Dib # ---------------------------------------------------------------------------- ####################################################################### # Addon Name: Placenta # Addon id: plugin.video.placenta # Addon Provider: Mr.Blamo import re import urllib import urlparse from resources.lib.modules import cache from resources.lib.modules import cleantitle from resources.lib.modules import client from resources.lib.modules import source_utils from resources.lib.modules import dom_parser class source: def __init__(self): self.priority = 1 self.language = ['en'] self.domains = ['movie4k.org'] self._base_link = None self.search_link = '/movies.php?list=search&search=%s' @property def base_link(self): if not self._base_link: self._base_link = cache.get(self.__get_base_url, 120, 'http://%s' % self.domains[0]) return self._base_link def movie(self, imdb, title, localtitle, aliases, year): try: url = self.__search(imdb, [localtitle] + source_utils.aliases_to_array(aliases), year) if not url and title != localtitle: url = self.__search(imdb, [title] + source_utils.aliases_to_array(aliases), year) return url except: return def sources(self, url, hostDict, hostprDict): sources = [] try: if not url: return sources url = urlparse.urljoin(self.base_link, url) r = client.request(url) r = r.replace('\\"', '"') links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'}) for i in links: try: host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt'] host = host.split()[0].rsplit('.', 1)[0].strip().lower() host = host.encode('utf-8') valid, host = source_utils.is_host_valid(host, hostDict) if not valid: continue url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href'] url = client.replaceHTMLCodes(url) url = urlparse.urljoin(self.base_link, url) url = url.encode('utf-8') sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False}) except: pass return sources except: return sources def resolve(self, url): try: h = urlparse.urlparse(url.strip().lower()).netloc r = client.request(url) r = r.rsplit('"underplayer"')[0].rsplit("'underplayer'")[0] u = re.findall('\'(.+?)\'', r) + re.findall('\"(.+?)\"', r) u = [client.replaceHTMLCodes(i) for i in u] u = [i for i in u if i.startswith('http') and not h in i] url = u[-1].encode('utf-8') return url except: return def __search(self, imdb, titles, year): try: q = self.search_link % urllib.quote_plus(cleantitle.query(titles[0])) q = urlparse.urljoin(self.base_link, q) t = [cleantitle.get(i) for i in set(titles) if i] y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0'] r = client.request(q) r = dom_parser.parse_dom(r, 'tr', attrs={'id': re.compile('coverPreview.+?')}) r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'div', attrs={'style': re.compile('.+?')}), dom_parser.parse_dom(i, 'img', req='src')) for i in r] r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1], i[2]) for i in r if i[0] and i[2]] r = [(i[0], i[1], [x.content for x in i[2] if x.content.isdigit() and len(x.content) == 4], i[3]) 
for i in r] r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r] r = [i for i in r if any('us_flag' in x.attrs['src'] for x in i[3])] r = [(i[0], i[1], i[2], [re.findall('(\d+)', x.attrs['src']) for x in i[3] if 'smileys' in x.attrs['src']]) for i in r] r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r] r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r] r = sorted(r, key=lambda x: x[3])[::-1] r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r] r = [(i[0], i[1], i[2]) for i in r if not i[3]] r = [i for i in r if i[2] in y] r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r] match = [i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]] match2 = [i[0] for i in r] match2 = [x for y, x in enumerate(match2) if x not in match2[:y]] if match2 == []: return for i in match2[:5]: try: if match: url = match[0]; break r = client.request(urlparse.urljoin(self.base_link, i)) r = re.findall('(tt\d+)', r) if imdb in r: url = i; break except: pass return source_utils.strip_domain(url) except: return def __get_base_url(self, fallback): try: for domain in self.domains: try: url = 'http://%s' % domain r = client.request(url, limit=1, timeout='10') r = dom_parser.parse_dom(r, 'meta', attrs={'name': 'author'}, req='content') if r and 'movie4k.to' in r[0].attrs.get('content').lower(): return url except: pass except: pass return fallback
RuiNascimento/krepo
script.module.lambdascrapers/lib/lambdascrapers/sources_placenta/en_placenta-1.7.8/movie4korg.py
Python
gpl-2.0
6,425
from fabric.api import *
from fabric.utils import *
from fabric.contrib import *


class Apt(object):
    """Thin Fabric wrapper around common apt commands."""

    def update(self):
        # Refresh the package index on the remote host.
        cmd = 'sudo apt update'
        run(cmd)

    def purge(self, package):
        # Remove the package together with its configuration files.
        cmd = 'sudo apt purge -y %(package)s' % {'package': package}
        run(cmd)

    def upgrade(self):
        # Upgrade all installed packages non-interactively.
        cmd = 'sudo apt upgrade -y'
        run(cmd)

    def install(self, package):
        if package is not None:
            cmd = 'sudo apt -y install %(package)s' % {'package': package}
            run(cmd)
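
# Illustrative usage sketch (not part of the original module); assumes the
# class is driven from a fabfile through Fabric's task machinery:
#
#   from apt import Apt
#
#   def provision():
#       apt = Apt()
#       apt.update()
#       apt.install('htop')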
stregatto/fabric_lib
apt.py
Python
gpl-2.0
651
# ----------------------------------------------------------------------------
# A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation
#-----------------------------------------------------------------------------
# Copyright (c) 2016 Federico Perazzi
# Licensed under the BSD License [see LICENSE for details]
# Written by Federico Perazzi
# Adapted from FAST-RCNN (Ross Girshick)
# ----------------------------------------------------------------------------

""" Configuration file."""

import os
import os.path as osp
import sys
import yaml

from easydict import EasyDict as edict

__C = edict()

# Public access to configuration settings
cfg = __C

# Paths to dataset folders
__C.PATH = edict()

# Dataset resolution. Available: 1080p, 480p
__C.RESOLUTION = "480p"

# Root folder of project
__C.PATH.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..', '..'))

# Data folder
__C.PATH.DATA_DIR = osp.abspath(osp.join(__C.PATH.ROOT_DIR, 'data/DAVIS'))

# Results folder
__C.PATH.RESULTS_DIR = osp.abspath(osp.join(__C.PATH.DATA_DIR, 'Results'))

# Resulting segmentation mask folder
__C.PATH.SEGMENTATION_DIR = osp.abspath(
    osp.join(__C.PATH.RESULTS_DIR, 'Segmentations', __C.RESOLUTION))

# Evaluation folder
__C.PATH.EVAL_DIR = osp.abspath(
    osp.join(__C.PATH.RESULTS_DIR, 'Evaluation', __C.RESOLUTION))

# Path to input images
__C.PATH.SEQUENCES_DIR = osp.join(__C.PATH.DATA_DIR, "JPEGImages", __C.RESOLUTION)

# Path to annotations
__C.PATH.ANNOTATION_DIR = osp.join(__C.PATH.DATA_DIR, "Annotations", __C.RESOLUTION)

# Paths to files
__C.FILES = edict()

# Path to property file, holding information on evaluation sequences.
__C.FILES.DB_INFO = osp.abspath(osp.join(__C.PATH.DATA_DIR, "Annotations/db_info.yml"))

# Define the set of techniques to be loaded. Accepted options: [paper, all]
__C.EVAL_SET = "all"
assert __C.EVAL_SET in ('paper', 'all')

# Path to technique file, holding information about benchmark data
__C.FILES.DB_BENCHMARK = osp.abspath(
    osp.join(__C.PATH.RESULTS_DIR, "Evaluation/db_benchmark.yml"))

__C.N_JOBS = 32


# Append path for cpp libraries
def _set_path_to_cpp_libs():
    sys.path.append(osp.abspath(
        osp.join(cfg.PATH.ROOT_DIR, 'build/release')))
fperazzi/davis
python/lib/davis/config.py
Python
bsd-3-clause
2,238
# pylint: disable=missing-docstring,import-error,unused-import, import-modules-only import first from first import second from third import Fourth, Fifth # [multiple-import-items] from sixth import Sixth, seventh, Eighth # [multiple-import-items] import eighth
Shopify/shopify_python
tests/functional/multiple_import_items.py
Python
mit
264
#!/usr/bin/env python

import test_helper
import os
import re

test_path = os.path.dirname(__file__)

for root, dirs, files in os.walk(test_path):
    # Collect test modules whose filenames start with at least three digits
    # (e.g. 001_foo.py), stripping the .py extension.
    modules = [name[:-3] for name in sorted(files)
               if name.endswith('.py') and re.match(r'\d{3,}', name)]
    suite = test_helper.TestSuite()
    for module_name in modules:
        module = __import__(module_name)
        suite.addTest(
            test_helper.defaultTestLoader.loadTestsFromModule(module)
        )
    test_helper.TextTestRunner(verbosity=2).run(suite)
ethanrowe/thunderhead
t/runner.py
Python
gpl-3.0
480
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Metric class used in monitor mixin framework. """ import numpy class Metric(object): """ A metric computed over a set of data (usually from a `CountsTrace`). """ def __init__(self, monitor, title, data): """ @param monitor (MonitorMixinBase) Monitor Mixin instance that generated this trace @param title (string) Title @param data (list) List of numbers to compute metric from """ self.monitor = monitor self.title = title self.min = None self.max = None self.sum = None self.mean = None self.standardDeviation = None self._computeStats(data) @staticmethod def createFromTrace(trace, excludeResets=None): data = list(trace.data) if excludeResets is not None: data = [x for i, x in enumerate(trace.data) if not excludeResets.data[i]] return Metric(trace.monitor, trace.title, data) def copy(self): metric = Metric(self.monitor, self.title, []) metric.min = self.min metric.max = self.max metric.sum = self.sum metric.mean = self.mean metric.standardDeviation = self.standardDeviation return metric def prettyPrintTitle(self): return ("[{0}] {1}".format(self.monitor.mmName, self.title) if self.monitor.mmName is not None else self.title) def _computeStats(self, data): if not len(data): return self.min = min(data) self.max = max(data) self.sum = sum(data) self.mean = numpy.mean(data) self.standardDeviation = numpy.std(data) def getStats(self, sigFigs=7): if self.mean is None: return [None, None, None, None, None] return [round(self.mean, sigFigs), round(self.standardDeviation, sigFigs), round(self.min, sigFigs), round(self.max, sigFigs), round(self.sum, sigFigs)]
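
# Illustrative usage sketch (not part of the original module); `monitor` is
# assumed to be a MonitorMixinBase instance and `trace` a CountsTrace taken
# from it:
#
#   metric = Metric.createFromTrace(trace, excludeResets=None)
#   mean, std, minimum, maximum, total = metric.getStats(sigFigs=4)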
david-ragazzi/nupic
nupic/research/monitor_mixin/metric.py
Python
gpl-3.0
2,865
# ====================================================================== # Given vtu/pvtu file(s) resulting from an MVR elasticity simulation # (the solution files hence contain three scalar valued arrays named # 'u0', 'u1' and 'u2' as PointData), this vtk-based python script # warps the initial/original mesh geometry by means of the displacement # vector, and computes the Cauchy strain tensor and the von Mises stress. # # The script's output is the following: # - vtu file that contains all the data of the # original file with modified coordinates of the points and the # displacement vector as additional PointData and the strain tensor # along with the von Mises stress (w.r.t. the Lame parameters # lambda = 28466, mu = 700 for mitral valve tissue, according to # [Mansi-2012]) as additional CellData. # # How to run the script: # python calculator.py ./input/myInput.(p)vtu ./output/myOutput.vtu # # Author: Nicolai Schoch, EMCL; 2015-04-12. # ====================================================================== __author__ = 'schoch' import sys import vtk #from .msmlvtk import * # NEEDED?! def compute_vonMisesStress_for_MV(inputfilename, outputfilename): # ====================================================================== # get system arguments ------------------------------------------------- # Path to input file and name of the output file #inputfilename = sys.argv[1] #outputfilename = sys.argv[2] print " " print "==================================================================================================" print "=== Execute Python script to analyze MV geometry in order for the HiFlow3-based MVR-Simulation ===" print "==================================================================================================" print " " # ====================================================================== # Read file if inputfilename[-4] == 'p': reader = vtk.vtkXMLPUnstructuredGridReader() reader.SetFileName(inputfilename) reader.Update() else: reader = vtk.vtkXMLUnstructuredGridReader() reader.SetFileName(inputfilename) reader.Update() print "Reading input files: DONE." 
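
    # The lengthy vtkArrayCalculator expression below evaluates the von Mises
    # stress directly from the Cauchy strain tensor 'Strain' (3x3, row-major
    # components Strain_0..Strain_8; Strain_3/Strain_6/Strain_7 are the shear
    # entries of the symmetric tensor). With the Lame parameters mu = 700 and
    # lambda = 28466 (mitral valve tissue, see header), the stresses are
    #     sigma_ii = 2*mu*Strain_ii + lambda*(Strain_0+Strain_4+Strain_8)
    #     tau_ij   = 2*mu*Strain_ij
    # and the computed quantity is the standard von Mises stress
    #     sqrt(s00^2 + s11^2 + s22^2 - s00*s11 - s00*s22 - s11*s22
    #          + 3*(t10^2 + t20^2 + t21^2))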
    # ======================================================================
    # Compute displacement vector
    calc = vtk.vtkArrayCalculator()
    calc.SetInput(reader.GetOutput())
    calc.SetAttributeModeToUsePointData()
    calc.AddScalarVariable('x', 'u0', 0)
    calc.AddScalarVariable('y', 'u1', 0)
    calc.AddScalarVariable('z', 'u2', 0)
    calc.SetFunction('x*iHat+y*jHat+z*kHat')
    calc.SetResultArrayName('DisplacementSolutionVector')
    calc.Update()

    # ======================================================================
    # Compute strain tensor
    derivative = vtk.vtkCellDerivatives()
    derivative.SetInput(calc.GetOutput())
    derivative.SetTensorModeToComputeStrain()
    derivative.Update()

    # ======================================================================
    # Compute von Mises stress
    calc = vtk.vtkArrayCalculator()
    calc.SetInput(derivative.GetOutput())
    calc.SetAttributeModeToUseCellData()
    calc.AddScalarVariable('Strain_0', 'Strain', 0)
    calc.AddScalarVariable('Strain_1', 'Strain', 1)
    calc.AddScalarVariable('Strain_2', 'Strain', 2)
    calc.AddScalarVariable('Strain_3', 'Strain', 3)
    calc.AddScalarVariable('Strain_4', 'Strain', 4)
    calc.AddScalarVariable('Strain_5', 'Strain', 5)
    calc.AddScalarVariable('Strain_6', 'Strain', 6)
    calc.AddScalarVariable('Strain_7', 'Strain', 7)
    calc.AddScalarVariable('Strain_8', 'Strain', 8)
    calc.SetFunction('sqrt( (2*700*Strain_0 + 28466*(Strain_0+Strain_4+Strain_8))^2 + (2*700*Strain_4 + 28466*(Strain_0+Strain_4+Strain_8))^2 + (2*700*Strain_8 + 28466*(Strain_0+Strain_4+Strain_8))^2 - ( (2*700*Strain_0 + 28466*(Strain_0+Strain_4+Strain_8))*(2*700*Strain_4 + 28466*(Strain_0+Strain_4+Strain_8)) ) - ( (2*700*Strain_0 + 28466*(Strain_0+Strain_4+Strain_8))*(2*700*Strain_8 + 28466*(Strain_0+Strain_4+Strain_8)) ) - ( (2*700*Strain_4 + 28466*(Strain_0+Strain_4+Strain_8))*(2*700*Strain_8 + 28466*(Strain_0+Strain_4+Strain_8)) ) + 3 * ((2*700*Strain_3)^2 + (2*700*Strain_6)^2 + (2*700*Strain_7)^2) )')
    calc.SetResultArrayName('vonMisesStress_forMV_mu700_lambda28466')
    calc.Update()

    print "Computation of displacement vectors, Cauchy strain and von Mises stress: DONE."

    # ======================================================================
    # Define dummy variable; get output of calc filter
    dummy = calc.GetOutput()

    # Get point data arrays u0, u1 and u2
    pointData_u0 = dummy.GetPointData().GetArray('u0')
    pointData_u1 = dummy.GetPointData().GetArray('u1')
    pointData_u2 = dummy.GetPointData().GetArray('u2')

    # Set scalars
    dummy.GetPointData().SetScalars(pointData_u0)

    # ======================================================================
    # Warp by scalar u0
    warpScalar = vtk.vtkWarpScalar()
    warpScalar.SetInput(dummy)
    warpScalar.SetNormal(1.0,0.0,0.0)
    warpScalar.SetScaleFactor(1.0)
    warpScalar.SetUseNormal(1)
    warpScalar.Update()

    # Get output and set scalars
    dummy = warpScalar.GetOutput()
    dummy.GetPointData().SetScalars(pointData_u1)

    # ======================================================================
    # Warp by scalar u1
    warpScalar = vtk.vtkWarpScalar()
    warpScalar.SetInput(dummy)
    warpScalar.SetNormal(0.0,1.0,0.0)
    warpScalar.SetScaleFactor(1.0)
    warpScalar.SetUseNormal(1)
    warpScalar.Update()

    # Get output and set scalars
    dummy = warpScalar.GetOutput()
    dummy.GetPointData().SetScalars(pointData_u2)

    # ======================================================================
    # Warp by scalar u2
    warpScalar = vtk.vtkWarpScalar()
    warpScalar.SetInput(dummy)
    warpScalar.SetNormal(0.0,0.0,1.0)
    warpScalar.SetScaleFactor(1.0)
    warpScalar.SetUseNormal(1)
    warpScalar.Update()

    # Get output and add point data arrays that got deleted earlier
    dummy = warpScalar.GetOutput()
    dummy.GetPointData().AddArray(pointData_u0)
    dummy.GetPointData().AddArray(pointData_u1)

    # ======================================================================
    # Write output to vtu
    writer = vtk.vtkXMLUnstructuredGridWriter()
    writer.SetDataModeToAscii()
    writer.SetFileName(outputfilename)
    writer.SetInput(dummy)
    writer.Write()

    # ======================================================================
    print "Writing Extended VTU incl. von Mises Stress information: DONE."
    print "=============================================================="
    print " "
CognitionGuidedSurgery/msml
src/msml/ext/vonMisesStressComputation_mvrPostProcessingAnalytics.py
Python
gpl-3.0
6,424
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import import random REIL_MEMORY_ENDIANNESS_LE = 0x0 # Little Endian REIL_MEMORY_ENDIANNESS_BE = 0x1 # Big Endian class ReilMemory(object): """A REIL memory model (byte addressable). """ def __init__(self, address_size): # TODO: Set endianness through a parameter. # TODO: Check that all addresses have size address_size. # TODO: Use endianness parameter. # Memory's address size. self.__address_size = address_size # Memory's endianness. self.__endianness = REIL_MEMORY_ENDIANNESS_LE # Dictionary that implements the memory itself. self._memory = {} @property def address_size(self): return self.__address_size # Read methods # ======================================================================== # def read(self, address, size): """Read arbitrary size content from memory. """ value = 0x0 for i in range(0, size): value |= self._read_byte(address + i) << (i * 8) return value def _read_byte(self, address): """Read a byte from memory. """ # Initialize memory location with a random value. if address not in self._memory: self._memory[address] = random.randint(0x00, 0xff) return self._memory[address] # Write methods # ======================================================================== # def write(self, address, size, value): """Write arbitrary size content to memory. """ for i in range(0, size): self.__write_byte(address + i, (value >> (i * 8)) & 0xff) def __write_byte(self, address, value): """Write byte in memory. """ self._memory[address] = value & 0xff # Misc methods # ======================================================================== # def reset(self): # Dictionary that implements the memory itself. self._memory = {} # Magic methods # ======================================================================== # def __str__(self): lines = [] for addr in sorted(self._memory.keys()): lines += ["0x%08x : 0x%08x" % (addr, self._memory[addr])] return "\n".join(lines) class ReilMemoryEx(ReilMemory): """Reil memory extended class""" def __init__(self, address_size): super(ReilMemoryEx, self).__init__(address_size) # Previous state of memory. self.__memory_prev = {} # Write operations counter. 
self.__write_count = 0 # Read methods # ======================================================================== # def read_inverse(self, value, size): """Return a list of memory addresses that contain the specified value. """ addr_candidates = [addr for addr, val in self._memory.items() if val == (value & 0xff)] addr_matches = [] for addr in addr_candidates: match = True for i in range(0, size): byte_curr = (value >> (i * 8)) & 0xff try: match = self._memory[addr + i] == byte_curr except KeyError: match = False if not match: break if match: addr_matches += [addr] return addr_matches def try_read(self, address, size): """Try to read memory content at specified address. If any location was not written before, it returns a tuple (False, None). Otherwise, it returns (True, memory content). """ value = 0x0 for i in range(0, size): addr = address + i if addr in self._memory: value |= self._read_byte(addr) << (i * 8) else: return False, None return True, value def try_read_prev(self, address, size): """Try to read previous memory content at specified address. If any location was not written before, it returns a tuple (False, None). Otherwise, it returns (True, memory content). """ value = 0x0 for i in range(0, size): addr = address + i if addr in self.__memory_prev: _, val_byte = self.__try_read_byte_prev(addr) value |= val_byte << (i * 8) else: return False, None return True, value def __try_read_byte_prev(self, address): """Read previous value for memory location. Return a tuple (True, Byte) in case of successful read, (False, None) otherwise. """ # Initialize memory location with a random value if address not in self.__memory_prev: return False, None return True, self.__memory_prev[address] # Write methods # ======================================================================== # def write(self, address, size, value): """Write arbitrary size content to memory. """ for i in range(0, size): self.__write_byte(address + i, (value >> (i * 8)) & 0xff) self.__write_count += 1 def __write_byte(self, address, value): """Write byte in memory. """ # Save previous address content. if address in self._memory: self.__memory_prev[address] = self._memory[address] self._memory[address] = value & 0xff # Misc methods # ======================================================================== # def reset(self): super(ReilMemoryEx, self).reset() # Previous state of memory. self.__memory_prev = {} # Write operations counter. self.__write_count = 0 def get_addresses(self): """Get accessed addresses. """ return list(self._memory.keys()) def get_write_count(self): """Get number of write operations performed on the memory. """ return self.__write_count
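
# Illustrative usage sketch (not part of the original module):
#
#   mem = ReilMemoryEx(address_size=32)
#   mem.write(0x1000, 4, 0xdeadbeef)          # stored little-endian, byte by byte
#   assert mem.read(0x1000, 4) == 0xdeadbeef
#   hit, value = mem.try_read(0x1004, 4)      # (False, None): never written
#   print(mem.get_write_count())              # number of write operations so far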
programa-stic/barf-project
barf/core/reil/emulator/memory.py
Python
bsd-2-clause
7,471
#!/usr/bin/env python from __future__ import division from __future__ import unicode_literals import sys, math import argparse from volk_test_funcs import (create_connection, list_tables, get_results, helper, timeit, format_results) try: import matplotlib import matplotlib.pyplot as plt except ImportError: sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n") sys.exit(1) def main(): desc='Plot Volk performance results from a SQLite database. ' + \ 'Run one of the volk tests first (e.g, volk_math.py)' parser = argparse.ArgumentParser(description=desc) parser.add_argument('-D', '--database', type=str, default='volk_results.db', help='Database file to read data from [default: %(default)s]') parser.add_argument('-E', '--errorbars', action='store_true', default=False, help='Show error bars (1 standard dev.)') parser.add_argument('-P', '--plot', type=str, choices=['mean', 'min', 'max'], default='mean', help='Set the type of plot to produce [default: %(default)s]') parser.add_argument('-%', '--percent', type=str, default=None, metavar="table", help='Show percent difference to the given type [default: %(default)s]') args = parser.parse_args() # Set up global plotting properties matplotlib.rcParams['figure.subplot.bottom'] = 0.2 matplotlib.rcParams['figure.subplot.top'] = 0.95 matplotlib.rcParams['figure.subplot.right'] = 0.98 matplotlib.rcParams['ytick.labelsize'] = 16 matplotlib.rcParams['xtick.labelsize'] = 16 matplotlib.rcParams['legend.fontsize'] = 18 # Get list of tables to compare conn = create_connection(args.database) tables = list_tables(conn) M = len(tables) # Colors to distinguish each table in the bar graph # More than 5 tables will wrap around to the start. colors = ['b', 'r', 'g', 'm', 'k'] # Set up figure for plotting f0 = plt.figure(0, facecolor='w', figsize=(14,10)) s0 = f0.add_subplot(1,1,1) # Create a register of names that exist in all tables tmp_regs = [] for table in tables: # Get results from the next table res = get_results(conn, table[0]) tmp_regs.append(list()) for r in res: try: tmp_regs[-1].index(r['kernel']) except ValueError: tmp_regs[-1].append(r['kernel']) # Get only those names that are common in all tables name_reg = tmp_regs[0] for t in tmp_regs[1:]: name_reg = list(set(name_reg) & set(t)) name_reg.sort() # Pull the data out for each table into a dictionary # we can ref the table by it's name and the data associated # with a given kernel in name_reg by it's name. # This ensures there is no sorting issue with the data in the # dictionary, so the kernels are plotted against each other. 
table_data = dict() for i,table in enumerate(tables): # Get results from the next table res = get_results(conn, table[0]) data = dict() for r in res: data[r['kernel']] = r table_data[table[0]] = data if args.percent is not None: for i,t in enumerate(table_data): if args.percent == t: norm_data = [] for name in name_reg: if(args.plot == 'max'): norm_data.append(table_data[t][name]['max']) elif(args.plot == 'min'): norm_data.append(table_data[t][name]['min']) elif(args.plot == 'mean'): norm_data.append(table_data[t][name]['avg']) # Plot the results x0 = list(range(len(name_reg))) i = 0 for t in (table_data): ydata = [] stds = [] for name in name_reg: stds.append(math.sqrt(table_data[t][name]['var'])) if(args.plot == 'max'): ydata.append(table_data[t][name]['max']) elif(args.plot == 'min'): ydata.append(table_data[t][name]['min']) elif(args.plot == 'mean'): ydata.append(table_data[t][name]['avg']) if args.percent is not None: ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)] if(args.percent != t): # makes x values for this data set placement # width of bars depends on number of comparisons wdth = 0.80 / (M-1) x1 = [x + i*wdth for x in x0] i += 1 s0.bar(x1, ydata, width=wdth, color=colors[(i-1)%M], label=t, edgecolor='k', linewidth=2) else: # makes x values for this data set placement # width of bars depends on number of comparisons wdth = 0.80 / M x1 = [x + i*wdth for x in x0] i += 1 if(args.errorbars is False): s0.bar(x1, ydata, width=wdth, color=colors[(i-1)%M], label=t, edgecolor='k', linewidth=2) else: s0.bar(x1, ydata, width=wdth, yerr=stds, color=colors[i%M], label=t, edgecolor='k', linewidth=2, error_kw={"ecolor": 'k', "capsize":5, "linewidth":2}) nitems = res[0]['nitems'] if args.percent is None: s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems), fontsize=22, fontweight='bold', horizontalalignment='center') else: s0.set_ylabel("% Improvement over {0} [{1:G} items]".format( args.percent, nitems), fontsize=22, fontweight='bold') s0.legend() s0.set_xticks(x0) s0.set_xticklabels(name_reg) for label in s0.xaxis.get_ticklabels(): label.set_rotation(45) label.set_fontsize(16) plt.show() if __name__ == "__main__": main()
skoslowski/gnuradio
gnuradio-runtime/examples/volk_benchmark/volk_plot.py
Python
gpl-3.0
6,303
from sympy.core import ( Rational, Symbol, S, Float, Integer, Number, Pow, Basic, I, nan, pi, symbols, oo, zoo) from sympy.core.tests.test_evalf import NS from sympy.functions.elementary.miscellaneous import sqrt, cbrt from sympy.functions.elementary.exponential import exp, log from sympy.functions.elementary.trigonometric import sin, cos from sympy.series.order import O from sympy.utilities.pytest import XFAIL def test_rational(): a = Rational(1, 5) r = sqrt(5)/5 assert sqrt(a) == r assert 2*sqrt(a) == 2*r r = a*a**Rational(1, 2) assert a**Rational(3, 2) == r assert 2*a**Rational(3, 2) == 2*r r = a**5*a**Rational(2, 3) assert a**Rational(17, 3) == r assert 2 * a**Rational(17, 3) == 2*r def test_large_rational(): e = (Rational(123712**12 - 1, 7) + Rational(1, 7))**Rational(1, 3) assert e == 234232585392159195136 * (Rational(1, 7)**Rational(1, 3)) def test_negative_real(): def feq(a, b): return abs(a - b) < 1E-10 assert feq(S.One / Float(-0.5), -Integer(2)) def test_expand(): x = Symbol('x') assert (2**(-1 - x)).expand() == Rational(1, 2)*2**(-x) def test_issue_3449(): #test if powers are simplified correctly #see also issue 3995 x = Symbol('x') assert ((x**Rational(1, 3))**Rational(2)) == x**Rational(2, 3) assert ( (x**Rational(3))**Rational(2, 5)) == (x**Rational(3))**Rational(2, 5) a = Symbol('a', real=True) b = Symbol('b', real=True) assert (a**2)**b == (abs(a)**b)**2 assert sqrt(1/a) != 1/sqrt(a) # e.g. for a = -1 assert (a**3)**Rational(1, 3) != a assert (x**a)**b != x**(a*b) # e.g. x = -1, a=2, b=1/2 assert (x**.5)**b == x**(.5*b) assert (x**.5)**.5 == x**.25 assert (x**2.5)**.5 != x**1.25 # e.g. for x = 5*I k = Symbol('k', integer=True) m = Symbol('m', integer=True) assert (x**k)**m == x**(k*m) assert Number(5)**Rational(2, 3) == Number(25)**Rational(1, 3) assert (x**.5)**2 == x**1.0 assert (x**2)**k == (x**k)**2 == x**(2*k) a = Symbol('a', positive=True) assert (a**3)**Rational(2, 5) == a**Rational(6, 5) assert (a**2)**b == (a**b)**2 assert (a**Rational(2, 3))**x == (a**(2*x/3)) != (a**x)**Rational(2, 3) def test_issue_3866(): assert --sqrt(sqrt(5) - 1) == sqrt(sqrt(5) - 1) def test_negative_one(): x = Symbol('x', complex=True) y = Symbol('y', complex=True) assert 1/x**y == x**(-y) def test_issue_4362(): neg = Symbol('neg', negative=True) nonneg = Symbol('nonneg', nonnegative=True) any = Symbol('any') num, den = sqrt(1/neg).as_numer_denom() assert num == sqrt(-1) assert den == sqrt(-neg) num, den = sqrt(1/nonneg).as_numer_denom() assert num == 1 assert den == sqrt(nonneg) num, den = sqrt(1/any).as_numer_denom() assert num == sqrt(1/any) assert den == 1 def eqn(num, den, pow): return (num/den)**pow npos = 1 nneg = -1 dpos = 2 - sqrt(3) dneg = 1 - sqrt(3) assert dpos > 0 and dneg < 0 and npos > 0 and nneg < 0 # pos or neg integer eq = eqn(npos, dpos, 2) assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2) eq = eqn(npos, dneg, 2) assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2) eq = eqn(nneg, dpos, 2) assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2) eq = eqn(nneg, dneg, 2) assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2) eq = eqn(npos, dpos, -2) assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1) eq = eqn(npos, dneg, -2) assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1) eq = eqn(nneg, dpos, -2) assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1) eq = eqn(nneg, dneg, -2) assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1) # pos or neg rational pow = S.Half eq = eqn(npos, dpos, pow) assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, 
dpos**pow) eq = eqn(npos, dneg, pow) assert eq.is_Pow is False and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow) eq = eqn(nneg, dpos, pow) assert not eq.is_Pow or eq.as_numer_denom() == (nneg**pow, dpos**pow) eq = eqn(nneg, dneg, pow) assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow) eq = eqn(npos, dpos, -pow) assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, npos**pow) eq = eqn(npos, dneg, -pow) assert eq.is_Pow is False and eq.as_numer_denom() == (-(-npos)**pow*(-dneg)**pow, npos) eq = eqn(nneg, dpos, -pow) assert not eq.is_Pow or eq.as_numer_denom() == (dpos**pow, nneg**pow) eq = eqn(nneg, dneg, -pow) assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow) # unknown exponent pow = 2*any eq = eqn(npos, dpos, pow) assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, dpos**pow) eq = eqn(npos, dneg, pow) assert eq.is_Pow and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow) eq = eqn(nneg, dpos, pow) assert eq.is_Pow and eq.as_numer_denom() == (nneg**pow, dpos**pow) eq = eqn(nneg, dneg, pow) assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow) eq = eqn(npos, dpos, -pow) assert eq.as_numer_denom() == (dpos**pow, npos**pow) eq = eqn(npos, dneg, -pow) assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-npos)**pow) eq = eqn(nneg, dpos, -pow) assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, nneg**pow) eq = eqn(nneg, dneg, -pow) assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow) x = Symbol('x') y = Symbol('y') assert ((1/(1 + x/3))**(-S.One)).as_numer_denom() == (3 + x, 3) notp = Symbol('notp', positive=False) # not positive does not imply real b = ((1 + x/notp)**-2) assert (b**(-y)).as_numer_denom() == (1, b**y) assert (b**(-S.One)).as_numer_denom() == ((notp + x)**2, notp**2) nonp = Symbol('nonp', nonpositive=True) assert (((1 + x/nonp)**-2)**(-S.One)).as_numer_denom() == ((-nonp - x)**2, nonp**2) n = Symbol('n', negative=True) assert (x**n).as_numer_denom() == (1, x**-n) assert sqrt(1/n).as_numer_denom() == (S.ImaginaryUnit, sqrt(-n)) n = Symbol('0 or neg', nonpositive=True) # if x and n are split up without negating each term and n is negative # then the answer might be wrong; if n is 0 it won't matter since # 1/oo and 1/zoo are both zero as is sqrt(0)/sqrt(-x) unless x is also # zero (in which case the negative sign doesn't matter): # 1/sqrt(1/-1) = -I but sqrt(-1)/sqrt(1) = I assert (1/sqrt(x/n)).as_numer_denom() == (sqrt(-n), sqrt(-x)) c = Symbol('c', complex=True) e = sqrt(1/c) assert e.as_numer_denom() == (e, 1) i = Symbol('i', integer=True) assert (((1 + x/y)**i)).as_numer_denom() == ((x + y)**i, y**i) def test_Pow_signs(): """Cf. 
issues 4595 and 5250""" x = Symbol('x') y = Symbol('y') n = Symbol('n', even=True) assert (3 - y)**2 != (y - 3)**2 assert (3 - y)**n != (y - 3)**n assert (-3 + y - x)**2 != (3 - y + x)**2 assert (y - 3)**3 != -(3 - y)**3 def test_power_with_noncommutative_mul_as_base(): x = Symbol('x', commutative=False) y = Symbol('y', commutative=False) assert not (x*y)**3 == x**3*y**3 assert (2*x*y)**3 == 8*(x*y)**3 def test_zero(): x = Symbol('x') y = Symbol('y') assert 0**x != 0 assert 0**(2*x) == 0**x assert 0**(1.0*x) == 0**x assert 0**(2.0*x) == 0**x assert (0**(2 - x)).as_base_exp() == (0, 2 - x) assert 0**(x - 2) != S.Infinity**(2 - x) assert 0**(2*x*y) == 0**(x*y) assert 0**(-2*x*y) == S.ComplexInfinity**(x*y) def test_pow_as_base_exp(): x = Symbol('x') assert (S.Infinity**(2 - x)).as_base_exp() == (S.Infinity, 2 - x) assert (S.Infinity**(x - 2)).as_base_exp() == (S.Infinity, x - 2) p = S.Half**x assert p.base, p.exp == p.as_base_exp() == (S(2), -x) # issue 8344: assert Pow(1, 2, evaluate=False).as_base_exp() == (S(1), S(2)) def test_issue_6100(): x = Symbol('x') y = Symbol('y') assert x**1.0 == x assert x == x**1.0 assert True != x**1.0 assert x**1.0 is not True assert x is not True assert x*y == (x*y)**1.0 assert (x**1.0)**1.0 == x assert (x**1.0)**2.0 == x**2 b = Basic() assert Pow(b, 1.0, evaluate=False) == b # if the following gets distributed as a Mul (x**1.0*y**1.0 then # __eq__ methods could be added to Symbol and Pow to detect the # power-of-1.0 case. assert ((x*y)**1.0).func is Pow def test_issue_6208(): from sympy import root, Rational I = S.ImaginaryUnit assert sqrt(33**(9*I/10)) == -33**(9*I/20) assert root((6*I)**(2*I), 3).as_base_exp()[1] == Rational(1, 3) # != 2*I/3 assert root((6*I)**(I/3), 3).as_base_exp()[1] == I/9 assert sqrt(exp(3*I)) == exp(3*I/2) assert sqrt(-sqrt(3)*(1 + 2*I)) == sqrt(sqrt(3))*sqrt(-1 - 2*I) assert sqrt(exp(5*I)) == -exp(5*I/2) assert root(exp(5*I), 3).exp == Rational(1, 3) def test_issue_6990(): x = Symbol('x') a = Symbol('a') b = Symbol('b') assert (sqrt(a + b*x + x**2)).series(x, 0, 3).removeO() == \ b*x/(2*sqrt(a)) + x**2*(1/(2*sqrt(a)) - \ b**2/(8*a**(S(3)/2))) + sqrt(a) def test_issue_6068(): x = Symbol('x') assert sqrt(sin(x)).series(x, 0, 7) == \ sqrt(x) - x**(S(5)/2)/12 + x**(S(9)/2)/1440 - \ x**(S(13)/2)/24192 + O(x**7) assert sqrt(sin(x)).series(x, 0, 9) == \ sqrt(x) - x**(S(5)/2)/12 + x**(S(9)/2)/1440 - \ x**(S(13)/2)/24192 - 67*x**(S(17)/2)/29030400 + O(x**9) assert sqrt(sin(x**3)).series(x, 0, 19) == \ x**(S(3)/2) - x**(S(15)/2)/12 + x**(S(27)/2)/1440 + O(x**19) assert sqrt(sin(x**3)).series(x, 0, 20) == \ x**(S(3)/2) - x**(S(15)/2)/12 + x**(S(27)/2)/1440 - \ x**(S(39)/2)/24192 + O(x**20) def test_issue_6782(): x = Symbol('x') assert sqrt(sin(x**3)).series(x, 0, 7) == x**(S(3)/2) + O(x**7) assert sqrt(sin(x**4)).series(x, 0, 3) == x**2 + O(x**3) def test_issue_6653(): x = Symbol('x') assert (1 / sqrt(1 + sin(x**2))).series(x, 0, 3) == 1 - x**2/2 + O(x**3) def test_issue_6429(): x = Symbol('x') c = Symbol('c') f = (c**2 + x)**(0.5) assert f.series(x, x0=0, n=1) == (c**2)**0.5 + O(x) assert f.taylor_term(0, x) == (c**2)**0.5 assert f.taylor_term(1, x) == 0.5*x*(c**2)**(-0.5) assert f.taylor_term(2, x) == -0.125*x**2*(c**2)**(-1.5) def test_issue_7638(): f = pi/log(sqrt(2)) assert ((1 + I)**(I*f/2))**0.3 == (1 + I)**(0.15*I*f) # if 1/3 -> 1.0/3 this should fail since it cannot be shown that the # sign will be +/-1; for the previous "small arg" case, it didn't matter # that this could not be proved assert (1 + I)**(4*I*f) == ((1 + 
I)**(12*I*f))**(S(1)/3) assert (((1 + I)**(I*(1 + 7*f)))**(S(1)/3)).exp == S(1)/3 r = symbols('r', real=True) assert sqrt(r**2) == abs(r) assert cbrt(r**3) != r assert sqrt(Pow(2*I, 5*S.Half)) != (2*I)**(5/S(4)) p = symbols('p', positive=True) assert cbrt(p**2) == p**(2/S(3)) assert NS(((0.2 + 0.7*I)**(0.7 + 1.0*I))**(0.5 - 0.1*I), 1) == '0.4 + 0.2*I' assert sqrt(1/(1 + I)) == sqrt((1 - I)/2) # or 1/sqrt(1 + I) e = 1/(1 - sqrt(2)) assert sqrt(e) == I/sqrt(-1 + sqrt(2)) assert e**-S.Half == -I*sqrt(-1 + sqrt(2)) assert sqrt((cos(1)**2 + sin(1)**2 - 1)**(3 + I)).exp == S.Half assert sqrt(r**(4/S(3))) != r**(2/S(3)) assert sqrt((p + I)**(4/S(3))) == (p + I)**(2/S(3)) assert sqrt((p - p**2*I)**2) == p - p**2*I assert sqrt((p + r*I)**2) != p + r*I e = (1 + I/5) assert sqrt(e**5) == e**(5*S.Half) assert sqrt(e**6) == e**3 assert sqrt((1 + I*r)**6) != (1 + I*r)**3 def test_issue_8582(): assert 1**oo is nan assert 1**(-oo) is nan assert 1**zoo is nan assert 1**(oo + I) is nan assert 1**(1 + I*oo) is nan assert 1**(oo + I*oo) is nan def test_issue_8650(): n = Symbol('n', integer=True, nonnegative=True) assert (n**n).is_positive is True x = 5*n+5 assert (x**(5*(n+1))).is_positive is True
wxgeo/geophar
wxgeometrie/sympy/core/tests/test_eval_power.py
Python
gpl-2.0
12,168
# coding=utf-8 # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pytype: skip-file import unittest import mock from apache_beam.examples.snippets.util import assert_matches_stdout from apache_beam.testing.test_pipeline import TestPipeline from . import cogroupbykey def check_plants(actual): expected = '''[START plants] ('Apple', {'icons': ['🍎', '🍏'], 'durations': ['perennial']}) ('Carrot', {'icons': [], 'durations': ['biennial']}) ('Tomato', {'icons': ['🍅'], 'durations': ['perennial', 'annual']}) ('Eggplant', {'icons': ['🍆'], 'durations': []}) [END plants]'''.splitlines()[1:-1] # Make it deterministic by sorting all sublists in each element. def normalize_element(elem): name, details = elem details['icons'] = sorted(details['icons']) details['durations'] = sorted(details['durations']) return name, details assert_matches_stdout(actual, expected, normalize_element) @mock.patch('apache_beam.Pipeline', TestPipeline) @mock.patch( 'apache_beam.examples.snippets.transforms.aggregation.cogroupbykey.print', str) class CoGroupByKeyTest(unittest.TestCase): def test_cogroupbykey(self): cogroupbykey.cogroupbykey(check_plants) if __name__ == '__main__': unittest.main()
lukecwik/incubator-beam
sdks/python/apache_beam/examples/snippets/transforms/aggregation/cogroupbykey_test.py
Python
apache-2.0
1,975
""" Tests validation functions in validate_extensions.py validate_extensions.py: http://www.github.com/samjabrahams/anchorhub/validation/validate_extensions.py """ from nose.tools import * import anchorhub.validation.validate_extensions as v from anchorhub.exceptions.validationexception import ValidationException class ExtSpace(object): """ Simple class to test out namespace implementations of validate_extensions() """ def __init__(self, e=None): if e is not None: self.extensions = e def test_validate_correct(): """ validate_extensions.py: Test validate() on correct lists """ a = ['.ds', '.md', 'eeyy'] assert v.validate(a) b = ['.md'] assert v.validate(b) c = ['t', 't', 't', 't', 't', 't'] assert v.validate(c) @raises(ValidationException) def test_validate_incorrect_empty_string(): """ validate_extensions.py: Test validate() on lists with empty strings :raises ValidationException: always, if the test is working """ a = ['.md', '.rst', ''] assert v.validate(a) @raises(ValidationException) def test_validate_incorrect_empty_list(): """ validate_extensions.py: Test validate() on an empty list :raises ValidationException: always, if the test is working """ a = [] assert v.validate(a) def test_validate_correct_namespace(): """ validate_extensions.py: Test validate() on correct namespaces """ a = ExtSpace(e=['.ds', '.md', 'eeyy']) assert v.validate(a) b = ExtSpace(e=['.md']) assert v.validate(b) c = ExtSpace(e=['t', 't', 't', 't', 't', 't']) assert v.validate(c) @raises(ValidationException) def test_validate_empty_string_namespace(): """ validate_extensions.py: Test validate() on a namespace with an empty string Try validating a namespace that has an empty string as part of the list under its extensions attribute. :raises ValidationException: always, if the test is working """ a = ExtSpace(e=['.md', '.rst', '']) assert v.validate(a) @raises(ValidationException) def test_validate_empty_array_namespace(): """ validate_extensions.py: Test validate() on a namespace with empty array :raises ValidationException: always, if the test is working """ a = ExtSpace(e=['.md', '.rst', '']) assert v.validate(a) @raises(ValueError) def test_validate_bad_type_string(): """ validate_extensions.py: Test validate() with a single string :raises ValueError: always, if the test is working """ assert v.validate('.md') @raises(ValueError) def test_validate_bad_type_number(): """ validate_extensions.py: Test validate() with a single number :raises ValueError: always, if the test is working """ assert v.validate(3) @raises(ValueError) def test_validate_bad_type_no_extensions_attr(): """ validate_extensions.py: Test validate() with object lacking 'extensions' :raises ValueError: always, if the test is working """ assert v.validate(object())
samjabrahams/anchorhub
anchorhub/validation/tests/test_validate_extensions.py
Python
apache-2.0
3,062
import pf from Var import var import numpy,string from Glitch import Glitch """A faster version of nextTok(), using memory allocated (once only) using numpy, and using functions written in C. The slow, pure python module is NexusToken.py. This version is about twice as fast. Which one is used is under the control of var.nexus_doFastNextTok. This one does not work for CStrings, so we need to revert to the old way whenever CStrings are encountered.""" class NexusToken(object): def __init__(self, max): self.max = numpy.array([max], numpy.int32) self.tokLen = numpy.array([0], numpy.int32) self.tok = numpy.array(['x'] * int(self.max), 'c') self.embeddedCommentLen = numpy.array([0], numpy.int32) self.embeddedComment = numpy.array(['x'] * int(self.max), 'c') self.savedCommentLen = numpy.array([0], numpy.int32) self.filePtr = None self.nexusToken = pf.newNexusToken(var._nexus_writeVisibleComments, var._nexus_getP4CommandComments, var._nexus_getWeightCommandComments, var._nexus_getAllCommandComments, var._nexus_getLineEndingsAsTokens, self.max, self.tokLen, self.tok, self.embeddedCommentLen, self.embeddedComment, self.savedCommentLen) #self.previousTok = None #self.previousEmbeddedComment = None nt = NexusToken(300) def checkLineLengths(flob): global nt #print 'NexusToken2.checkLineLengths here.' flob.seek(0,0) longest = pf.nexusTokenCheckLineLengths(nt.nexusToken, flob) flob.seek(0,0) #print 'The longest line length is %i' % longest if longest > nt.max: nt = NexusToken(longest) def nextTok(flob): #print 'NexusToken2.nextTok() here. nt.nexusToken = %i, max=%s, tokLen=%s, type(tokLen)=%s' % (nt.nexusToken, nt.max, nt.tokLen[0], type(nt.tokLen)) #assert type(nt.tokLen) == type(numpy.array([0], numpy.int32)) #print "NexusToken2.nextTok(). nt.wordIsFinished[0]=%i, nt.tokLen=%i, previousTok=%s, previousComment=%s" % (nt.wordIsFinished[0], nt.tokLen[0], nt.previousTok, nt.previousEmbeddedComment) #if nt.wordIsFinished[0]: # assert nt.tokLen[0] # ret = nt.tok[:int(nt.tokLen[0])].tostring() # nt.tokLen[0] = 0 # nt.wordIsFinished[0] = 0 # #nt.previousTok = ret # return ret #print ' x1 NexusToken2.nextTok() here. 
savedCommentLen=%i' % nt.savedCommentLen[0] if nt.savedCommentLen[0]: ret = nt.embeddedComment[:int(nt.savedCommentLen[0])].tostring() nt.savedCommentLen[0] = 0 return ret pf.nextToken(nt.nexusToken, flob) #print ' x2 tokLen = %i, embeddedCommentLen[0] = %i' % (nt.tokLen[0], nt.embeddedCommentLen[0]) if nt.embeddedCommentLen[0]: ret = nt.embeddedComment[:int(nt.embeddedCommentLen[0])].tostring() nt.embeddedCommentLen[0] = 0 #nt.previousEmbeddedComment = ret return ret else: if nt.tokLen[0]: ret = nt.tok[:int(nt.tokLen[0])].tostring() nt.tokLen[0] = 0 #nt.previousTok = ret return ret else: return None def safeNextTok(flob, caller=None): t = nextTok(flob) if not t: if caller: gm = ["safeNextTok(), called from %s" % caller] else: gm = ["safeNextTok()"] gm.append("Premature Death.") gm.append("Ran out of understandable things to read in nexus file.") raise Glitch, gm else: return t def nexusSkipPastNextSemiColon(flob): pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob) def nexusSkipPastBlockEnd(flob): """Read up to and including a block 'end' or 'endblock'.""" # This should only ever be issued after a semi-colon complaintHead = '\nNexus: nexusSkipPastBlockEnd()' if hasattr(flob, 'name'): complaintHead += " file: %s" % flob.name while 1: tok = nextTok(flob) if tok: lowTok = string.lower(tok) if lowTok == 'end' or lowTok == 'endblock': tok2 = nextTok(flob) if not tok2 or tok2 != ';': gm = [complaintHead] gm.append(" Expecting a semicolon after %s" % tok) if not tok2: gm.append("Got nothing.") else: gm.append("Got '%s'" % tok2) raise Glitch, gm return elif lowTok == ';': # for pathological cases where the last command is a ';' by itself. continue else: pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob) else: break gm = [complaintHead] gm.append("Failed to find either 'end' or 'endblock'") gm.append("Premature end of file?") raise Glitch, gm
Linhua-Sun/p4-phylogenetics
p4/NexusToken2.py
Python
gpl-2.0
5,196
""" Created on 22 Mar 2014 @author: Max Demian """ # 100% coverage import unittest from contactman import Contact, ContactList, Friend, Supplier, EmailableContact class TestContactman(unittest.TestCase): def setUp(self): self.c = Contact("max", "demian@gmx.de") self.d = Contact("1", "2") def test_contact(self): self.assertEqual(str(self.d), "1, 2") self.assertEqual(self.c.name, "max") self.assertEqual(self.c.email, "demian@gmx.de") self.assertEqual([self.c, self.d], Contact.all_contacts) def test_supplier(self): s = Supplier("mix", "dimian@gmx.de") self.assertEqual(s.order("pizza"), "pizza") def test_contact_list(self): new = Contact("tst", "tst@t.st") self.assertEqual(Contact.all_contacts.search("tst"), [new]) def test_friend(self): ContactList().delete_all() # new_friend = Contact("Tom") new_friend = Friend(name="Tom", email="tom@gmail.com", phone="1-504-298", street="Common St 1", city="New Orleans", state="Louisiana", code="70112") self.assertEqual(Contact.all_contacts.search("Tom"), [new_friend]) print new_friend def test_mailsender(self): e = EmailableContact("John Smith", "jsmith@ex.net") self.assertEqual(e.send_mail("msg"), "msg") if __name__ == "__main__": unittest.main()
mikar/60-days-of-python
addressbook/test_contactman.py
Python
mit
1,410
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsLayoutFrame. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = '(C) 2017 by Nyall Dawson' __date__ = '23/10/2017' __copyright__ = 'Copyright 2017, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis # NOQA from qgis.testing import start_app, unittest from qgis.core import QgsLayoutFrame, QgsLayoutItemHtml from test_qgslayoutitem import LayoutItemTestCase start_app() class TestQgsLayoutFrame(unittest.TestCase, LayoutItemTestCase): @classmethod def setUpClass(cls): cls.mf = None @classmethod def createItem(cls, layout): cls.mf = QgsLayoutItemHtml(layout) return QgsLayoutFrame(layout, cls.mf) if __name__ == '__main__': unittest.main()
stevenmizuno/QGIS
tests/src/python/test_qgslayoutframe.py
Python
gpl-2.0
1,037
"""target_uid Revision ID: 32df0b86c09a Revises: 125dd1a9bab5 Create Date: 2015-07-14 12:44:15.990112 """ # revision identifiers, used by Alembic. revision = '32df0b86c09a' down_revision = '125dd1a9bab5' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('campaign_target', sa.Column('uid', sa.String(length=100), nullable=True)) op.create_index('ix_campaign_target_uid', 'campaign_target', ['uid'], unique=False) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_index('ix_campaign_target_uid', table_name='campaign_target') op.drop_column('campaign_target', 'uid') ### end Alembic commands ###
18mr/call-congress
alembic/versions/32df0b86c09a_target_uid.py
Python
agpl-3.0
824
# Python code from win32com . server . register import UseCommandLine from win32api import MessageBox from win32com . client import Dispatch from win32ui import MessageBox class StemmerFactory : _reg_clsid_ = "{602D10EB-426C-4D6F-A4DF-C05572EB780B}" _reg_desc_ = "LangTech Stemmer" _reg_progid_ = "LangTech.Stemmer" _public_methods_ = [ 'new' ] def new ( self, scriptFile ) : self . scriptFile = scriptFile stemmer = Dispatch ( "LangTech.Stemmer.Product" ) return stemmer class Stemmer : _reg_clsid_ = "{B306454A-CAE6-4A74-ACAD-0BB11EF256DD}" _reg_desc_ = "LangTech Stemmer Product" _reg_progid_ = "LangTech.Stemmer.Product" _public_methods_ = [ 'stemWord' ] def stemWord ( self, word ) : # extremely simple stemming: if the word ends in 's' then drop the 's' if word [ -1 ] == "s": return word [ : -1 ] else: return word if __name__ == '__main__' : UseCommandLine ( StemmerFactory ) UseCommandLine ( Stemmer ) #---------------------------------------- # # C++ #include <comdef.h> #include <initguid.h> DEFINE_GUID(CLSID_StemmerFactory, 0x602D10EB, 0x426C, 0x4D6F, 0xA4, 0xDF, 0xC0, 0x55, 0x72, 0xEB, 0x78, 0x0B); DISPID rgDispId ; OLECHAR * rgszNames [ ] = { OLESTR ( "new" ) }; DISPPARAMS DispParams; VARIANT VarResult; EXCEPINFO excepinfo; UINT uArgErr; VARIANTARG * pvarg = NULL; _bstr_t stemmedWord; HRESULT hr; IDispatch * stemmerFactory; IDispatch * stemmer; if ( FAILED ( hr = CoInitialize ( NULL ) ) ) { MessageBox ( 0, "CoInitialize failure", "Fault", MB_OK ); break; } if ( FAILED ( hr = CoCreateInstance ( CLSID_StemmerFactory, NULL, CLSCTX_INPROC_SERVER, IID_IDispatch, ( LPVOID * ) & stemmerFactory ) ) ) { MessageBox ( 0, "CoCreateInstance failure", "Fault", MB_OK ); break; } if ( FAILED ( hr = stemmerFactory -> GetIDsOfNames ( IID_NULL, rgszNames, 1, LOCALE_SYSTEM_DEFAULT, & rgDispId ) ) ) { MessageBox ( 0, "GetIDsOfNames failure", "Fault", MB_OK ); break; } DispParams.cArgs = 1; DispParams.cNamedArgs = 0; DispParams.rgdispidNamedArgs = 0; pvarg = new VARIANTARG [ DispParams . cArgs ]; if ( pvarg == NULL ) { MessageBox ( 0, "Insufficient 1st memory", "Fault", MB_OK ); break; } pvarg -> vt = VT_BSTR; pvarg -> bstrVal = SysAllocString ( L"engRules.pl" ); DispParams.rgvarg = pvarg; if ( FAILED ( hr = stemmerFactory -> Invoke ( rgDispId, IID_NULL, LOCALE_SYSTEM_DEFAULT, DISPATCH_METHOD, & DispParams, & VarResult, & excepinfo, & uArgErr ) ) ) { MessageBox ( 0, "1st Invoke failure", "Fault", MB_OK ); break; } delete ( pvarg ); stemmer = VarResult.pdispVal; pvarg = new VARIANTARG [ DispParams . cArgs ]; if ( pvarg == NULL ) { MessageBox ( 0, "Insufficient 2nd memory", "Fault", MB_OK ); break; } pvarg -> vt = VT_BSTR; pvarg -> bstrVal = SysAllocString ( L"cats" ); DispParams.rgvarg = pvarg; if ( FAILED ( hr = stemmer -> Invoke ( rgDispId, IID_NULL, LOCALE_SYSTEM_DEFAULT, DISPATCH_METHOD, & DispParams, & VarResult, & excepinfo, & uArgErr ) ) ) { MessageBox ( 0, "2nd Invoke failure", "Fault", MB_OK ); break; } delete ( pvarg ); stemmedWord = VarResult.bstrVal; MessageBox ( 0, ( const char * ) stemmedWord, "Resulting Stemmed Word", MB_OK ); CoUninitialize ( );
ActiveState/code
recipes/Python/141602_Barebones_VC_code_invoking_PythCOM_factory/recipe-141602.py
Python
mit
3,435
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from .exceptions import MultipleObjectsReturned, YouTubeError, CipherError
from .jsinterp import JSInterpreter
from .models import Video
from .utils import safe_filename

try:
    from urllib2 import urlopen
    from urlparse import urlparse, parse_qs, unquote
except ImportError:
    from urllib.parse import urlparse, parse_qs, unquote
    from urllib.request import urlopen

import re
import json

# YouTube quality and codecs id map.
# source: http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs
YT_ENCODING = {
    # Flash Video
    5: ["flv", "240p", "Sorenson H.263", "N/A", "0.25", "MP3", "64"],
    6: ["flv", "270p", "Sorenson H.263", "N/A", "0.8", "MP3", "64"],
    34: ["flv", "360p", "H.264", "Main", "0.5", "AAC", "128"],
    35: ["flv", "480p", "H.264", "Main", "0.8-1", "AAC", "128"],

    # 3GP
    36: ["3gp", "240p", "MPEG-4 Visual", "Simple", "0.17", "AAC", "38"],
    13: ["3gp", "N/A", "MPEG-4 Visual", "N/A", "0.5", "AAC", "N/A"],
    17: ["3gp", "144p", "MPEG-4 Visual", "Simple", "0.05", "AAC", "24"],

    # MPEG-4
    18: ["mp4", "360p", "H.264", "Baseline", "0.5", "AAC", "96"],
    22: ["mp4", "720p", "H.264", "High", "2-2.9", "AAC", "192"],
    37: ["mp4", "1080p", "H.264", "High", "3-4.3", "AAC", "192"],
    38: ["mp4", "3072p", "H.264", "High", "3.5-5", "AAC", "192"],
    82: ["mp4", "360p", "H.264", "3D", "0.5", "AAC", "96"],
    83: ["mp4", "240p", "H.264", "3D", "0.5", "AAC", "96"],
    84: ["mp4", "720p", "H.264", "3D", "2-2.9", "AAC", "152"],
    85: ["mp4", "1080p", "H.264", "3D", "2-2.9", "AAC", "152"],

    # WebM
    43: ["webm", "360p", "VP8", "N/A", "0.5", "Vorbis", "128"],
    44: ["webm", "480p", "VP8", "N/A", "1", "Vorbis", "128"],
    45: ["webm", "720p", "VP8", "N/A", "2", "Vorbis", "192"],
    46: ["webm", "1080p", "VP8", "N/A", "N/A", "Vorbis", "192"],
    100: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "128"],
    101: ["webm", "360p", "VP8", "3D", "N/A", "Vorbis", "192"],
    102: ["webm", "720p", "VP8", "3D", "N/A", "Vorbis", "192"]
}

# The keys corresponding to the quality/codec map above.
YT_ENCODING_KEYS = (
    'extension', 'resolution', 'video_codec', 'profile', 'video_bitrate',
    'audio_codec', 'audio_bitrate'
)


class YouTube(object):
    # TODO: just cause you CAN do this, doesn't mean you should. `hasattr` is
    # much cleaner.
    _filename = None
    _fmt_values = []
    _video_url = None
    _json_data = None
    _js_code = False
    _precompiled = False
    title = None
    videos = []
    # fmt was an undocumented URL parameter that allowed selecting
    # YouTube quality mode without using player user interface.

    @property
    def url(self):
        """Exposes the video url."""
        return self._video_url

    @url.setter
    def url(self, url):
        """Defines the URL of the YouTube video."""
        # TODO: erm, this is ugly. url should just be a method, not a property.
        self._video_url = url
        # Reset the filename.
        self._filename = None
        # Get the video details.
        self._get_video_info()

    @property
    def json_data(self):
        """Exposes the video's raw JSON data."""
        return self._json_data

    @json_data.setter
    def json_data(self, json_data):
        """Sets the video's JSON data and extracts the video details from it."""
        self._json_data = json_data
        self._process_video_info(json_data)

    @property
    def filename(self):
        """Exposes the title of the video. If this is not set, one is
        generated based on the name of the video.
        """
        if not self._filename:
            self._filename = safe_filename(self.title)
        return self._filename

    @filename.setter
    def filename(self, filename):
        """Defines the filename."""
        self._filename = filename
        if self.videos:
            for video in self.videos:
                video.filename = filename

    @property
    def video_id(self):
        """Gets the video ID extracted from the URL."""
        parts = urlparse(self._video_url)
        qs = getattr(parts, 'query', None)
        if qs:
            video_id = parse_qs(qs).get('v', None)
            if video_id:
                return video_id.pop()

    def get(self, extension=None, resolution=None, profile="High"):
        """Return a single video given an extension and resolution.

        :params extension: The desired file extension (e.g.: mp4).
        :params resolution: The desired video broadcasting standard.
        :params profile: The desired quality profile.
        """
        result = []
        for v in self.videos:
            if extension and v.extension != extension:
                continue
            elif resolution and v.resolution != resolution:
                continue
            elif profile and v.profile != profile:
                continue
            else:
                result.append(v)
        if not len(result):
            return
        elif len(result) == 1:
            return result[0]
        else:
            raise MultipleObjectsReturned(
                "get() returned more than one object")

    def filter(self, extension=None, resolution=None):
        """Return a filtered list of videos given an extension and
        resolution criteria.

        :params extension: The desired file extension (e.g.: mp4).
        :params resolution: The desired video broadcasting standard.
        """
        results = []
        for v in self.videos:
            if extension and v.extension != extension:
                continue
            elif resolution and v.resolution != resolution:
                continue
            else:
                results.append(v)
        return results

    def _fetch(self, path, data):
        """Given a path, traverse the response for the desired data. (A
        modified ver. of my dictionary traverse method:
        https://gist.github.com/2009119)

        :params path: A tuple representing a path to a node within a tree.
        :params data: The data containing the tree.
        """
        elem = path[0]
        # Get first element in tuple, and check if it contains a list.
        if type(data) is list:
            # Pop it, and let's continue..
            return self._fetch(path, data.pop())
        # Parse the url encoded data
        data = parse_qs(data)
        # Get the element in our path
        data = data.get(elem, None)
        # Offset the tuple by 1.
        path = path[1::1]
        # Check if the path has reached the end OR the element returned
        # nothing.
        if len(path) == 0 or data is None:
            if type(data) is list and len(data) == 1:
                data = data.pop()
            return data
        else:
            # Nope, let's keep diggin'
            return self._fetch(path, data)

    @staticmethod
    def decode_video_info(response):
        """Extract the ``ytplayer.config`` JSON blob from the watch-page HTML
        by matching braces, and decode it.
        """
        json_data = None
        if response:
            content = response.decode("utf-8")
            try:
                player_conf = content[18 + content.find("ytplayer.config = "):]
                bracket_count = 0
                for i, char in enumerate(player_conf):
                    if char == "{":
                        bracket_count += 1
                    elif char == "}":
                        bracket_count -= 1
                        if bracket_count == 0:
                            break
                else:
                    raise YouTubeError("Cannot get JSON from HTML")
                index = i + 1
                json_data = json.loads(player_conf[:index])
            except Exception as e:
                raise YouTubeError("Cannot decode JSON: {0}".format(e))
        return json_data

    @staticmethod
    def _parse_stream_map(text):
        """Python's `parse_qs` can't properly decode the stream map
        containing video data so we use this instead.
        """
        videoinfo = {
            "itag": [],
            "url": [],
            "quality": [],
            "fallback_host": [],
            "s": [],
            "type": []
        }

        # Split individual videos
        videos = text.split(",")
        # Unquote the characters and split to parameters
        videos = [video.split("&") for video in videos]

        for video in videos:
            for kv in video:
                key, value = kv.split("=")
                # Note: keys not already present in `videoinfo` are silently
                # discarded (the `get` default returns a throwaway list).
                videoinfo.get(key, []).append(unquote(value))
        return videoinfo

    def _process_video_info(self, data):
        self.title = None
        self.videos = []

        stream_map = self._parse_stream_map(
            data["args"]["url_encoded_fmt_stream_map"])
        self.title = data["args"]["title"]
        js_url = "http:" + data["assets"]["js"]
        video_urls = stream_map["url"]

        for i, url in enumerate(video_urls):
            try:
                fmt, fmt_data = self._extract_fmt(url)
            except (TypeError, KeyError):
                continue
            # If the signature must be ciphered...
            if "signature=" not in url:
                signature = self._cipher(stream_map["s"][i], js_url)
                url = "%s&signature=%s" % (url, signature)
            self.videos.append(Video(url, self.filename, **fmt_data))
            self._fmt_values.append(fmt)
        self.videos.sort()

    def _get_video_info(self):
        """This is responsible for executing the request, extracting the
        necessary details, and populating the different video resolutions and
        formats into a list.
        """
        self.title = None
        self.videos = []

        response = urlopen(self.url)
        if response:
            data = self.decode_video_info(response.read())
            self.json_data = data

    def _cipher(self, s, url):
        """Get the signature using the cipher implemented in the JavaScript
        code.

        :params s: Signature
        :params url: url of JavaScript file
        """
        # TODO: refactor removing tinyJS
        # Getting JS code (if hasn't been downloaded yet)
        if not self._js_code:
            # TODO: don't use conditional expression if line > 79 characters.
            self._js_code = (urlopen(url).read().decode()
                             if not self._js_code else self._js_code)
        try:
            mobj = re.search(r'\.sig\|\|([a-zA-Z0-9$]+)\(', self._js_code)
            if mobj:
                # return the first matching group
                funcname = next(g for g in mobj.groups() if g is not None)
            jsi = JSInterpreter(self._js_code)
            initial_function = jsi.extract_function(funcname)
            return initial_function([s])
        except Exception as e:
            raise CipherError("Couldn't cipher the signature. Maybe YouTube "
                              "has changed the cipher algorithm. Notify "
                              "this issue on GitHub: %s" % e)

    def _extract_fmt(self, text):
        """YouTube does not pass you a completely valid URLencoded form, I
        suspect this is supposed to act as a deterrent.. Nothing some regular
        expressions couldn't handle.

        :params text: The malformed data contained within each url node.
        """
        itag = re.findall(r'itag=(\d+)', text)
        if itag and len(itag) == 1:
            itag = int(itag[0])
            attr = YT_ENCODING.get(itag, None)
            if not attr:
                return itag, None
            return itag, dict(zip(YT_ENCODING_KEYS, attr))
DomainGroupOSS/pytube
pytube/api.py
Python
mit
11,653
import datetime
import itertools
import unittest
from copy import copy

from django.db import (
    DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.deletion import CASCADE
from django.db.models.fields import (
    AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
    DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField,
    TextField, TimeField,
)
from django.db.models.fields.related import (
    ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.db.transaction import atomic
from django.test import (
    TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)

from .fields import (
    CustomManyToManyField, InheritedManyToManyField, MediumBlobField,
)
from .models import (
    Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
    BookForeignObj, BookWeak, BookWithLongName, BookWithO2O, BookWithoutAuthor,
    BookWithSlug, IntegerPK, Node, Note, NoteRename, Tag, TagIndexed,
    TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps,
)


class SchemaTests(TransactionTestCase):
    """
    Tests that the schema-alteration code works correctly.

    Be aware that these tests are more liable than most to false results,
    as sometimes the code to check if a test has worked is almost as complex
    as the code it is testing.
    """

    available_apps = []

    models = [
        Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
        BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK,
        Note, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
    ]

    # Utility functions

    def setUp(self):
        # local_models should contain test dependent model classes that will be
        # automatically removed from the app cache on test tear down.
        self.local_models = []

    def tearDown(self):
        # Delete any tables made for our models
        self.delete_tables()
        new_apps.clear_cache()
        for model in new_apps.get_models():
            model._meta._expire_cache()
        if 'schema' in new_apps.all_models:
            for model in self.local_models:
                for many_to_many in model._meta.many_to_many:
                    through = many_to_many.remote_field.through
                    if through and through._meta.auto_created:
                        del new_apps.all_models['schema'][through._meta.model_name]
                del new_apps.all_models['schema'][model._meta.model_name]

    def delete_tables(self):
        "Deletes all model tables for our models for a clean test environment"
        converter = connection.introspection.table_name_converter
        with atomic():
            connection.disable_constraint_checking()
            table_names = connection.introspection.table_names()
            for model in itertools.chain(SchemaTests.models, self.local_models):
                tbl = converter(model._meta.db_table)
                if tbl in table_names:
                    with connection.schema_editor() as editor:
                        editor.delete_model(model)
                    table_names.remove(tbl)
            connection.enable_constraint_checking()

    def column_classes(self, model):
        with connection.cursor() as cursor:
            columns = {
                d[0]: (connection.introspection.get_field_type(d[1], d), d)
                for d in connection.introspection.get_table_description(
                    cursor,
                    model._meta.db_table,
                )
            }
        # SQLite has a different format for field_type
        for name, (type, desc) in columns.items():
            if isinstance(type, tuple):
                columns[name] = (type[0], desc)
        # SQLite also doesn't error properly
        if not columns:
            raise DatabaseError("Table does not exist (empty pragma)")
        return columns

    def get_indexes(self, table):
        """
        Get the indexes on the table using a new cursor.
        """
        with connection.cursor() as cursor:
            return connection.introspection.get_indexes(cursor, table)

    def get_constraints(self, table):
        """
        Get the constraints on a table using a new cursor.
        """
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)

    def get_constraints_for_column(self, model, column_name):
        constraints = self.get_constraints(model._meta.db_table)
        constraints_for_column = []
        for name, details in constraints.items():
            if details['columns'] == [column_name]:
                constraints_for_column.append(name)
        return sorted(constraints_for_column)

    # Tests

    def test_creation_deletion(self):
        """
        Tries creating a model's table, and then deleting it.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Check that it's there
        list(Author.objects.all())
        # Clean up that table
        with connection.schema_editor() as editor:
            editor.delete_model(Author)
        # Check that it's gone
        with self.assertRaises(DatabaseError):
            list(Author.objects.all())

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_fk(self):
        "Tests that creating tables out of FK order, then repointing, works"
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Book)
            editor.create_model(Author)
            editor.create_model(Tag)
        # Check that initial tables are there
        list(Author.objects.all())
        list(Book.objects.all())
        # Make sure the FK constraint is present
        with self.assertRaises(IntegrityError):
            Book.objects.create(
                author_id=1,
                title="Much Ado About Foreign Keys",
                pub_date=datetime.datetime.now(),
            )
        # Repoint the FK constraint
        old_field = Book._meta.get_field("author")
        new_field = ForeignKey(Tag, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Make sure the new FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_fk_to_proxy(self):
        "Tests that creating a FK to a proxy model creates database constraints."
        class AuthorProxy(Author):
            class Meta:
                app_label = 'schema'
                apps = new_apps
                proxy = True

        class AuthorRef(Model):
            author = ForeignKey(AuthorProxy, on_delete=CASCADE)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [AuthorProxy, AuthorRef]

        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(AuthorRef)
        constraints = self.get_constraints(AuthorRef._meta.db_table)
        for details in constraints.values():
            if details['columns'] == ['author_id'] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail('No FK constraint for author_id found')

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_fk_db_constraint(self):
        "Tests that the db_constraint parameter is respected"
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(Author)
            editor.create_model(BookWeak)
        # Check that initial tables are there
        list(Author.objects.all())
        list(Tag.objects.all())
        list(BookWeak.objects.all())
        # Check that BookWeak doesn't have an FK constraint
        constraints = self.get_constraints(BookWeak._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.fail("FK constraint for author_id found")
        # Make a db_constraint=False FK
        new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
        new_field.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Make sure no FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")
        # Alter to one with a constraint
        new_field2 = ForeignKey(Tag, CASCADE)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        # Make sure the new FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
                break
        else:
            self.fail("No FK constraint for tag_id found")
        # Alter to one without a constraint again
        new_field2 = ForeignKey(Tag, CASCADE)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field2, new_field, strict=True)
        # Make sure no FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")

    def _test_m2m_db_constraint(self, M2MFieldClass):
        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalAuthorWithM2M]

        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(LocalAuthorWithM2M)
        # Check that initial tables are there
        list(LocalAuthorWithM2M.objects.all())
        list(Tag.objects.all())
        # Make a db_constraint=False FK
        new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Add the field
        with connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # Make sure no FK constraint is present
        constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_m2m_db_constraint(self):
        self._test_m2m_db_constraint(ManyToManyField)

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_m2m_db_constraint_custom(self):
        self._test_m2m_db_constraint(CustomManyToManyField)

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_m2m_db_constraint_inherited(self):
        self._test_m2m_db_constraint(InheritedManyToManyField)

    def test_add_field(self):
        """
        Tests adding fields to models
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add the new field
        new_field = IntegerField(null=True)
        new_field.set_attributes_from_name("age")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['age'][0], "IntegerField")
        self.assertEqual(columns['age'][1][6], True)

    def test_add_field_temp_default(self):
        """
        Tests adding fields to models with a temporary default
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = CharField(max_length=30, default="Godwin")
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['surname'][0], "CharField")
        self.assertEqual(columns['surname'][1][6],
                         connection.features.interprets_empty_strings_as_nulls)

    def test_add_field_temp_default_boolean(self):
        """
        Tests adding fields to models with a temporary default where
        the default is False. (#21783)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = BooleanField(default=False)
        new_field.set_attributes_from_name("awesome")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # BooleanField are stored as TINYINT(1) on MySQL.
        field_type = columns['awesome'][0]
        self.assertEqual(
            field_type,
            connection.features.introspected_boolean_field_type(new_field, created_separately=True)
        )

    def test_add_field_default_transform(self):
        """
        Tests adding fields to models with a default that is not directly
        valid in the database (#22581)
        """
        class TestTransformField(IntegerField):

            # Weird field that saves the count of items in its value
            def get_default(self):
                return self.default

            def get_prep_value(self, value):
                if value is None:
                    return 0
                return len(value)

        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add the field with a default it needs to cast (to string in this case)
        new_field = TestTransformField(default={1: 2})
        new_field.set_attributes_from_name("thing")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is there
        columns = self.column_classes(Author)
        field_type, field_info = columns['thing']
        self.assertEqual(field_type, 'IntegerField')
        # Make sure the values were transformed correctly
        self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)

    def test_add_field_binary(self):
        """
        Tests binary fields get a sane default (#22851)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field
        new_field = BinaryField(blank=True)
        new_field.set_attributes_from_name("bits")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # MySQL annoyingly uses the same backend, so it'll come back as one of
        # these two types.
        self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))

    @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific")
    def test_add_binaryfield_mediumblob(self):
        """
        Test adding a custom-sized binary field on MySQL (#24846).
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field with default
        new_field = MediumBlobField(blank=True, default=b'123')
        new_field.set_attributes_from_name('bits')
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        columns = self.column_classes(Author)
        # Introspection treats BLOBs as TextFields
        self.assertEqual(columns['bits'][0], "TextField")

    def test_alter(self):
        """
        Tests simple altering of fields
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
        # Alter the name field to a TextField
        old_field = Author._meta.get_field("name")
        new_field = TextField(null=True)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "TextField")
        self.assertEqual(columns['name'][1][6], True)
        # Change nullability again
        new_field2 = TextField(null=False)
        new_field2.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "TextField")
        self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))

    def test_alter_text_field(self):
        # Regression for "BLOB/TEXT column 'info' can't have a default value"
        # on MySQL.
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        old_field = Note._meta.get_field("info")
        new_field = TextField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)

    def test_alter_text_field_to_date_field(self):
        """
        #25002 - Test conversion of text field to date field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info='1988-05-05')
        old_field = Note._meta.get_field('info')
        new_field = DateField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])

    def test_alter_text_field_to_datetime_field(self):
        """
        #25002 - Test conversion of text field to datetime field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info='1988-05-05 3:16:17.4567')
        old_field = Note._meta.get_field('info')
        new_field = DateTimeField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])

    def test_alter_text_field_to_time_field(self):
        """
        #25002 - Test conversion of text field to time field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        Note.objects.create(info='3:16:17.4567')
        old_field = Note._meta.get_field('info')
        new_field = TimeField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_alter_textual_field_keep_null_status(self):
        """
        Changing a field type shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
        old_field = Note._meta.get_field("info")
        new_field = CharField(max_length=50)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)

    def test_alter_numeric_field_keep_null_status(self):
        """
        Changing a field type shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=None, slug='aaa')
        old_field = UniqueTest._meta.get_field("year")
        new_field = BigIntegerField()
        new_field.set_attributes_from_name("year")
        with connection.schema_editor() as editor:
            editor.alter_field(UniqueTest, old_field, new_field, strict=True)
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=None, slug='bbb')

    def test_alter_null_to_not_null(self):
        """
        #23609 - Tests handling of default values when altering from NULL to
        NOT NULL.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertTrue(columns['height'][1][6])
        # Create some test data
        Author.objects.create(name='Not null author', height=12)
        Author.objects.create(name='Null author')
        # Verify null value
        self.assertEqual(Author.objects.get(name='Not null author').height, 12)
        self.assertIsNone(Author.objects.get(name='Null author').height)
        # Alter the height field to NOT NULL with default
        old_field = Author._meta.get_field("height")
        new_field = PositiveIntegerField(default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertFalse(columns['height'][1][6])
        # Verify default value
        self.assertEqual(Author.objects.get(name='Not null author').height, 12)
        self.assertEqual(Author.objects.get(name='Null author').height, 42)

    def test_alter_charfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_null when changing a CharField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change the CharField to null
        old_field = Author._meta.get_field('name')
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field)

    def test_alter_textfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_null when changing a TextField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Change the TextField to null
        old_field = Note._meta.get_field('info')
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field)

    @skipUnlessDBFeature('supports_combined_alters')
    def test_alter_null_to_not_null_keeping_default(self):
        """
        #23738 - Can change a nullable field with default to non-nullable
        with the same default.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithDefaultHeight)
        # Ensure the field is right to begin with
        columns = self.column_classes(AuthorWithDefaultHeight)
        self.assertTrue(columns['height'][1][6])
        # Alter the height field to NOT NULL keeping the previous default
        old_field = AuthorWithDefaultHeight._meta.get_field("height")
        new_field = PositiveIntegerField(default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithDefaultHeight, old_field, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(AuthorWithDefaultHeight)
        self.assertFalse(columns['height'][1][6])

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_fk(self):
        """
        Tests altering of FKs
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Make sure the FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")
        # Alter the FK
        old_field = Book._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE, editable=False)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Make sure the FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_to_fk(self):
        """
        #24447 - Tests adding a FK constraint for an existing column
        """
        class LocalBook(Model):
            author = IntegerField()
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalBook]

        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBook)
        # Ensure no FK constraint exists
        constraints = self.get_constraints(LocalBook._meta.db_table)
        for name, details in constraints.items():
            if details['foreign_key']:
                self.fail('Found an unexpected FK constraint to %s' % details['columns'])
        old_field = LocalBook._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBook, old_field, new_field, strict=True)
        constraints = self.get_constraints(LocalBook._meta.db_table)
        # Ensure FK constraint exists
        for name, details in constraints.items():
            if details['foreign_key'] and details['columns'] == ["author_id"]:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_o2o_to_fk(self):
        """
        #24163 - Tests altering of OneToOneField to ForeignKey
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Ensure the field is right to begin with
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique
        author = Author.objects.create(name="Joe")
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        BookWithO2O.objects.all().delete()
        # Make sure the FK constraint is present
        constraints = self.get_constraints(BookWithO2O._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
        # Alter the OneToOneField to ForeignKey
        old_field = BookWithO2O._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique anymore
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        # Make sure the FK constraint is still present
        constraints = self.get_constraints(Book._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_alter_fk_to_o2o(self):
        """
        #24163 - Tests altering of ForeignKey to OneToOneField
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique
        author = Author.objects.create(name="Joe")
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        Book.objects.all().delete()
        # Make sure the FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
        # Alter the ForeignKey to OneToOneField
        old_field = Book._meta.get_field("author")
        new_field = OneToOneField(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique now
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        # Make sure the FK constraint is present
        constraints = self.get_constraints(BookWithO2O._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")

    def test_alter_implicit_id_to_explicit(self):
        """
        Should be able to convert an implicit "id" field to an explicit "id"
        primary key field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)

        old_field = Author._meta.get_field("id")
        new_field = AutoField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # This will fail if DROP DEFAULT is inadvertently executed on this
        # field which drops the id sequence, at least on PostgreSQL.
        Author.objects.create(name='Foo')
        Author.objects.create(name='Bar')

    def test_alter_int_pk_to_autofield_pk(self):
        """
        Should be able to rename an IntegerField(primary_key=True) to
        AutoField(primary_key=True).
        """
        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)

        old_field = IntegerPK._meta.get_field('i')
        new_field = AutoField(primary_key=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('i')

        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)

    def test_alter_int_pk_to_int_unique(self):
        """
        Should be able to rename an IntegerField(primary_key=True) to
        IntegerField(unique=True).
        """
        class IntegerUnique(Model):
            i = IntegerField(unique=True)
            j = IntegerField(primary_key=True)

            class Meta:
                app_label = 'schema'
                apps = new_apps
                db_table = 'INTEGERPK'

        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)

        # model requires a new PK
        old_field = IntegerPK._meta.get_field('j')
        new_field = IntegerField(primary_key=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('j')

        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)

        old_field = IntegerPK._meta.get_field('i')
        new_field = IntegerField(unique=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('i')

        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)

        # Ensure unique constraint works.
        IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)

    def test_rename(self):
        """
        Tests simple renaming of fields
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertNotIn("display_name", columns)
        # Alter the name field's name
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("display_name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['display_name'][0], "CharField")
        self.assertNotIn("name", columns)

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_rename_keep_null_status(self):
        """
        Renaming a field shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
        old_field = Note._meta.get_field("info")
        new_field = TextField()
        new_field.set_attributes_from_name("detail_info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        columns = self.column_classes(Note)
        self.assertEqual(columns['detail_info'][0], "TextField")
        self.assertNotIn("info", columns)
        with self.assertRaises(IntegrityError):
            NoteRename.objects.create(detail_info=None)

    def _test_m2m_create(self, M2MFieldClass):
        """
        Tests M2M fields on models during creation
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalBookWithM2M]

        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(TagM2MTest)
            editor.create_model(LocalBookWithM2M)
        # Ensure there is now an m2m table there
        columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
        self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")

    def test_m2m_create(self):
        self._test_m2m_create(ManyToManyField)

    def test_m2m_create_custom(self):
        self._test_m2m_create(CustomManyToManyField)

    def test_m2m_create_inherited(self):
        self._test_m2m_create(InheritedManyToManyField)

    def _test_m2m_create_through(self, M2MFieldClass):
        """
        Tests M2M fields on models during creation with through models
        """
        class LocalTagThrough(Model):
            book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
            tag = ForeignKey("schema.TagM2MTest", CASCADE)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        class LocalBookWithM2MThrough(Model):
            tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]

        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalTagThrough)
            editor.create_model(TagM2MTest)
            editor.create_model(LocalBookWithM2MThrough)
        # Ensure there is now an m2m table there
        columns = self.column_classes(LocalTagThrough)
        self.assertEqual(columns['book_id'][0], "IntegerField")
        self.assertEqual(columns['tag_id'][0], "IntegerField")

    def test_m2m_create_through(self):
        self._test_m2m_create_through(ManyToManyField)

    def test_m2m_create_through_custom(self):
        self._test_m2m_create_through(CustomManyToManyField)

    def test_m2m_create_through_inherited(self):
        self._test_m2m_create_through(InheritedManyToManyField)

    def _test_m2m(self, M2MFieldClass):
        """
        Tests adding/removing M2M fields on models
        """
        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalAuthorWithM2M]

        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorWithM2M)
            editor.create_model(TagM2MTest)
        # Create an M2M field
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)
        # Add the field
        with connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # Ensure there is now an m2m table there
        columns = self.column_classes(new_field.remote_field.through)
        self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")

        # "Alter" the field. This should not rename the DB table to itself.
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2M, new_field, new_field)

        # Remove the M2M table again
        with connection.schema_editor() as editor:
            editor.remove_field(LocalAuthorWithM2M, new_field)
        # Ensure there's no m2m table there
        with self.assertRaises(DatabaseError):
            self.column_classes(new_field.remote_field.through)

        # Make sure the model state is coherent with the table one now that
        # we've removed the tags field.
        opts = LocalAuthorWithM2M._meta
        opts.local_many_to_many.remove(new_field)
        del new_apps.all_models['schema'][new_field.remote_field.through._meta.model_name]
        opts._expire_cache()

    def test_m2m(self):
        self._test_m2m(ManyToManyField)

    def test_m2m_custom(self):
        self._test_m2m(CustomManyToManyField)

    def test_m2m_inherited(self):
        self._test_m2m(InheritedManyToManyField)

    def _test_m2m_through_alter(self, M2MFieldClass):
        """
        Tests altering M2Ms with explicit through models (should no-op)
        """
        class LocalAuthorTag(Model):
            author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
            tag = ForeignKey("schema.TagM2MTest", CASCADE)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        class LocalAuthorWithM2MThrough(Model):
            name = CharField(max_length=255)
            tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]

        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(LocalAuthorTag)
            editor.create_model(LocalAuthorWithM2MThrough)
            editor.create_model(TagM2MTest)
        # Ensure the m2m table is there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
        # "Alter" the field's blankness. This should not actually do anything.
        old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
        new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
        new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field)
        # Ensure the m2m table is still there
        self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)

    def test_m2m_through_alter(self):
        self._test_m2m_through_alter(ManyToManyField)

    def test_m2m_through_alter_custom(self):
        self._test_m2m_through_alter(CustomManyToManyField)

    def test_m2m_through_alter_inherited(self):
        self._test_m2m_through_alter(InheritedManyToManyField)

    def _test_m2m_repoint(self, M2MFieldClass):
        """
        Tests repointing M2M fields
        """
        class LocalBookWithM2M(Model):
            author = ForeignKey(Author, CASCADE)
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()
            tags = M2MFieldClass("TagM2MTest", related_name="books")

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalBookWithM2M]

        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBookWithM2M)
            editor.create_model(TagM2MTest)
            editor.create_model(UniqueTest)
        # Ensure the M2M exists and points to TagM2MTest
        constraints = self.get_constraints(
            LocalBookWithM2M._meta.get_field("tags").remote_field.through._meta.db_table
        )
        if connection.features.supports_foreign_keys:
            for name, details in constraints.items():
                if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
                    self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
                    break
            else:
                self.fail("No FK constraint for tagm2mtest_id found")
        # Repoint the M2M
        old_field = LocalBookWithM2M._meta.get_field("tags")
        new_field = M2MFieldClass(UniqueTest)
        new_field.contribute_to_class(LocalBookWithM2M, "uniques")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBookWithM2M, old_field, new_field)
        # Ensure old M2M is gone
        with self.assertRaises(DatabaseError):
            self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)

        # This model looks like the new model and is used for teardown.
        opts = LocalBookWithM2M._meta
        opts.local_many_to_many.remove(old_field)
        # Ensure the new M2M exists and points to UniqueTest
        constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
        if connection.features.supports_foreign_keys:
            for name, details in constraints.items():
                if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
                    self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
                    break
            else:
                self.fail("No FK constraint for uniquetest_id found")

    def test_m2m_repoint(self):
        self._test_m2m_repoint(ManyToManyField)

    def test_m2m_repoint_custom(self):
        self._test_m2m_repoint(CustomManyToManyField)

    def test_m2m_repoint_inherited(self):
        self._test_m2m_repoint(InheritedManyToManyField)

    @skipUnlessDBFeature('supports_column_check_constraints')
    def test_check_constraints(self):
        """
        Tests creating/deleting CHECK constraints
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the constraint exists
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                break
        else:
            self.fail("No check constraint for height found")
        # Alter the column to remove it
        old_field = Author._meta.get_field("height")
        new_field = IntegerField(null=True, blank=True)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                self.fail("Check constraint for height found")
        # Alter the column to re-add it
        new_field2 = Author._meta.get_field("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["height"] and details['check']:
                break
        else:
            self.fail("No check constraint for height found")

    def test_unique(self):
        """
        Tests removing and adding unique constraints to a single column.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the field is unique to begin with
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be non-unique
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(unique=False)
        new_field.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        # Ensure the field is no longer unique
        Tag.objects.create(title="foo", slug="foo")
        Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Alter the slug field to be unique
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        # Ensure the field is unique again
        Tag.objects.create(title="foo", slug="foo")
        with self.assertRaises(IntegrityError):
            Tag.objects.create(title="bar", slug="foo")
        Tag.objects.all().delete()
        # Rename the field
        new_field3 = SlugField(unique=True)
        new_field3.set_attributes_from_name("slug2")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field2, new_field3, strict=True)
        # Ensure the field is still unique
        TagUniqueRename.objects.create(title="foo", slug2="foo")
        with self.assertRaises(IntegrityError):
            TagUniqueRename.objects.create(title="bar", slug2="foo")
        Tag.objects.all().delete()

    def test_unique_together(self):
        """
        Tests removing and adding unique_together constraints on a model.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        # Ensure the fields are unique to begin with
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2011, slug="foo")
        UniqueTest.objects.create(year=2011, slug="bar")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter the model to its non-unique-together companion
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
        # Ensure the fields are no longer unique
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()
        # Alter it back
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
        # Ensure the fields are unique again
        UniqueTest.objects.create(year=2012, slug="foo")
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=2012, slug="foo")
        UniqueTest.objects.all().delete()

    def test_unique_together_with_fk(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the fields are unique to begin with
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])

    def test_unique_together_with_fk_with_existing_index(self):
        """
        Tests removing and adding unique_together constraints that include
        a foreign key, where the foreign key is added after the model is
        created.
        """
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithoutAuthor)
            new_field = ForeignKey(Author, CASCADE)
            new_field.set_attributes_from_name('author')
            editor.add_field(BookWithoutAuthor, new_field)
        # Ensure the fields aren't unique to begin with
        self.assertEqual(Book._meta.unique_together, ())
        # Add the unique_together constraint
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_unique_together(Book, [['author', 'title']], [])

    def test_index_together(self):
        """
        Tests removing and adding index_together constraints on a model.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure there's no index on the year/slug columns first
        self.assertEqual(
            False,
            any(
                c["index"]
                for c in self.get_constraints("schema_tag").values()
                if c['columns'] == ["slug", "title"]
            ),
        )
        # Alter the model to add an index
        with connection.schema_editor() as editor:
            editor.alter_index_together(Tag, [], [("slug", "title")])
        # Ensure there is now an index
        self.assertEqual(
            True,
            any(
                c["index"]
                for c in self.get_constraints("schema_tag").values()
                if c['columns'] == ["slug", "title"]
            ),
        )
        # Alter it back
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_index_together(Tag, [("slug", "title")], [])
        # Ensure there's no index
        self.assertEqual(
            False,
            any(
                c["index"]
                for c in self.get_constraints("schema_tag").values()
                if c['columns'] == ["slug", "title"]
            ),
        )

    def test_index_together_with_fk(self):
        """
        Tests removing and adding index_together constraints that include
        a foreign key.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the fields are unique to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together constraint
        with connection.schema_editor() as editor:
            editor.alter_index_together(Book, [], [['author', 'title']])
        # Alter it back
        with connection.schema_editor() as editor:
            editor.alter_index_together(Book, [['author', 'title']], [])

    def test_create_index_together(self):
        """
        Tests creating models with index_together already defined
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(TagIndexed)
        # Ensure there is an index
        self.assertEqual(
            True,
            any(
                c["index"]
                for c in self.get_constraints("schema_tagindexed").values()
                if c['columns'] == ["slug", "title"]
            ),
        )

    def test_db_table(self):
        """
        Tests renaming of the table
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the table is there to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        # Alter the table
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
        # Ensure the table is there afterwards
        Author._meta.db_table = "schema_otherauthor"
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        # Alter the table again
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
        # Ensure the table is still there
        Author._meta.db_table = "schema_author"
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")

    def test_indexes(self):
        """
        Tests creation/altering of indexes
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the table is there and has the right index
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to remove the index
        old_field = Book._meta.get_field("title")
        new_field = CharField(max_length=100, db_index=False)
        new_field.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the table is there and has no index
        self.assertNotIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to re-add the index
        new_field2 = Book._meta.get_field("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, new_field, new_field2, strict=True)
        # Ensure the table is there and has the index again
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Add a unique column, verify that creates an implicit index
        new_field3 = BookWithSlug._meta.get_field("slug")
        with connection.schema_editor() as editor:
            editor.add_field(Book, new_field3)
        self.assertIn(
            "slug",
            self.get_indexes(Book._meta.db_table),
        )
        # Remove the unique, check the index goes with it
        new_field4 = CharField(max_length=20, unique=False)
        new_field4.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
        self.assertNotIn(
            "slug",
            self.get_indexes(Book._meta.db_table),
        )

    def test_primary_key(self):
        """
        Tests altering of the primary key
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the table is there and has the right PK
        self.assertTrue(
            self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
        )
        # Alter to change the PK
        id_field = Tag._meta.get_field("id")
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(primary_key=True)
        new_field.set_attributes_from_name("slug")
        new_field.model = Tag
        with connection.schema_editor() as editor:
            editor.remove_field(Tag, id_field)
            editor.alter_field(Tag, old_field, new_field)
        # Ensure the PK changed
        self.assertNotIn(
            'id',
            self.get_indexes(Tag._meta.db_table),
        )
        self.assertTrue(
            self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
        )

    def test_context_manager_exit(self):
        """
        Ensures transaction is correctly closed when an error occurs
        inside a SchemaEditor context.
        """
        class SomeError(Exception):
            pass
        try:
            with connection.schema_editor():
                raise SomeError
        except SomeError:
            self.assertFalse(connection.in_atomic_block)

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_foreign_key_index_long_names_regression(self):
        """
        Regression test for #21497.
        Only affects databases that supports foreign keys.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Find the properly shortened column name
        column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
        column_name = column_name[1:-1].lower()  # unquote, and, for Oracle, un-upcase
        # Ensure the table is there and has an index on the column
        self.assertIn(
            column_name,
            self.get_indexes(BookWithLongName._meta.db_table),
        )

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_add_foreign_key_long_names(self):
        """
        Regression test for #23009.
        Only affects databases that supports foreign keys.
        """
        # Create the initial tables
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Add a second FK, this would fail due to long ref name before the fix
        new_field = ForeignKey(AuthorWithEvenLongerName, CASCADE, related_name="something")
        new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
        with connection.schema_editor() as editor:
            editor.add_field(BookWithLongName, new_field)

    def test_add_foreign_object(self):
        with connection.schema_editor() as editor:
            editor.create_model(BookForeignObj)

        new_field = ForeignObject(Author, on_delete=CASCADE, from_fields=['author_id'], to_fields=['id'])
        new_field.set_attributes_from_name('author')
        with connection.schema_editor() as editor:
            editor.add_field(BookForeignObj, new_field)

    def test_creation_deletion_reserved_names(self):
        """
        Tries creating a model's table, and then deleting it when it has a
        SQL reserved name.
        """
        # Create the table
        with connection.schema_editor() as editor:
            try:
                editor.create_model(Thing)
            except OperationalError as e:
                self.fail("Errors when applying initial migration for a model "
                          "with a table named after a SQL reserved word: %s" % e)
        # Check that it's there
        list(Thing.objects.all())
        # Clean up that table
        with connection.schema_editor() as editor:
            editor.delete_model(Thing)
        # Check that it's gone
        with self.assertRaises(DatabaseError):
            list(Thing.objects.all())

    @skipUnlessDBFeature('supports_foreign_keys')
    def test_remove_constraints_capital_letters(self):
        """
        #23065 - Constraint names must be quoted if they contain capital letters.
        """
        def get_field(*args, **kwargs):
            kwargs['db_column'] = "CamelCase"
            field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
            field.set_attributes_from_name("CamelCase")
            return field

        model = Author
        field = get_field()
        table = model._meta.db_table
        column = field.column

        with connection.schema_editor() as editor:
            editor.create_model(model)
            editor.add_field(model, field)

            editor.execute(
                editor.sql_create_index % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name("CamelCaseIndex"),
                    "columns": editor.quote_name(column),
                    "extra": "",
                }
            )
            editor.alter_field(model, get_field(db_index=True), field)
            editor.execute(
                editor.sql_create_unique % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name("CamelCaseUniqConstraint"),
                    "columns": editor.quote_name(field.column),
                }
            )
            editor.alter_field(model, get_field(unique=True), field)
            editor.execute(
                editor.sql_create_fk % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name("CamelCaseFKConstraint"),
                    "column": editor.quote_name(column),
                    "to_table": editor.quote_name(table),
                    "to_column": editor.quote_name(model._meta.auto_field.column),
                }
            )
            editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field)

    def test_add_field_use_effective_default(self):
        """
        #23987 - effective_default() should be used as the field default when
        adding a new field.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row
        Author.objects.create(name='Anonymous1')
        # Add new CharField to ensure default will be used from effective_default
        new_field = CharField(max_length=15, blank=True)
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure field was added with the right default
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertEqual(item[0],
                             None if connection.features.interprets_empty_strings_as_nulls else '')

    def test_add_field_default_dropped(self):
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row
        Author.objects.create(name='Anonymous1')
        # Add new CharField with a default
        new_field = CharField(max_length=15, blank=True, default='surname default')
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure field was added with the right default
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertEqual(item[0], 'surname default')
            # And that the default is no longer set in the database.
            field = next(
                f for f in connection.introspection.get_table_description(cursor, "schema_author")
                if f.name == "surname"
            )
            if connection.features.can_introspect_default:
                self.assertIsNone(field.default)

    def test_alter_field_default_dropped(self):
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Create a row
        Author.objects.create(name='Anonymous1')
        self.assertEqual(Author.objects.get().height, None)
        old_field = Author._meta.get_field('height')
        # The default from the new field is used in updating existing rows.
        new_field = IntegerField(blank=True, default=42)
        new_field.set_attributes_from_name('height')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field)
        self.assertEqual(Author.objects.get().height, 42)
        # The database default should be removed.
        with connection.cursor() as cursor:
            field = next(
                f for f in connection.introspection.get_table_description(cursor, "schema_author")
                if f.name == "height"
            )
            if connection.features.can_introspect_default:
                self.assertIsNone(field.default)

    def test_add_textfield_unhashable_default(self):
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Create a row
        Author.objects.create(name='Anonymous1')
        # Create a field that has an unhashable default
        new_field = TextField(default={})
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_index_to_charfield(self):
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])
        # Alter to add db_index=True and create 2 indexes.
        old_field = Author._meta.get_field('name')
        new_field = CharField(max_length=255, db_index=True)
        new_field.set_attributes_from_name('name')
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Author, 'name'),
            ['schema_author_name_1fbc5617_like', 'schema_author_name_1fbc5617_uniq']
        )
        # Remove db_index=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, 'name'), [])

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_index_to_textfield(self):
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])
        # Alter to add db_index=True and create 2 indexes.
        old_field = Note._meta.get_field('info')
        new_field = TextField(db_index=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Note, 'info'),
            ['schema_note_info_4b0ea695_like', 'schema_note_info_4b0ea695_uniq']
        )
        # Remove db_index=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Note, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Note, 'info'), [])

    @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific")
    def test_alter_field_add_unique_to_charfield_with_db_index(self):
        # Create the table and verify initial indexes.
with connection.schema_editor() as editor: editor.create_model(BookWithoutAuthor) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_d5d3db17', 'schema_book_title_2dfb2dff_like'] ) # Alter to add unique=True (should add 1 index) old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True, unique=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_d5d3db17', 'schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) # Alter to remove unique=True (should drop unique index) # XXX: bug! old_field = BookWithoutAuthor._meta.get_field('title') new_field = CharField(max_length=100, db_index=True) new_field.set_attributes_from_name('title') with connection.schema_editor() as editor: editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(BookWithoutAuthor, 'title'), ['schema_book_d5d3db17', 'schema_book_title_2dfb2dff_like', 'schema_book_title_2dfb2dff_uniq'] ) @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific") def test_alter_field_add_db_index_to_charfield_with_unique(self): # Create the table and verify initial indexes. with connection.schema_editor() as editor: editor.create_model(Tag) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to add db_index=True old_field = Tag._meta.get_field('slug') new_field = SlugField(db_index=True, unique=True) new_field.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) # Alter to remove db_index=True old_field = Tag._meta.get_field('slug') new_field = SlugField(unique=True) new_field.set_attributes_from_name('slug') with connection.schema_editor() as editor: editor.alter_field(Tag, old_field, new_field, strict=True) self.assertEqual( self.get_constraints_for_column(Tag, 'slug'), ['schema_tag_slug_2c418ba3_like', 'schema_tag_slug_key'] ) def test_alter_pk_with_self_referential_field(self): """ Changing the primary key field name of a model with a self-referential foreign key (#26384). """ if connection.vendor == 'mysql' and connection.mysql_version < (5, 6, 6): self.skipTest('Skip known bug renaming primary keys on older MySQL versions (#24995).') old_field = Node._meta.get_field('node_id') new_field = AutoField(primary_key=True) new_field.set_attributes_from_name('id') with connection.schema_editor() as editor: editor.alter_field(Node, old_field, new_field)
daniponi/django
tests/schema/tests.py
Python
bsd-3-clause
79,293
from __future__ import absolute_import

from django.test import Client
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler


class RequestFactory(Client):
    """Class that lets you create mock Request objects for use in testing.

    Usage:

    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})

    This class re-uses the django.test.client.Client interface, docs here:
    http://www.djangoproject.com/documentation/testing/#the-test-client

    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """

    def request(self, **request):
        """Similar to parent class, but returns the request object as soon as it
        has created it."""
        environ = {
            'HTTP_COOKIE': self.cookies,
            'HTTP_USER_AGENT': 'Django UnitTest Client 1.0',
            'REMOTE_ADDR': '127.0.0.1',
            'PATH_INFO': '/',
            'QUERY_STRING': '',
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': '',
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
            'SERVER_PROTOCOL': 'HTTP/1.1',
        }
        environ.update(self.defaults)
        environ.update(request)
        return WSGIRequest(environ)


class MockRequest(object):

    def __init__(self):
        handler = BaseHandler()
        handler.load_middleware()
        self.request_factory = RequestFactory()
        self.middleware = handler._request_middleware

    def _make_request(self, request_method, *args, **kwargs):
        request_method_handler = getattr(self.request_factory, request_method)
        request = request_method_handler(*args, **kwargs)
        [middleware_processor(request)
         for middleware_processor in self.middleware]
        return request

    def get(self, *args, **kwargs):
        return self._make_request("get", *args, **kwargs)

    def post(self, *args, **kwargs):
        return self._make_request("post", *args, **kwargs)

    def put(self, *args, **kwargs):
        return self._make_request("put", *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self._make_request("delete", *args, **kwargs)
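
# Hedged usage sketch (not part of the original module): with Django settings
# configured, a view can be exercised directly. `my_view` is a hypothetical
# view function, not something defined above.
#
#     request = MockRequest().get('/hello/')   # request middleware already applied
#     response = my_view(request)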
mzdaniel/oh-mainline
vendor/packages/django-celery/djcelery/tests/req.py
Python
agpl-3.0
2,296
import os
#from datetime import timedelta
from celery.schedules import crontab


def bool_env(val):
    """Replaces string based environment values with Python booleans"""
    return True if os.environ.get(val, False) == "True" else False


TIMEZONE = os.getenv("TIMEZONE", "US/Central")

########
# Amazon
#
MWS_ACCESS_KEY = os.getenv("MWS_ACCESS_KEY", "")
MWS_SECRET_KEY = os.getenv("MWS_SECRET_KEY", "")
AMAZON_MERCHANT_ID = os.getenv("AMAZON_MERCHANT_ID", "")
AMAZON_SANDBOX = bool_env("AMAZON_SANDBOX")
AMAZON_CAMPAIGN_ID = os.getenv("AMAZON_CAMPAIGN_ID", "")

########
# Celery
#
# these lines pending deletion
#CELERY_TIMEZONE = TIMEZONE
#CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL")
#CELERY_RESULT_BACKEND = os.getenv("CELERY_RESULT_BACKEND")
#CELERY_ALWAYS_EAGER = bool_env("CELERY_ALWAYS_EAGER")

#CHARGE_MINUTES_FREQUENCY = os.getenv("CHARGE_MINUTES_FREQUENCY", 5)
#ACH_MINUTES_FREQUENCY = os.getenv("ACH_MINUTES_FREQUENCY", 10)

#CELERYBEAT_SCHEDULE = {
#    "charge-cards": {
#        "task": "batch.charge_cards",
#        "schedule": timedelta(minutes=CHARGE_MINUTES_FREQUENCY)
#    },
#    "charge-ach": {
#        "task": "batch.update_ach_charges",
#        "schedule": timedelta(minutes=ACH_MINUTES_FREQUENCY)
#    },
#}

# default is 4am and 4pm:
BATCH_HOURS = os.getenv("BATCH_HOURS", "4, 16")

CELERY_TIMEZONE = TIMEZONE
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL")
CELERY_RESULT_BACKEND = os.getenv("CELERY_RESULT_BACKEND")
CELERY_ALWAYS_EAGER = bool_env("CELERY_ALWAYS_EAGER")
UPDATE_STRIPE_FEES = bool_env('UPDATE_STRIPE_FEES')

# either way we need these values
CELERYD_LOG_FORMAT = "%(levelname)s %(name)s/%(module)s:%(lineno)d - %(message)s"
CELERYD_TASK_LOG_FORMAT = "%(levelname)s %(name)s/%(module)s:%(lineno)d - %(message)s"

REDIS_URL = os.getenv("REDIS_URL")

#######
# Flask
#
FLASK_SECRET_KEY = os.getenv("FLASK_SECRET_KEY")
FLASK_DEBUG = os.getenv("FLASK_DEBUG", 0)
WTF_CSRF_ENABLED = False
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")

########
# Plaid (for ACH)
#
PLAID_CLIENT_ID = os.getenv('PLAID_CLIENT_ID')
PLAID_SECRET = os.getenv('PLAID_SECRET')
PLAID_ENVIRONMENT = os.getenv('PLAID_ENVIRONMENT')

#######
# Portal
#
ENABLE_PORTAL = bool_env("ENABLE_PORTAL")

########
# Salesforce
#
ADVERTISING_CAMPAIGN_ID = os.getenv('ADVERTISING_CAMPAIGN_ID')
ANNIVERSARY_PARTY_CAMPAIGN_ID = os.getenv('ANNIVERSARY_PARTY_CAMPAIGN_ID')
COMBINED_EMAIL_FIELD = os.getenv('COMBINED_EMAIL_FIELD', 'Consolidated_EMail__c')
DEFAULT_CAMPAIGN_ONETIME = os.getenv('DEFAULT_CAMPAIGN_ONETIME')
DEFAULT_CAMPAIGN_RECURRING = os.getenv('DEFAULT_CAMPAIGN_RECURRING')
MINNROAST_CAMPAIGN_ID = os.getenv('MINNROAST_CAMPAIGN_ID')
SALESFORCE_CONTACT_ADVERTISING_EMAIL = os.getenv('SALESFORCE_CONTACT_ADVERTISING_EMAIL')

########
# Sentry
#
ENABLE_SENTRY = bool_env("ENABLE_SENTRY")
SENTRY_DSN = os.getenv("SENTRY_DSN")
SENTRY_ENVIRONMENT = os.getenv("SENTRY_ENVIRONMENT", "unknown")
REPORT_URI = os.getenv("REPORT_URI")

#######
# Slack
#
ENABLE_SLACK = bool_env("ENABLE_SLACK")
SLACK_CHANNEL = os.getenv("SLACK_CHANNEL", "#stripe")
SLACK_API_KEY = os.getenv("SLACK_API_KEY")

######
# SMTP
#
MAIL_SERVER = os.getenv("MAIL_SERVER", "localhost")
MAIL_USERNAME = os.getenv("MAIL_USERNAME", "user")
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD", "pass")
MAIL_PORT = os.getenv("MAIL_PORT", "2525")
MAIL_USE_TLS = bool_env("MAIL_USE_TLS")
DEFAULT_MAIL_SENDER = os.getenv("DEFAULT_MAIL_SENDER", "me@myplace.org")
MULTIPLE_ACCOUNT_WARNING_MAIL_RECIPIENT = os.getenv(
    "MULTIPLE_ACCOUNT_WARNING_MAIL_RECIPIENT", ""
)
ACCOUNTING_MAIL_RECIPIENT = os.getenv("ACCOUNTING_MAIL_RECIPIENT", "")
BUSINESS_MEMBER_RECIPIENT = os.getenv("BUSINESS_MEMBER_RECIPIENT", "")

########
# Stripe
#
STRIPE_KEYS = {
    "secret_key": os.getenv("SECRET_KEY"),
    "publishable_key": os.getenv("PUBLISHABLE_KEY"),
}
STRIPE_WEBHOOK_SECRET = os.getenv("STRIPE_WEBHOOK_SECRET", "")

########
# Recaptcha
#
RECAPTCHA_KEYS = {
    "secret_key": os.getenv("RECAPTCHA_SECRET_KEY"),
    "site_key": os.getenv("RECAPTCHA_SITE_KEY"),
}
USE_RECAPTCHA = bool_env("USE_RECAPTCHA")

#######
# Tasks
#

########
# User Interface options
#
DEFAULT_FREQUENCY = os.getenv('DEFAULT_FREQUENCY', 'one-time')
MINNPOST_ROOT = os.getenv('MINNPOST_ROOT')
SHOW_ACH = bool_env("SHOW_ACH")
SHOW_PAYMENT_REQUEST = bool_env("SHOW_PAYMENT_REQUEST")
MinnPost/salesforce-stripe
config.py
Python
mit
4,316
from guardian.shortcuts import get_perms, get_users_with_perms, assign_perm, remove_perm

from rest_framework import serializers, viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from rest_framework import status

from django.db import transaction
from django.contrib.auth.models import User

from app import models
from .tasks import TaskIDsSerializer
from .common import get_and_check_project
from django.utils.translation import gettext as _


def normalized_perm_names(perms):
    return list(map(lambda p: p.replace("_project", ""), perms))


class ProjectSerializer(serializers.ModelSerializer):
    tasks = TaskIDsSerializer(many=True, read_only=True)
    owner = serializers.HiddenField(
        default=serializers.CurrentUserDefault()
    )
    created_at = serializers.ReadOnlyField()
    permissions = serializers.SerializerMethodField()

    def get_permissions(self, obj):
        if 'request' in self.context:
            return normalized_perm_names(get_perms(self.context['request'].user, obj))
        else:
            # Cannot list permissions, no user is associated with request (happens when serializing ui test mocks)
            return []

    class Meta:
        model = models.Project
        exclude = ('deleting', )


class ProjectViewSet(viewsets.ModelViewSet):
    """
    Project get/add/delete/update
    Projects are the building blocks
    of processing. Each project can have zero or more tasks associated with it.
    Users can fine tune the permissions on projects, including whether users/groups have
    access to view, add, change or delete them.
    """
    filter_fields = ('id', 'name', 'description', 'created_at')
    serializer_class = ProjectSerializer
    queryset = models.Project.objects.prefetch_related('task_set').filter(deleting=False).order_by('-created_at')
    ordering_fields = '__all__'

    # Disable pagination when not requesting any page
    def paginate_queryset(self, queryset):
        if self.paginator and self.request.query_params.get(self.paginator.page_query_param, None) is None:
            return None
        return super().paginate_queryset(queryset)

    @detail_route(methods=['post'])
    def duplicate(self, request, pk=None):
        """
        Duplicate a project
        """
        project = get_and_check_project(request, pk, ('change_project', ))

        new_project = project.duplicate()
        if new_project:
            return Response({'success': True, 'project': ProjectSerializer(new_project).data}, status=status.HTTP_200_OK)
        else:
            return Response({'error': _("Cannot duplicate project")}, status=status.HTTP_200_OK)

    @detail_route(methods=['get'])
    def permissions(self, request, pk=None):
        project = get_and_check_project(request, pk, ('change_project', ))

        result = []
        perms = get_users_with_perms(project, attach_perms=True, with_group_users=False)
        for user in perms:
            result.append({'username': user.username,
                           'owner': project.owner == user,
                           'permissions': normalized_perm_names(perms[user])})

        result.sort(key=lambda r: r['owner'], reverse=True)

        return Response(result, status=status.HTTP_200_OK)

    @detail_route(methods=['post'])
    def edit(self, request, pk=None):
        project = get_and_check_project(request, pk, ('change_project', ))

        try:
            with transaction.atomic():
                project.name = request.data.get('name', '')
                project.description = request.data.get('description', '')
                project.save()

                form_perms = request.data.get('permissions')
                if form_perms is not None:
                    # Build perms map (ignore owners, empty usernames)
                    perms_map = {}
                    for perm in form_perms:
                        if not perm.get('owner') and perm.get('username'):
                            perms_map[perm['username']] = perm['permissions']

                    db_perms = get_users_with_perms(project, attach_perms=True, with_group_users=False)

                    # Check users to remove
                    for user in db_perms:
                        # Never modify owner permissions
                        if project.owner == user:
                            continue

                        if perms_map.get(user.username) is None:
                            for p in db_perms[user]:
                                remove_perm(p, user, project)

                    # Check users to add/edit
                    for username in perms_map:
                        for p in ["add", "change", "delete", "view"]:
                            perm = p + "_project"
                            user = User.objects.get(username=username)

                            # Skip owners
                            if project.owner == user:
                                continue

                            # Has permission in database but not in form?
                            if user.has_perm(perm, project) and not p in perms_map[username]:
                                remove_perm(perm, user, project)

                            # Has permission in form but not in database?
                            elif p in perms_map[username] and not user.has_perm(perm, project):
                                assign_perm(perm, user, project)

        except User.DoesNotExist as e:
            return Response({'error': _("Invalid user in permissions list")}, status=status.HTTP_400_BAD_REQUEST)
        except AttributeError as e:
            return Response({'error': _("Invalid permissions")}, status=status.HTTP_400_BAD_REQUEST)

        return Response({'success': True}, status=status.HTTP_200_OK)
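
# Hedged illustration of the permissions diff in edit() above (not part of the
# original module). Given
#
#     form_perms = [{'username': 'alice', 'permissions': ['view', 'change']}]
#
# a non-owner 'alice' who currently holds only view_project gains
# change_project, while any non-owner user absent from the form loses all of
# their project permissions.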
OpenDroneMap/WebODM
app/api/projects.py
Python
agpl-3.0
5,953
#!/usr/bin/env python
"""
This script is used to run tests, create a coverage report and output the
statistics at the end of the tox run.

To run this script just execute ``tox``

"""
import re
from fabric.api import local, warn
from fabric.colors import green, red


if __name__ == '__main__':
    # flake8 treats repeated --ignore flags as overrides (only the last one
    # takes effect), so the codes belong in a single comma-separated list.
    local('flake8 --ignore=E126,W391 --statistics'
          ' --exclude=submodules,migrations,south_migrations,build .')
    local('coverage run --source="cmsplugin_outlets" manage.py test -v 2'
          ' --traceback --failfast --settings=cmsplugin_outlets.tests.settings'
          ' --pattern="*_tests.py"')
    local('coverage html -d coverage --omit="*__init__*,*/settings/*,'
          '*/migrations/*,*/south_migrations/*,*/tests/*,*admin*"')
    total_line = local('grep -n pc_cov coverage/index.html', capture=True)
    percentage = float(re.findall(r'(\d+)%', total_line)[-1])
    if percentage < 100:
        warn(red('Coverage is {0}%'.format(percentage)))
    print(green('Coverage is {0}%'.format(percentage)))
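
# Hedged example of the line being parsed above (the exact markup varies
# across coverage.py versions; this shape is an assumption):
#
#     $ grep -n pc_cov coverage/index.html
#     61:            <span class='pc_cov'>97%</span>
#
# re.findall(r'(\d+)%', ...)[-1] then picks the last percentage on that line.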
bitmazk/cmsplugin-django-outlets
runtests.py
Python
mit
1,030
from setuptools import setup

setup(name='pycon-tutorial-steve98654',
      version='0.1',
      description='test proj',
      py_modules=['wordcount_lib'],
      scripts=['wordcount'],
      setup_requires=[
          'pytest-runner',
      ],
      # the setuptools keyword is "tests_require", not "test_require"
      tests_require=[
          'pytest',
      ]
      )
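
# Hedged usage note (not part of the original file): with pytest-runner in
# setup_requires and pytest in tests_require, the suite can be run via
#
#     python setup.py pytest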
steve98654/pycon-tutorial-steve98564
setup.py
Python
bsd-3-clause
310
from zeroconf import ServiceBrowser, Zeroconf
import time


class DeviceListener:

    def remove_service(self, zeroconf, typ, name):
        return

    def add_service(self, zconf, typ, name):
        service = None
        tries = 0
        while service is None and tries < 4:
            service = zconf.get_service_info(typ, name)
            tries += 1
        if service:
            ips = zconf.cache.entries_with_name(service.server.lower())
            host = repr(ips[0]) if ips else service.server
            name = name.encode("utf-8")
            print("2")
            print("ip=" + host)
            print("name=" + name[:name.index("._googlecast")])


zeroconf = Zeroconf()
listener = DeviceListener()
browser = ServiceBrowser(zeroconf, "_googlecast._tcp.local.", listener)
time.sleep(10)
druzy/python-protocol
src/discovery.py
Python
mit
799
#!/usr/bin/env python

import os
import sys
import pprint
import argparse
import pickle

import pylab as pl
import matplotlib.pyplot as plt
import networkx as nx

parser = argparse.ArgumentParser(description='Construct meshterm network for search criteria.')
parser.add_argument('--outfile', type=argparse.FileType('w'), required=True)
parser.add_argument('--pickle', type=argparse.FileType('r'), required=True)
args = parser.parse_args()

outfile = args.outfile
pickle_file = args.pickle

if __name__ == '__main__':
    G = pickle.Unpickler(pickle_file).load()
    print "nodes: ", len(G.nodes())

    plt.cla()
    fig = plt.figure(figsize=(38, 38), dpi=800)
    nx.draw(G,
            node_size=[G.degree(n) for n in G.nodes()],
            width=[G.get_edge_data(*e)['citations'] for e in G.edges()],
            edge_color=[G.get_edge_data(*e)['jin'] for e in G.edges()]
            )
    plt.savefig(outfile)
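
# Hedged note (not part of the original script): the pickled object is assumed
# to be a networkx graph whose edges carry 'citations' and 'jin' attributes,
# e.g. built with something like
#
#     G.add_edge('term_a', 'term_b', citations=12, jin=0.4)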
LC3-INMEGEN/pubmed-mining
plot_pickled_graph.py
Python
gpl-3.0
939
# Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

import re
from requests import get
from lxml.html import fromstring
from ..helpers.command import Command
from ..helpers.urlutils import get_short


@Command(['cve', 'cveid'], ['config'])
def cmd(send, msg, args):
    """Gets info on a CVE id from MITRE's CVE database

    Syntax: {command} <cveid>

    """
    elements = msg.split('-')
    if len(elements) > 3 or len(elements) < 2:
        send("Invalid CVE format")
        return
    # If there are three fields, ignore the first (we don't actually need to send the leading 'CVE-')
    if len(elements) == 3:
        if not elements[0].upper() == 'CVE':
            send("Invalid CVE format")
            return
        elements.pop(0)
    # The first digit field should be exactly four digits long, the second is 4+
    if not re.search(r"^[\d]{4}$", elements[0]) or not re.search(r"^[\d]{4,}$", elements[1]):
        send("Invalid CVE format")
        return
    search = "%s-%s" % (elements[0], elements[1])
    url = 'http://cve.mitre.org/cgi-bin/cvename.cgi?name=%s' % search
    html = fromstring(get(url).text)
    title = html.find(".//title").text.splitlines()[2]
    if title.startswith('ERROR'):
        output = 'Invalid CVE Number'
    else:
        key = args['config']['api']['googleapikey']
        output = "%s -- %s" % (title, get_short(url, key))
    send(output)
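
# Hedged examples of inputs accepted by the parsing above (not part of the
# original module): both "CVE-2014-0160" and the bare "2014-0160" pass the
# field checks, while "CVE-14-160" (first field not 4 digits) and
# "heartbleed" (fewer than two '-' separated fields) are rejected.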
N6UDP/cslbot
cslbot/commands/cve.py
Python
gpl-2.0
2,166
"""atom.py: ... """ import numpy as np from numpy import nan import scipy.linalg as spl import sympy.physics.units as u import piratechem as pc class Atom(pc.atom.Atom): """Allow each atom to contain more specific quantum chemical properties than piratechem can currently handle. """ def __init__(self, index, name, r): pc.atom.Atom.__init__(self, name, r) self.index = index self.nmr = NMR() self.hyperfine = Hyperfine() self.efg = EFG() self.euler = Euler() def __str__(self): s = "Atom(%d, %s, [%6.3f, %6.3f, %6.3f])" return s % (self.index, self.name, self.posx, self.posy, self.posz) class Euler(object): """Store all possible Euler angle information for a single atom. """ def __init__(self): self.hyperfine = self.Hyperfine() self.efg = self.EFG() class Hyperfine: """Store the Euler angle information for the atom's hyperfine tensor. """ def __init__(self): self.alpha = self.beta = self.gamma = nan self.ax = self.ay = self.az = nan def __str__(self): s = "EulerHyperfine([{0}, {1}, {2}]; [{3} {4} {5}])" return s.format(self.alpha, self.beta, self.gamma, self.ax, self.ay, self.az) def return_angles(self): """Return the three Euler angles as a NumPy row vector. """ return np.array([self.alpha, self.beta, self.gamma]) class EFG: """Store the Euler angle information for the atom's electric field gradient (EFG) tensor. """ def __init__(self): self.alpha = self.beta = self.gamma = nan self.efgx = self.efgy = self.efgz = nan def __str__(self): s = "EulerEFG([{0}, {1}, {2}]; [{3} {4} {5}])" return s.format(self.alpha, self.beta, self.gamma, self.efgx, self.efgy, self.efgz) def return_angles(self): """Return the three Euler angles as a NumPy row vector. """ return np.array([self.alpha, self.beta, self.gamma]) class NMR: """Hold all of the fields that may be present in the output file from an NMR shift calculation. """ def __init__(self): self.shiftmat = np.array([[nan, nan, nan], [nan, nan, nan], [nan, nan, nan]]) self.sdso = np.array([nan, nan, nan]) self.spso = np.array([nan, nan, nan]) self.shiftpri = np.array([nan, nan, nan]) self.sdso_iso = nan self.spso_iso = nan self.shiftiso = nan self.shiftori = np.array([[nan, nan, nan], [nan, nan, nan], [nan, nan, nan]]) self.eigvals = np.array([nan, nan, nan]) self.iso = nan def __str__(self): s = "NMR([{0} {1} {2}]; {3})" return s.format(self.shiftpri[0], self.shiftpri[1], self.shiftpri[2], self.shiftiso) def _scale(self): """Convert the absolute values given by ORCA to ppm (mutate in place). """ abs_to_ppm = 1e6 self.shiftmat *= abs_to_ppm self.sdso *= abs_to_ppm self.spso *= abs_to_ppm self.shiftpri *= abs_to_ppm self.sdso_iso *= abs_to_ppm self.spso_iso *= abs_to_ppm self.shiftiso *= abs_to_ppm def _diag(self): """Diagonalize the raw shift matrix to get the three principal shift values, and use them to calculate an isotropic result. """ self.eigvals = np.sqrt(spl.eigvals( np.dot(self.shiftmat.T, self.shiftmat)).real) self.iso = np.sum(self.eigvals) / 3.0 class Hyperfine: """Hold all of the fields that may be present in the output file from an electron-nuclear hyperfine interaction calculation. 
""" def __init__(self): self.aiso = nan self.atensor = np.array([nan, nan, nan]) self.amatrix = np.array([[nan, nan, nan], [nan, nan, nan], [nan, nan, nan]]) self.afc = np.array([nan, nan, nan]) self.asd = np.array([nan, nan, nan]) self.aso = np.array([nan, nan, nan]) self.apc = nan self.aori = np.array([[nan, nan, nan], [nan, nan, nan], [nan, nan, nan]]) self.rho = nan self.tdip = nan def __str__(self): s = "Hyperfine([{0} {1} {2}]; {3})" return s.format(self.atensor[0], self.atensor[1], self.atensor[2], self.aiso) def _calc_eff_spin_params(self): """Calculate the rho and T_dip terms that appear [...] """ Axx, Ayy, Azz = self.atensor[0], self.atensor[1], self.atensor[2] Aiso = self.aiso rho = (3*Aiso - 2*Axx - Azz)/(Aiso - Azz) # rho = (-3*Aiso + 2*Ayy + Azz)/(Aiso - Azz) # need to add an assertion that both of these are equal # tdip = (-Aiso + Axx)/(rho - 1) # tdip = (Aiso - Ayy)/(rho + 1) tdip = (Azz - Aiso)/2 # need to add an assertion that these three are equal self.rho = rho self.tdip = tdip class EFG: """Hold all of the fields that may be present in the output file from an electric field gradient calculation. """ def __init__(self): self.vmatrix = np.array([[nan, nan, nan], [nan, nan, nan], [nan, nan, nan]]) self.vel = np.array([nan, nan, nan]) self.vnuc = np.array([nan, nan, nan]) self.vtot = np.array([nan, nan, nan]) self.vori = np.array([[nan, nan, nan], [nan, nan, nan], [nan, nan, nan]]) self.nqcc = nan self.k = nan self.eta = nan self.px = nan self.py = nan self.pz = nan self.p = np.array([nan, nan, nan]) def __str__(self): s = "EFG([{0} {1} {2}]; {3})" return s.format(self.vtot[0], self.vtot[1], self.vtot[2], self.nqcc) def _calc_nqi_tensor(self): """Calculate the diagonal representation of the NQI tensor as I*Q*I = e**2qQ/(4I(2I-1))*[-(1-eta),-(1+eta),2]. """ self.px = self.k * (-(1-self.eta)) self.py = self.k * (-(1+self.eta)) self.pz = self.k * 2 self.p = np.array([self.px, self.py, self.pz]) # eta = (self.px - self.py)/self.pz def _diag(self): """... """ eigvals = spl.eigvalsh(self.vmaxtrix) # needs an assertion against self.vtot V_xx, V_yy, V_zz = sorted(eigvals, key = lambda x: abs(x)) # needs an assertion against self.eta eta = (V_xx - V_yy) / V_zz e = float(u.eV / u.J) planck = float(u.planck / (u.J * u.s)) barn = 10e-28
berquist/orcaparse
orcaparse/atom.py
Python
mpl-2.0
7,226
"""Convenience module for backwards compatibility.""" from souliss import Souliss from souliss.Macaco_frame import Macaco_frame
maoterodapena/pysouliss
souliss/pysouliss.py
Python
mit
128
# -*- coding: utf-8 -*-

from os import path

from gluon import *
from gluon.storage import Storage
from s3 import *

# =============================================================================
class index():
    """ Custom Home Page """

    def __call__(self):

        request = current.request
        response = current.response
        settings = current.deployment_settings

        response.title = settings.get_system_name()

        T = current.T
        s3 = response.s3
        appname = request.application

        project_items = project()()
        datatable_ajax_source = "/%s/default/index/project.aadata" % \
                                appname
        s3.actions = None
        project_box = DIV(H3(T("Projects")),
                          A(T("Add Project"),
                            _href = URL(c="project", f="project",
                                        args=["create"]),
                            _id = "add-btn",
                            _class = "action-btn",
                            _style = "margin-right:10px;"),
                          project_items,
                          _id = "org_box",
                          _class = "menu_box fleft"
                          )

        # Login/Registration forms
        self_registration = settings.get_security_self_registration()
        registered = False
        login_form = None
        login_div = None
        register_form = None
        register_div = None

        roles = current.session.s3.roles
        auth = current.auth
        system_roles = auth.get_system_roles()
        AUTHENTICATED = system_roles.AUTHENTICATED
        if AUTHENTICATED not in roles:
            # This user isn't yet logged-in
            if request.cookies.has_key("registered"):
                # This browser has logged-in before
                registered = True

            if self_registration:
                # Provide a Registration box on front page
                register_form = auth.s3_registration_form()
                register_div = DIV(H3(T("Register")),
                                   P(XML(T("If you would like to add data, then please %(sign_up_now)s") % \
                                         dict(sign_up_now=B(T("sign-up now"))))))

                if request.env.request_method == "POST":
                    post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
                else:
                    post_script = ""
                register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
 $('#register_form').removeClass('hide')
 $('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
 $('#register_form').addClass('hide')
 $('#login_form').removeClass('hide')
})''' % post_script
                s3.jquery_ready.append(register_script)

            # Provide a login box on front page
            request.args = ["login"]
            auth.messages.submit_button = T("Login")
            login_form = auth()
            login_div = DIV(H3(T("Login")),
                            P(XML(T("Registered users can %(login)s to access the system") % \
                                  dict(login=B(T("login"))))))

        view = path.join(request.folder, "private", "templates",
                         "OCHA", "views", "index.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP(404, "Unable to open Custom View: %s" % view)

        return dict(title = response.title,
                    project_box = project_box,
                    r = None, # Required for dataTable to work
                    datatable_ajax_source = datatable_ajax_source,
                    self_registration=self_registration,
                    registered=registered,
                    login_form=login_form,
                    login_div=login_div,
                    register_form=register_form,
                    register_div=register_div
                    )

# =============================================================================
class project():
    """
        Function to handle pagination for the project list on the homepage
    """

    def __call__(self):
        request = current.request
        get_vars = request.get_vars

        resource = current.s3db.resource("project_project")
        totalrows = resource.count()
        if "iDisplayLength" in get_vars:
            display_length = int(get_vars["iDisplayLength"])
        else:
            display_length = 10
        limit = 4 * display_length

        list_fields = ["id", "name"]
        filter, orderby, left = resource.datatable_filter(list_fields,
                                                          get_vars)
        resource.add_filter(filter)

        data = resource.select(list_fields,
                               start=0,
                               limit=limit,
                               orderby=orderby,
                               left=left,
                               count=True,
                               represent=True)
        filteredrows = data["numrows"]
        rfields = data["rfields"]
        rows = data["rows"]

        dt = S3DataTable(rfields, rows)
        dt_id = "proj_dt"

        if request.extension == "html":
            dt.defaultActionButtons(resource)
            current.response.s3.no_formats = True
            items = dt.html(totalrows,
                            filteredrows,
                            dt_id,
                            dt_displayLength=display_length,
                            dt_ajax_url=URL(c="default",
                                            f="index",
                                            args=["project"],
                                            extension="aadata",
                                            vars={"id": dt_id},
                                            ),
                            dt_pagination="true",
                            )
        elif request.extension == "aadata":
            if "sEcho" in get_vars:
                echo = int(get_vars.sEcho)
            else:
                echo = None
            items = dt.json(totalrows,
                            filteredrows,
                            dt_id,
                            echo)
            current.response.headers["Content-Type"] = "application/json"
        else:
            from gluon.http import HTTP
            raise HTTP(501, resource.ERROR.BAD_FORMAT)
        return items

# END =========================================================================
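
# Hedged note (not part of the original controller): the "aadata" branch above
# serves the JSON paging requests issued by the dataTable built in the "html"
# branch (see dt_ajax_url), echoing sEcho back so the client-side table can
# match each response to the request that triggered it.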
flavour/tldrmp
private/templates/OCHA/controllers.py
Python
mit
6,803
# -*- coding: utf-8 -*-

#--------------------------------------------------------------------#
# This file is part of Py-cnotify.                                   #
#                                                                    #
# Copyright (C) 2007, 2008 Paul Pogonyshev.                          #
#                                                                    #
# This library is free software; you can redistribute it and/or      #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1   #
# of the License, or (at your option) any later version.             #
#                                                                    #
# This library is distributed in the hope that it will be useful,    #
# but WITHOUT ANY WARRANTY; without even the implied warranty of     #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU  #
# Lesser General Public License for more details.                    #
#                                                                    #
# You should have received a copy of the GNU Lesser General Public   #
# License along with this library; if not, write to the Free         #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor,        #
# Boston, MA 02110-1301 USA                                          #
#--------------------------------------------------------------------#

"""
cNotify package provides three main concepts: I{L{signals <signal>}}, I{L{conditions
<condition>}} and I{L{variables <variable>}}.  Signals are basically lists of callables
that can be I{emitted} and then will call all contained callables (I{handler} of a
signal) in turn.  Conditions are boolean values complemented with a signal that is
emitted when condition’s I{state} changes.  Variables are akin to conditions but can
hold arbitrary I{values}, not just booleans.  Conditions, unlike variables, can also be
combined using standard logic operators, like negation, conjunction and so on.

All three concepts provide separation between providers (writers, setters) and listeners
(readers, getters) of some entity.  Conditions and variables make the entity explicit—it
is a boolean state for the former and arbitrary Python object for the latter (though
derived variable classes can restrict the set of allowed values.)

Here is a quick example:

    >>> from cnotify.variable import *
    ... name = Variable ()
    ...
    ... import sys
    ... name.changed.connect (
    ...     lambda string: sys.stdout.write ('Hello there, %s!\\n' % string))
    ...
    ... name.value = 'Chuk'

Note that when setting the C{name} variable, you don’t need to know who, if anyone,
listens to changes to it.  Interested parties take care to express their interest
themselves and are informed upon a change automatically.

Here is a little more elaborate example with the same functionality (it requires
U{PyGTK <http://pygtk.org/>}):

    >>> from cnotify.variable import *
    ... import gtk
    ...
    ... name = Variable ()
    ...
    ... def welcome_user (name_string):
    ...     dialog = gtk.MessageDialog (None, 0, gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
    ...                                 'Hello there, %s!' % name_string)
    ...     dialog.run ()
    ...     dialog.destroy ()
    ...
    ... name.changed.connect (welcome_user)
    ...
    ... def set_name_from_entry (entry):
    ...     name.value = entry.get_text ()
    ...
    ... window = gtk.Window ()
    ... window.set_title ('Enter name')
    ...
    ... entry = gtk.Entry ()
    ... entry.show ()
    ... window.add (entry)
    ...
    ... entry.connect ('activate', set_name_from_entry)
    ... window.connect ('destroy', lambda window: gtk.main_quit ())
    ...
    ... window.present ()
    ...
    ... gtk.main ()

Note that C{window} knows absolutely nothing about how changes to C{name} variable are
handled.  If you play with this example, you will notice one thing: pressing C{Enter} in
the main window twice doesn’t pop the welcoming dialog twice.  That is because both
conditions and variables emit their ‘changed’ signal I{only} when their state/value
actually changes, not on every assignment.

Now a final, quite complicated, example introducing conditions and some other features:

    >>> from cnotify.all import *
    ...
    ... pilots = Variable ()
    ... fuel = Variable ()
    ...
    ... import sys
    ...
    ... pilots.changed.connect (
    ...     lambda pilots: sys.stdout.write ('Pilots are %s\\n' % pilots))
    ... fuel.changed.connect (
    ...     lambda amount: sys.stdout.write ('Got %d litres of fuel\\n' % amount))
    ...
    ... def ready_state_changed (ready):
    ...     if ready:
    ...         sys.stdout.write ('Ready to get off!\\n')
    ...     else:
    ...         sys.stdout.write ('Missing pilots or fuel\\n')
    ...
    ... ready = pilots.is_true () & fuel.predicate (lambda amount: amount > 0)
    ... ready.store (ready_state_changed)
    ...
    ... pilots.value = 'Jen and Jim'
    ... fuel.value = 500
    ...
    ... fuel.value = 0

First line of example shows a way to save typing by importing all package contents at
once.  Whether to use this technique is up to you.

Following lines up to C{ready = ...} should be familiar.  Now let’s consider that
assignment closer.

First, C{L{pilots.is_true () <variable.AbstractVariable.is_true>}} code creates a
condition that is true depending on C{pilots} value (true for non-empty sequences in our
case.)  It is just a convenience wrapper over
C{L{AbstractVariable.predicate <variable.AbstractVariable.predicate>}} method.  Now, the
latter is also used directly in this line of code.  It creates a condition that is true
as long as variable’s value conforms to the passed in predicate.  In particular,
C{fuel.predicate (lambda amount: amount > 0)} creates a condition that is true if
C{fuel}’s value is greater than zero.  Predicate conditions will recompute their state
each time variable’s value changes and that’s the point in using them.

Finally, two just constructed conditions are combined into a third condition using ‘and’
operator (C{&}).  This third condition will be true if and only if I{both} its term
conditions are true.  Conditions support four logic operations: negation, conjunction,
disjunction and xoring (with these operators: C{~}, C{&}, C{|} and C{^}.)  In addition,
each condition has C{L{if_else <condition.AbstractCondition.if_else>}} method, which is
much like Python’s C{if} operator.

The next line introduces one more new method:
C{L{store <base.AbstractValueObject.store>}}.  It is really just like connecting its
only argument to the ‘changed’ signal, except that it is also called once with the
current state of the condition (or value of a variable.)

The example should produce this output::

    Missing pilots or fuel
    Pilots are Jen and Jim
    Got 500 litres of fuel
    Ready to get off!
    Got 0 litres of fuel
    Missing pilots or fuel

Notable here is the output from C{ready_state_changed} function.  It is called once at
the beginning from the C{store} method with the state of C{ready} condition (then
C{False}.)  Both later calls correspond to changes in C{ready}’s state.  When both
C{pilots} and C{fuel} variables are set, corresponding predicate conditions become true
and so does the C{ready} condition.  However, when one of the predicate conditions
becomes false (as the result of C{fuel} being set to zero), C{ready} turns false again.
Note that C{ready_state_changed} is not called in between of setting C{pilots} and
C{fuel} variable.  C{ready} state is recomputed, but since it remains the same,
‘changed’ signal is not emitted.

G{packagetree}
"""

__docformat__ = 'epytext en'


# CONFIGURATION

__version__ = '0.3.2.1'
"""
Version of Py-cnotify, as a string.
"""

version_tuple = (0, 3, 2, 1)
"""
Version of Py-cnotify, as a tuple of integers.  It is guaranteed that version tuples of
later versions will compare greater than those of earlier versions.
"""

# /CONFIGURATION


# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
kived/py-cnotify
cnotify/__init__.py
Python
lgpl-2.1
8,208
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import operator

import six

from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import monitoring
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export

_TENSORSHAPE_V2_OVERRIDE = None

_api_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/api/v2_tensorshape",
    "Whether tensor_shape.enable_v2_tensorshape() is called.")


@tf_export(v1=["enable_v2_tensorshape"])
def enable_v2_tensorshape():
  """In TensorFlow 2.0, iterating over a TensorShape instance returns values.

  This enables the new behavior.

  Concretely, `tensor_shape[i]` returned a Dimension instance in V1, but in V2
  it returns either an integer, or None.

  Examples:

  ```
  #######################
  # If you had this in V1:
  value = tensor_shape[i].value

  # Do this in V2 instead:
  value = tensor_shape[i]

  #######################
  # If you had this in V1:
  for dim in tensor_shape:
    value = dim.value
    print(value)

  # Do this in V2 instead:
  for value in tensor_shape:
    print(value)

  #######################
  # If you had this in V1:
  dim = tensor_shape[i]
  dim.assert_is_compatible_with(other_shape)  # or using any other shape method

  # Do this in V2 instead:
  if tensor_shape.rank is None:
    dim = Dimension(None)
  else:
    dim = tensor_shape.dims[i]
  dim.assert_is_compatible_with(other_shape)  # or using any other shape method

  # The V2 suggestion above is more explicit, which will save you from
  # the following trap (present in V1):
  # you might do in-place modifications to `dim` and expect them to be reflected
  # in `tensor_shape[i]`, but they would not be.
  ```
  """
  global _TENSORSHAPE_V2_OVERRIDE  # pylint: disable=invalid-name
  _TENSORSHAPE_V2_OVERRIDE = True
  logging.vlog(1, "Enabling v2 tensorshape")
  _api_usage_gauge.get_cell().set(True)


@tf_export(v1=["disable_v2_tensorshape"])
def disable_v2_tensorshape():
  """Disables the V2 TensorShape behavior and reverts to V1 behavior.

  See docstring for `enable_v2_tensorshape` for details about the new behavior.
  """
  global _TENSORSHAPE_V2_OVERRIDE  # pylint: disable=invalid-name
  _TENSORSHAPE_V2_OVERRIDE = False
  logging.vlog(1, "Disabling v2 tensorshape")
  _api_usage_gauge.get_cell().set(False)


@tf_export(
    "compat.dimension_value", v1=["dimension_value", "compat.dimension_value"])
def dimension_value(dimension):
  """Compatibility utility required to allow for both V1 and V2 behavior in TF.

  Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
  coexist with the new behavior. This utility is a bridge between the two.

  When accessing the value of a TensorShape dimension,
  use this utility, like this:

  ```
  # If you had this in your V1 code:
  value = tensor_shape[i].value

  # Use `dimension_value` as direct replacement compatible with both V1 & V2:
  value = dimension_value(tensor_shape[i])

  # This would be the V2 equivalent:
  value = tensor_shape[i]  # Warning: this will return the dim value in V2!
  ```

  Args:
    dimension: Either a `Dimension` instance, an integer, or None.

  Returns:
    A plain value, i.e. an integer or None.
  """
  if isinstance(dimension, Dimension):
    return dimension.value
  return dimension


@tf_export(
    "compat.dimension_at_index",
    v1=["dimension_at_index", "compat.dimension_at_index"])
def dimension_at_index(shape, index):
  """Compatibility utility required to allow for both V1 and V2 behavior in TF.

  Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
  coexist with the new behavior. This utility is a bridge between the two.

  If you want to retrieve the Dimension instance corresponding to a certain
  index in a TensorShape instance, use this utility, like this:

  ```
  # If you had this in your V1 code:
  dim = tensor_shape[i]

  # Use `dimension_at_index` as direct replacement compatible with both V1 & V2:
  dim = dimension_at_index(tensor_shape, i)

  # Another possibility would be this, but WARNING: it only works if the
  # tensor_shape instance has a defined rank.
  dim = tensor_shape.dims[i]  # `dims` may be None if the rank is undefined!

  # In native V2 code, we recommend instead being more explicit:
  if tensor_shape.rank is None:
    dim = Dimension(None)
  else:
    dim = tensor_shape.dims[i]

  # Being more explicit will save you from the following trap (present in V1):
  # you might do in-place modifications to `dim` and expect them to be reflected
  # in `tensor_shape[i]`, but they would not be (as the Dimension object was
  # instantiated on the fly).
  ```

  Args:
    shape: A TensorShape instance.
    index: An integer index.

  Returns:
    A dimension object.
  """
  assert isinstance(shape, TensorShape)
  if shape.rank is None:
    return Dimension(None)
  else:
    return shape.dims[index]


@tf_export(v1=["Dimension"])
class Dimension(object):
  """Represents the value of one dimension in a TensorShape."""

  __slots__ = ["_value"]

  def __init__(self, value):
    """Creates a new Dimension with the given value."""
    if isinstance(value, int):  # Most common case.
      if value < 0:
        raise ValueError("Dimension %d must be >= 0" % value)
      self._value = value
    elif value is None:
      self._value = None
    elif isinstance(value, Dimension):
      self._value = value._value
    else:
      try:
        # int(...) compensates for the int/long dichotomy on Python 2.X.
        # TODO(b/143206389): Remove once we fully migrate to 3.X.
        self._value = int(value.__index__())
      except AttributeError:
        six.raise_from(
            TypeError("Dimension value must be integer or None or have "
                      "an __index__ method, got value '{0!r}' with type '{1!r}'"
                      .format(value, type(value))), None)
      if self._value < 0:
        raise ValueError("Dimension %d must be >= 0" % self._value)

  def __repr__(self):
    return "Dimension(%s)" % repr(self._value)

  def __str__(self):
    value = self._value
    return "?" if value is None else str(value)

  def __eq__(self, other):
    """Returns true if `other` has the same known value as this Dimension."""
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return None
    return self._value == other.value

  def __ne__(self, other):
    """Returns true if `other` has a different known value from `self`."""
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return None
    return self._value != other.value

  def __bool__(self):
    """Equivalent to `bool(self.value)`."""
    return bool(self._value)

  def __int__(self):
    return self._value

  # This is needed for Windows.
  # See https://github.com/tensorflow/tensorflow/pull/9780
  def __long__(self):
    return self._value

  def __index__(self):
    # Allow use in Python 3 range
    return self._value

  @property
  def value(self):
    """The value of this dimension, or None if it is unknown."""
    return self._value

  def is_compatible_with(self, other):
    """Returns true if `other` is compatible with this Dimension.

    Two known Dimensions are compatible if they have the same value.
    An unknown Dimension is compatible with all other Dimensions.

    Args:
      other: Another Dimension.

    Returns:
      True if this Dimension and `other` are compatible.
    """
    other = as_dimension(other)
    return (self._value is None or other.value is None or
            self._value == other.value)

  def assert_is_compatible_with(self, other):
    """Raises an exception if `other` is not compatible with this Dimension.

    Args:
      other: Another Dimension.

    Raises:
      ValueError: If `self` and `other` are not compatible (see
        is_compatible_with).
    """
    if not self.is_compatible_with(other):
      raise ValueError("Dimensions %s and %s are not compatible" %
                       (self, other))

  def merge_with(self, other):
    """Returns a Dimension that combines the information in `self` and `other`.

    Dimensions are combined as follows:

    ```python
    tf.compat.v1.Dimension(n)   .merge_with(tf.compat.v1.Dimension(n))     ==
    tf.compat.v1.Dimension(n)
    tf.compat.v1.Dimension(n)   .merge_with(tf.compat.v1.Dimension(None))  ==
    tf.compat.v1.Dimension(n)
    tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(n))     ==
    tf.compat.v1.Dimension(n)
    # equivalent to tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(None))

    # raises ValueError for n != m
    tf.compat.v1.Dimension(n)   .merge_with(tf.compat.v1.Dimension(m))
    ```

    Args:
      other: Another Dimension.

    Returns:
      A Dimension containing the combined information of `self` and
      `other`.

    Raises:
      ValueError: If `self` and `other` are not compatible (see
        is_compatible_with).
    """
    other = as_dimension(other)
    self.assert_is_compatible_with(other)
    if self._value is None:
      return Dimension(other.value)
    else:
      return Dimension(self._value)

  def __add__(self, other):
    """Returns the sum of `self` and `other`.

    Dimensions are summed as follows:

    ```python
    tf.compat.v1.Dimension(m)    + tf.compat.v1.Dimension(n)     ==
    tf.compat.v1.Dimension(m + n)
    tf.compat.v1.Dimension(m)    + tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(n)     # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the sum of `self` and `other`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value + other.value)

  def __radd__(self, other):
    """Returns the sum of `other` and `self`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the sum of `self` and `other`.
    """
    return self + other

  def __sub__(self, other):
    """Returns the subtraction of `other` from `self`.

    Dimensions are subtracted as follows:

    ```python
    tf.compat.v1.Dimension(m)    - tf.compat.v1.Dimension(n)     ==
    tf.compat.v1.Dimension(m - n)
    tf.compat.v1.Dimension(m)    - tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(n)     # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the subtraction of `other` from `self`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value - other.value)

  def __rsub__(self, other):
    """Returns the subtraction of `self` from `other`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the subtraction of `self` from `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(other.value - self._value)

  def __mul__(self, other):
    """Returns the product of `self` and `other`.

    Dimensions are multiplied as follows:

    ```python
    tf.compat.v1.Dimension(m)    * tf.compat.v1.Dimension(n)     ==
    tf.compat.v1.Dimension(m * n)
    tf.compat.v1.Dimension(m)    * tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(n)     # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the product of `self` and `other`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value * other.value)

  def __rmul__(self, other):
    """Returns the product of `self` and `other`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is the product of `self` and `other`.
    """
    return self * other

  def __floordiv__(self, other):
    """Returns the quotient of `self` and `other` rounded down.

    Dimensions are divided as follows:

    ```python
    tf.compat.v1.Dimension(m)    // tf.compat.v1.Dimension(n)     ==
    tf.compat.v1.Dimension(m // n)
    tf.compat.v1.Dimension(m)    // tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(n)     # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    try:
      other = as_dimension(other)
    except (TypeError, ValueError):
      return NotImplemented
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value // other.value)

  def __rfloordiv__(self, other):
    """Returns the quotient of `other` and `self` rounded down.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(other.value // self._value)

  def __div__(self, other):
    """DEPRECATED: Use `__floordiv__` via `x // y` instead.

    This function exists only for backwards compatibility purposes; new code
    should use `__floordiv__` via the syntax `x // y`.  Using `x // y`
    communicates clearly that the result rounds down, and is forward compatible
    to Python 3.

    Args:
      other: Another `Dimension`.

    Returns:
      A `Dimension` whose value is the integer quotient of `self` and `other`.
    """
    return self // other

  def __rdiv__(self, other):
    """Use `__floordiv__` via `x // y` instead.

    This function exists only to have a better error message. Instead of:
    `TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
    this function will explicitly call for usage of `//` instead.

    Args:
      other: Another `Dimension`.

    Raises:
      TypeError.
    """
    raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
                    "please use // instead".format(type(other).__name__))

  def __truediv__(self, other):
    """Use `__floordiv__` via `x // y` instead.

    This function exists only to have a better error message. Instead of:
    `TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'`,
    this function will explicitly call for usage of `//` instead.

    Args:
      other: Another `Dimension`.

    Raises:
      TypeError.
    """
    raise TypeError("unsupported operand type(s) for /: 'Dimension' and '{}', "
                    "please use // instead".format(type(other).__name__))

  def __rtruediv__(self, other):
    """Use `__floordiv__` via `x // y` instead.

    This function exists only to have a better error message. Instead of:
    `TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
    this function will explicitly call for usage of `//` instead.

    Args:
      other: Another `Dimension`.

    Raises:
      TypeError.
    """
    raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
                    "please use // instead".format(type(other).__name__))

  def __mod__(self, other):
    """Returns `self` modulo `other`.

    Dimension moduli are computed as follows:

    ```python
    tf.compat.v1.Dimension(m)    % tf.compat.v1.Dimension(n)     ==
    tf.compat.v1.Dimension(m % n)
    tf.compat.v1.Dimension(m)    % tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(n)     # equiv. to
    tf.compat.v1.Dimension(None)
    tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(None)  # equiv. to
    tf.compat.v1.Dimension(None)
    ```

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is `self` modulo `other`.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return Dimension(None)
    else:
      return Dimension(self._value % other.value)

  def __rmod__(self, other):
    """Returns `other` modulo `self`.

    Args:
      other: Another Dimension, or a value accepted by `as_dimension`.

    Returns:
      A Dimension whose value is `other` modulo `self`.
    """
    other = as_dimension(other)
    return other % self

  def __lt__(self, other):
    """Returns True if `self` is known to be less than `other`.

    Dimensions are compared as follows:

    ```python
    (tf.compat.v1.Dimension(m)    < tf.compat.v1.Dimension(n))    == (m < n)
    (tf.compat.v1.Dimension(m)    < tf.compat.v1.Dimension(None)) == None
    (tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(n))    == None
    (tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value < other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value < other.value

  def __le__(self, other):
    """Returns True if `self` is known to be less than or equal to `other`.

    Dimensions are compared as follows:

    ```python
    (tf.compat.v1.Dimension(m)    <= tf.compat.v1.Dimension(n))    == (m <= n)
    (tf.compat.v1.Dimension(m)    <= tf.compat.v1.Dimension(None)) == None
    (tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(n))    == None
    (tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value <= other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value <= other.value

  def __gt__(self, other):
    """Returns True if `self` is known to be greater than `other`.

    Dimensions are compared as follows:

    ```python
    (tf.compat.v1.Dimension(m)    > tf.compat.v1.Dimension(n))    == (m > n)
    (tf.compat.v1.Dimension(m)    > tf.compat.v1.Dimension(None)) == None
    (tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(n))    == None
    (tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value > other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value > other.value

  def __ge__(self, other):
    """Returns True if `self` is known to be greater than or equal to `other`.

    Dimensions are compared as follows:

    ```python
    (tf.compat.v1.Dimension(m)    >= tf.compat.v1.Dimension(n))    == (m >= n)
    (tf.compat.v1.Dimension(m)    >= tf.compat.v1.Dimension(None)) == None
    (tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(n))    == None
    (tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(None)) == None
    ```

    Args:
      other: Another Dimension.

    Returns:
      The value of `self.value >= other.value` if both are known, otherwise
      None.
    """
    other = as_dimension(other)
    if self._value is None or other.value is None:
      return None
    else:
      return self._value >= other.value

  def __reduce__(self):
    return Dimension, (self._value,)


def as_dimension(value):
  """Converts the given value to a Dimension.

  A Dimension input will be returned unmodified.
  An input of `None` will be converted to an unknown Dimension.
  An integer input will be converted to a Dimension with that value.

  Args:
    value: The value to be converted.

  Returns:
    A Dimension corresponding to the given value.
  """
  if isinstance(value, Dimension):
    return value
  else:
    return Dimension(value)


@tf_export("TensorShape")
class TensorShape(object):
  """Represents the shape of a `Tensor`.

  A `TensorShape` represents a possibly-partial shape specification for a
  `Tensor`. It may be one of the following:

  * *Fully-known shape:* has a known number of dimensions and a known size
    for each dimension. e.g. `TensorShape([16, 256])`
  * *Partially-known shape:* has a known number of dimensions, and an unknown
    size for one or more dimension. e.g. `TensorShape([None, 256])`
  * *Unknown shape:* has an unknown number of dimensions, and an unknown size
    in all dimensions. e.g. `TensorShape(None)`

  If a tensor is produced by an operation of type `"Foo"`, its shape may be
  inferred if there is a registered shape function for `"Foo"`. See
  [Shape
  functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c)
  for details of shape functions and how to register them. Alternatively,
  the shape may be set explicitly using `tf.Tensor.set_shape`.
  """
  __slots__ = ["_dims"]

  def __init__(self, dims):
    """Creates a new TensorShape with the given dimensions.

    Args:
      dims: A list of Dimensions, or None if the shape is unspecified.

    Raises:
      TypeError: If dims cannot be converted to a list of dimensions.
    """
    if isinstance(dims, (tuple, list)):  # Most common case.
      self._dims = [Dimension(d) for d in dims]
    elif dims is None:
      self._dims = None
    elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
      if dims.unknown_rank:
        self._dims = None
      else:
        self._dims = [
            # Protos store variable-size dimensions as -1
            as_dimension(dim.size if dim.size != -1 else None)
            for dim in dims.dim
        ]
    elif isinstance(dims, TensorShape):
      self._dims = dims.dims
    else:
      try:
        dims_iter = iter(dims)
      except TypeError:
        # Treat as a singleton dimension
        self._dims = [as_dimension(dims)]
      else:
        self._dims = []
        for d in dims_iter:
          try:
            self._dims.append(as_dimension(d))
          except TypeError as e:
            six.raise_from(
                TypeError(
                    "Failed to convert '{0!r}' to a shape: '{1!r}' "
                    "could not be converted to a dimension. A shape should "
                    "either be single dimension (e.g. 10), or an iterable of "
                    "dimensions (e.g. [1, 10, None])."
                    .format(dims, d)), e)

  @property
  def _v2_behavior(self):
    if _TENSORSHAPE_V2_OVERRIDE is None:
      return tf2.enabled()
    return _TENSORSHAPE_V2_OVERRIDE

  def __repr__(self):
    if self._v2_behavior:
      if self._dims is not None:
        return "TensorShape(%r)" % [dim.value for dim in self._dims]
      else:
        return "TensorShape(None)"
    else:
      return "TensorShape(%r)" % self._dims

  def __str__(self):
    if self.rank is None:
      return "<unknown>"
    elif self.rank == 1:
      if self._v2_behavior:
        return "(%s,)" % self._dims[0].value
      else:
        return "(%s,)" % self._dims[0]
    else:
      if self._v2_behavior:
        return "(%s)" % ", ".join(str(d.value) for d in self._dims)
      else:
        return "(%s)" % ", ".join(str(d) for d in self._dims)

  @property
  def rank(self):
    """Returns the rank of this shape, or None if it is unspecified."""
    if self._dims is not None:
      return len(self._dims)
    return None

  @property
  def dims(self):
    """Deprecated.  Returns list of dimensions for this shape.

    Suggest `TensorShape.as_list` instead.

    Returns:
      A list containing `tf.compat.v1.Dimension`s, or None if the shape is
      unspecified.
""" return self._dims @property def ndims(self): """Deprecated accessor for `rank`.""" return self.rank def __len__(self): """Returns the rank of this shape, or raises ValueError if unspecified.""" if self._dims is None: raise ValueError("Cannot take the length of shape with unknown rank.") return len(self._dims) def __bool__(self): """Returns True if this shape contains non-zero information.""" return self._dims is not None # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __iter__(self): """Returns `self.dims` if the rank is known, otherwise raises ValueError.""" if self._dims is None: raise ValueError("Cannot iterate over a shape with unknown rank.") else: if self._v2_behavior: return iter(d.value for d in self._dims) else: return iter(d for d in self._dims) def __getitem__(self, key): """Returns the value of a dimension or a shape, depending on the key. Args: key: If `key` is an integer, returns the dimension at that index; otherwise if `key` is a slice, returns a TensorShape whose dimensions are those selected by the slice from `self`. Returns: An integer if `key` is an integer, or a `TensorShape` if `key` is a slice. Raises: ValueError: If `key` is a slice and `self` is completely unknown and the step is set. """ if self._dims is not None: if isinstance(key, slice): return TensorShape(self._dims[key]) else: if self._v2_behavior: return self._dims[key].value else: return self._dims[key] else: if isinstance(key, slice): start = key.start if key.start is not None else 0 stop = key.stop if key.step is not None: # TODO(mrry): Handle these maybe. raise ValueError("Steps are not yet handled") if stop is None: # NOTE(mrry): This implies that TensorShape(None) is compatible with # TensorShape(None)[1:], which is obviously not true. It would be # possible to track the number of dimensions symbolically, # and perhaps we should do that. return unknown_shape() elif start < 0 or stop < 0: # TODO(mrry): Handle this better, as it will be useful for handling # suffixes of otherwise unknown shapes. return unknown_shape() else: return unknown_shape(rank=stop - start) else: if self._v2_behavior: return None else: return Dimension(None) def num_elements(self): """Returns the total number of elements, or none for incomplete shapes.""" if self.is_fully_defined(): return functools.reduce(operator.mul, self.as_list(), 1) else: return None def merge_with(self, other): """Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged element-wise, according to the rules below: ```python Dimension(n).merge_with(Dimension(None)) == Dimension(n) Dimension(None).merge_with(Dimension(n)) == Dimension(n) Dimension(None).merge_with(Dimension(None)) == Dimension(None) # raises ValueError for n != m Dimension(n).merge_with(Dimension(m)) ``` >> ts = tf.TensorShape([1,2]) >> ot1 = tf.TensorShape([1,2]) >> ts.merge_with(ot).as_list() [1,2] >> ot2 = tf.TensorShape([1,None]) >> ts.merge_with(ot2).as_list() [1,2] >> ot3 = tf.TensorShape([None, None]) >> ot3.merge_with(ot2).as_list() [1, None] Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not compatible. 
""" other = as_shape(other) if self._dims is None: return other if other.dims is None: return self else: try: self.assert_same_rank(other) new_dims = [ dim.merge_with(other_dim) for dim, other_dim in zip(self._dims, other.dims) ] return TensorShape(new_dims) except ValueError: raise ValueError("Shapes %s and %s are not compatible" % (self, other)) def __add__(self, other): return self.concatenate(other) def __radd__(self, other): if not isinstance(other, TensorShape): other = TensorShape(other) return other.concatenate(self) def concatenate(self, other): """Returns the concatenation of the dimension in `self` and `other`. *N.B.* If either `self` or `other` is completely unknown, concatenation will discard information about the other shape. In future, we might support concatenation that preserves this information for use with slicing. Args: other: Another `TensorShape`. Returns: A `TensorShape` whose dimensions are the concatenation of the dimensions in `self` and `other`. """ # TODO(mrry): Handle the case where we concatenate a known shape with a # completely unknown shape, so that we can use the partial information. other = as_shape(other) if self._dims is None or other.dims is None: return unknown_shape() else: return TensorShape(self._dims + other.dims) def assert_same_rank(self, other): """Raises an exception if `self` and `other` do not have compatible ranks. Args: other: Another `TensorShape`. Raises: ValueError: If `self` and `other` do not represent shapes with the same rank. """ other = as_shape(other) if self.rank is not None and other.rank is not None: if self.rank != other.rank: raise ValueError("Shapes %s and %s must have the same rank" % (self, other)) def assert_has_rank(self, rank): """Raises an exception if `self` is not compatible with the given `rank`. Args: rank: An integer. Raises: ValueError: If `self` does not represent a shape with the given `rank`. """ if self.rank not in (None, rank): raise ValueError("Shape %s must have rank %d" % (self, rank)) def with_rank(self, rank): """Returns a shape based on `self` with the given rank. This method promotes a completely unknown shape to one with a known rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with the given rank. Raises: ValueError: If `self` does not represent a shape with the given `rank`. """ try: return self.merge_with(unknown_shape(rank=rank)) except ValueError: raise ValueError("Shape %s must have rank %d" % (self, rank)) def with_rank_at_least(self, rank): """Returns a shape based on `self` with at least the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at least the given rank. Raises: ValueError: If `self` does not represent a shape with at least the given `rank`. """ if self.rank is not None and self.rank < rank: raise ValueError("Shape %s must have rank at least %d" % (self, rank)) else: return self def with_rank_at_most(self, rank): """Returns a shape based on `self` with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at most the given rank. Raises: ValueError: If `self` does not represent a shape with at most the given `rank`. """ if self.rank is not None and self.rank > rank: raise ValueError("Shape %s must have rank at most %d" % (self, rank)) else: return self def is_compatible_with(self, other): """Returns True iff `self` is compatible with `other`. 
Two possibly-partially-defined shapes are compatible if there exists a fully-defined shape that both shapes can represent. Thus, compatibility allows the shape inference code to reason about partially-defined shapes. For example: * TensorShape(None) is compatible with all shapes. * TensorShape([None, None]) is compatible with all two-dimensional shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is not compatible with, for example, TensorShape([None]) or TensorShape([None, None, None]). * TensorShape([32, None]) is compatible with all two-dimensional shapes with size 32 in the 0th dimension, and also TensorShape([None, None]) and TensorShape(None). It is not compatible with, for example, TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]). * TensorShape([32, 784]) is compatible with itself, and also TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None, None]) and TensorShape(None). It is not compatible with, for example, TensorShape([32, 1, 784]) or TensorShape([None]). The compatibility relation is reflexive and symmetric, but not transitive. For example, TensorShape([32, 784]) is compatible with TensorShape(None), and TensorShape(None) is compatible with TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with TensorShape([4, 4]). Args: other: Another TensorShape. Returns: True iff `self` is compatible with `other`. """ other = as_shape(other) if self._dims is not None and other.dims is not None: if self.rank != other.rank: return False for x_dim, y_dim in zip(self._dims, other.dims): if not x_dim.is_compatible_with(y_dim): return False return True def assert_is_compatible_with(self, other): """Raises exception if `self` and `other` do not represent the same shape. This method can be used to assert that there exists a shape that both `self` and `other` represent. Args: other: Another TensorShape. Raises: ValueError: If `self` and `other` do not represent the same shape. """ if not self.is_compatible_with(other): raise ValueError("Shapes %s and %s are incompatible" % (self, other)) def most_specific_compatible_shape(self, other): """Returns the most specific TensorShape compatible with `self` and `other`. * TensorShape([None, 1]) is the most specific TensorShape compatible with both TensorShape([2, 1]) and TensorShape([5, 1]). Note that TensorShape(None) is also compatible with above mentioned TensorShapes. * TensorShape([1, 2, 3]) is the most specific TensorShape compatible with both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more less specific TensorShapes compatible with above mentioned TensorShapes, e.g. TensorShape([1, 2, None]), TensorShape(None). Args: other: Another `TensorShape`. Returns: A `TensorShape` which is the most specific compatible shape of `self` and `other`. """ other = as_shape(other) if self._dims is None or other.dims is None or self.rank != other.rank: return unknown_shape() dims = [ d1 if d1 is not None and d2 is not None and d1 == d2 else None for d1, d2 in zip(self._dims, other.dims) ] return TensorShape(dims) def is_fully_defined(self): """Returns True iff `self` is fully defined in every dimension.""" return (self._dims is not None and all(dim.value is not None for dim in self._dims)) def assert_is_fully_defined(self): """Raises an exception if `self` is not fully defined in every dimension. Raises: ValueError: If `self` does not have a known value for every dimension. 
""" if not self.is_fully_defined(): raise ValueError("Shape %s is not fully defined" % self) def as_list(self): """Returns a list of integers or `None` for each dimension. Returns: A list of integers or `None` for each dimension. Raises: ValueError: If `self` is an unknown shape with an unknown rank. """ if self._dims is None: raise ValueError("as_list() is not defined on an unknown TensorShape.") return [dim.value for dim in self._dims] def as_proto(self): """Returns this shape as a `TensorShapeProto`.""" if self._dims is None: return tensor_shape_pb2.TensorShapeProto(unknown_rank=True) else: return tensor_shape_pb2.TensorShapeProto(dim=[ tensor_shape_pb2.TensorShapeProto.Dim( size=-1 if d.value is None else d.value) for d in self._dims ]) def __eq__(self, other): """Returns True if `self` is equivalent to `other`. It first tries to convert `other` to `TensorShape`. `TypeError` is thrown when the conversion fails. Otherwise, it compares each element in the TensorShape dimensions. * Two *Fully known* shapes, return True iff each element is equal. >>> t_a = tf.TensorShape([1,2]) >>> a = [1, 2] >>> t_b = tf.TensorShape([1,2]) >>> t_c = tf.TensorShape([1,2,3]) >>> t_a.__eq__(a) True >>> t_a.__eq__(t_b) True >>> t_a.__eq__(t_c) False * Two *Partially-known* shapes, return False. >>> p_a = tf.TensorShape([1,None]) >>> p_b = tf.TensorShape([2,None]) >>> p_a.__eq__(p_b) False >>> t_a.__eq__(p_a) False * Two *Unknown shape*, return True. >>> unk_a = tf.TensorShape(None) >>> unk_b = tf.TensorShape(None) >>> unk_a.__eq__(unk_b) True >>> unk_a.__eq__(t_a) False Args: other: A `TensorShape` or type that can be converted to `TensorShape`. Returns: True if the dimensions are all equal. Raises: TypeError if `other` can not be converted to `TensorShape`. """ try: other = as_shape(other) except TypeError: return NotImplemented return self._dims == other.dims def __ne__(self, other): """Returns True if `self` is known to be different from `other`.""" try: other = as_shape(other) except TypeError: return NotImplemented if self.rank is None or other.rank is None: raise ValueError("The inequality of unknown TensorShapes is undefined.") if self.rank != other.rank: return True return self._dims != other.dims def __reduce__(self): return TensorShape, (self._dims,) def __concat__(self, other): return self.concatenate(other) def as_shape(shape): """Converts the given object to a TensorShape.""" if isinstance(shape, TensorShape): return shape else: return TensorShape(shape) def unknown_shape(rank=None, **kwargs): """Returns an unknown TensorShape, optionally with a known rank. Args: rank: (Optional) If specified, the number of dimensions in the shape. **kwargs: For backwards compatibility. Returns: An unknown TensorShape. Raises: TypeError: In case of invalid arguments. """ if rank is None and "ndims" in kwargs: rank = kwargs.pop("ndims") if kwargs: raise TypeError("Unknown argument: %s" % kwargs) if rank is None: return TensorShape(None) else: return TensorShape([Dimension(None)] * rank)
sarvex/tensorflow
tensorflow/python/framework/tensor_shape.py
Python
apache-2.0
41,529
# # Copyright 2006 The Apache Software Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from org.apache.hadoop.fs import Path from org.apache.hadoop.io import * from org.apache.hadoop.mapred import * from org.apache.hadoop.abacus import * from java.util import *; import sys class AbacusMapper(ValueAggregatorMapper): def map(self, key, value, output, reporter): ValueAggregatorMapper.map(self, key, value, output, reporter); class AbacusReducer(ValueAggregatorReducer): def reduce(self, key, values, output, reporter): ValueAggregatorReducer.reduce(self, key, values, output, reporter); class AbacusCombiner(ValueAggregatorCombiner): def reduce(self, key, values, output, reporter): ValueAggregatorCombiner.reduce(self, key, values, output, reporter); def printUsage(code): print "Abacus <input> <output> <numOfReducers> <inputformat> <specfile>" sys.exit(code) def main(args): if len(args) < 6: printUsage(1); inDir = args[1]; outDir = args[2]; numOfReducers = int(args[3]); theInputFormat = args[4]; specFile = args[5]; print "numOfReducers: ", numOfReducers, "theInputFormat: ", theInputFormat, "specFile: ", specFile conf = JobConf(AbacusMapper); conf.setJobName("recordcount"); conf.addDefaultResource(Path(specFile)); if theInputFormat=="textinputformat": conf.setInputFormat(TextInputFormat); else: conf.setInputFormat(SequenceFileInputFormat); conf.setOutputFormat(TextOutputFormat); conf.setMapOutputKeyClass(Text); conf.setMapOutputValueClass(Text); conf.setOutputKeyClass(Text); conf.setOutputValueClass(Text); conf.setNumMapTasks(1); conf.setNumReduceTasks(numOfReducers); conf.setMapperClass(AbacusMapper); conf.setCombinerClass(AbacusCombiner); conf.setReducerClass(AbacusReducer); conf.setInputPath(Path(args[1])) conf.setOutputPath(Path(args[2])) JobClient.runJob(conf); if __name__ == "__main__": main(sys.argv)
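
# Illustrative invocation sketch (added; the paths and reducer count below are
# hypothetical, not from the original example). The driver expects five
# positional arguments after the script name:
#
#   jython JythonAbacus.py /user/in /user/out 2 textinputformat /conf/spec.xml
#
# i.e. <input> <output> <numOfReducers> <inputformat> <specfile>, where
# <inputformat> is either "textinputformat" or anything else for
# SequenceFileInputFormat.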
moreus/hadoop
hadoop-0.11.2/src/contrib/abacus/examples/pyAbacus/JythonAbacus.py
Python
apache-2.0
2,577
# Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import TYPE_CHECKING, Any, Optional, Tuple from synapse.api.errors import InvalidAPICallError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, parse_integer, parse_json_object_from_request, parse_string, ) from synapse.http.site import SynapseRequest from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.types import JsonDict, StreamToken from ._base import client_patterns, interactive_auth_handler if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) class KeyUploadServlet(RestServlet): """ POST /keys/upload HTTP/1.1 Content-Type: application/json { "device_keys": { "user_id": "<user_id>", "device_id": "<device_id>", "valid_until_ts": <millisecond_timestamp>, "algorithms": [ "m.olm.curve25519-aes-sha2", ] "keys": { "<algorithm>:<device_id>": "<key_base64>", }, "signatures:" { "<user_id>" { "<algorithm>:<device_id>": "<signature_base64>" } } }, "one_time_keys": { "<algorithm>:<key_id>": "<key_base64>" }, } """ PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() self.device_handler = hs.get_device_handler() @trace(opname="upload_keys") async def on_POST( self, request: SynapseRequest, device_id: Optional[str] ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() body = parse_json_object_from_request(request) if device_id is not None: # Providing the device_id should only be done for setting keys # for dehydrated devices; however, we allow it for any device for # compatibility with older clients. 
if requester.device_id is not None and device_id != requester.device_id: dehydrated_device = await self.device_handler.get_dehydrated_device( user_id ) if dehydrated_device is not None and device_id != dehydrated_device[0]: set_tag("error", True) log_kv( { "message": "Client uploading keys for a different device", "logged_in_id": requester.device_id, "key_being_uploaded": device_id, } ) logger.warning( "Client uploading keys for a different device " "(logged in as %s, uploading for %s)", requester.device_id, device_id, ) else: device_id = requester.device_id if device_id is None: raise SynapseError( 400, "To upload keys, you must pass device_id when authenticating" ) result = await self.e2e_keys_handler.upload_keys_for_user( user_id, device_id, body ) return 200, result class KeyQueryServlet(RestServlet): """ POST /keys/query HTTP/1.1 Content-Type: application/json { "device_keys": { "<user_id>": ["<device_id>"] } } HTTP/1.1 200 OK { "device_keys": { "<user_id>": { "<device_id>": { "user_id": "<user_id>", // Duplicated to be signed "device_id": "<device_id>", // Duplicated to be signed "valid_until_ts": <millisecond_timestamp>, "algorithms": [ // List of supported algorithms "m.olm.curve25519-aes-sha2", ], "keys": { // Must include a ed25519 signing key "<algorithm>:<key_id>": "<key_base64>", }, "signatures:" { // Must be signed with device's ed25519 key "<user_id>/<device_id>": { "<algorithm>:<key_id>": "<signature_base64>" } // Must be signed by this server. "<server_name>": { "<algorithm>:<key_id>": "<signature_base64>" } } } } } } """ PATTERNS = client_patterns("/keys/query$") def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() device_id = requester.device_id timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) device_keys = body.get("device_keys") if not isinstance(device_keys, dict): raise InvalidAPICallError("'device_keys' must be a JSON object") def is_list_of_strings(values: Any) -> bool: return isinstance(values, list) and all(isinstance(v, str) for v in values) if any(not is_list_of_strings(keys) for keys in device_keys.values()): raise InvalidAPICallError( "'device_keys' values must be a list of strings", ) result = await self.e2e_keys_handler.query_devices( body, timeout, user_id, device_id ) return 200, result class KeyChangesServlet(RestServlet): """Returns the list of changes of keys between two stream tokens (may return spurious extra results, since we currently ignore the `to` param). GET /keys/changes?from=...&to=... 200 OK { "changed": ["@foo:example.com"] } """ PATTERNS = client_patterns("/keys/changes$") def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) from_token_string = parse_string(request, "from", required=True) set_tag("from", from_token_string) # We want to enforce they do pass us one, but we ignore it and return # changes after the "to" as well as before. 
set_tag("to", parse_string(request, "to")) from_token = await StreamToken.from_string(self.store, from_token_string) user_id = requester.user.to_string() results = await self.device_handler.get_user_ids_changed(user_id, from_token) return 200, results class OneTimeKeyServlet(RestServlet): """ POST /keys/claim HTTP/1.1 { "one_time_keys": { "<user_id>": { "<device_id>": "<algorithm>" } } } HTTP/1.1 200 OK { "one_time_keys": { "<user_id>": { "<device_id>": { "<algorithm>:<key_id>": "<key_base64>" } } } } """ PATTERNS = client_patterns("/keys/claim$") def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await self.auth.get_user_by_req(request, allow_guest=True) timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = await self.e2e_keys_handler.claim_one_time_keys(body, timeout) return 200, result class SigningKeyUploadServlet(RestServlet): """ POST /keys/device_signing/upload HTTP/1.1 Content-Type: application/json { } """ PATTERNS = client_patterns("/keys/device_signing/upload$", releases=("v3",)) def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() self.auth_handler = hs.get_auth_handler() @interactive_auth_handler async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) await self.auth_handler.validate_user_via_ui_auth( requester, request, body, "add a device signing key to your account", # Allow skipping of UI auth since this is frequently called directly # after login and it is silly to ask users to re-auth immediately. can_skip_ui_auth=True, ) result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body) return 200, result class SignaturesUploadServlet(RestServlet): """ POST /keys/signatures/upload HTTP/1.1 Content-Type: application/json { "@alice:example.com": { "<device_id>": { "user_id": "<user_id>", "device_id": "<device_id>", "algorithms": [ "m.olm.curve25519-aes-sha2", "m.megolm.v1.aes-sha2" ], "keys": { "<algorithm>:<device_id>": "<key_base64>", }, "signatures": { "<signing_user_id>": { "<algorithm>:<signing_key_base64>": "<signature_base64>>" } } } } } """ PATTERNS = client_patterns("/keys/signatures/upload$") def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) user_id = requester.user.to_string() body = parse_json_object_from_request(request) result = await self.e2e_keys_handler.upload_signatures_for_device_keys( user_id, body ) return 200, result def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: KeyUploadServlet(hs).register(http_server) KeyQueryServlet(hs).register(http_server) KeyChangesServlet(hs).register(http_server) OneTimeKeyServlet(hs).register(http_server) SigningKeyUploadServlet(hs).register(http_server) SignaturesUploadServlet(hs).register(http_server)
matrix-org/synapse
synapse/rest/client/keys.py
Python
apache-2.0
11,478
#import webget #import filmarkivet
pugo/filmarkivet-xbmc
lib/__init__.py
Python
gpl-3.0
36
""" This is a very simple logger. It might evolve over time and add file support and stuff. """ import datetime debugmode = False separator = '::' def printf(label, basestring, *args, **kw): print(label, separator, datetime.datetime.now(), separator, basestring.format(*args, **kw)) def info(basestring, *args, **kw): printf('INFO ', basestring, *args, **kw) def warn(basestring, *args, **kw): printf('WARN ', basestring, *args, **kw) def debug(basestring, *args, **kw): if debugmode: printf('DEBUG', basestring, *args, **kw)
yukaritan/kawaiirc
util/logger.py
Python
gpl-2.0
562
import time

from libardrone import libardrone

from PID import PID


class Actuator(object):
    def __init__(self, drone, picture_width, desired_move):
        # PID controllers for yaw (turn), forward/backward motion (move)
        # and altitude (height).
        self.turn = PID(K_p=0.6, K_d=0.1)
        self.move = PID(K_p=0.15, K_d=0.01)
        self.height = PID(K_p=0.2, K_d=0.00)
        self.picture_width = picture_width
        self.desired_move = desired_move
        self.drone = drone
        time.sleep(0.05)
        self.drone.takeoff()
        time.sleep(0.05)

    def step(self, widthmid, width):
        # Steer so the target stays centred horizontally (widthmid) and its
        # apparent size (width) matches desired_move; hold a fixed altitude.
        desired_turn = self.picture_width / 2
        actual_turn = widthmid
        actual_move = width
        ut = self.turn.step(desired_turn, actual_turn)
        um = self.move.step(self.desired_move, actual_move)
        height = 550
        nav_data = self.drone.get_navdata()
        nav_data = nav_data[0]
        uh = self.height.step(height, nav_data['altitude'])
        self.drone.at(libardrone.at_pcmd, True, 0, self.moveDrone(um),
                      self.heightDrone(uh), self.turnDrone(ut))

    def turnDrone(self, u):
        # Normalise the yaw correction to [-1, 1] relative to half the frame.
        speed = - u / (self.picture_width / 2.)
        print "move horizontal to " + str(speed)
        return speed

    def moveDrone(self, u):
        speed = - u / (self.picture_width / 2.)
        print "move near to " + str(speed)
        return speed

    def heightDrone(self, u):
        speed = u / 500
        print "height near to " + str(speed)
        return speed
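
# Illustrative wiring sketch (added; hypothetical values, and it requires a
# real AR.Drone reachable via libardrone's ARDrone class):
#
#   drone = libardrone.ARDrone()
#   actuator = Actuator(drone, picture_width=640, desired_move=200)
#   actuator.step(widthmid=320, width=180)  # target centred, but too far away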
PatrickChrist/CDTM-Deep-Learning-Drones
Yolonese/actuators.py
Python
mit
1,409
from pastebin.testcase import CacheAwareTestCase from freezegun import freeze_time from django.core.urlresolvers import reverse @freeze_time("2015-01-01") class LatestPastesTests(CacheAwareTestCase): def test_latest_pastes_empty(self): """ Test that latest pastes shows the "no pastes uploaded" message when no pastes have been uploaded """ response = self.client.get(reverse("latest_pastes")) self.assertContains(response, "No pastes uploaded") def test_latest_pastes_with_pastes(self): """ Upload two pastes and check that they're visible on the list """ self.client.post(reverse("home:home"), { "title": "Paste", "text": "This is a test.", "syntax_highlighting": "text", "expiration": "never", "visibility": "public"}, follow=True) self.client.post(reverse("home:home"), { "title": "Paste 2", "text": "This is a test.", "syntax_highlighting": "text", "expiration": "never", "visibility": "public"}, follow=True) response = self.client.get(reverse("latest_pastes")) self.assertContains(response, "Paste") self.assertContains(response, "Paste 2") def test_latest_pastes_shows_correct_pastes(self): """ Upload hidden and expiring paste and make sure hidden and expiring pastes aren't shown when they shouldn't be shown """ with freeze_time("2015-01-01 12:00:00"): for i in range(0, 5): self.client.post(reverse("home:home"), {"title": "Normal paste %d" % i, "text": "This is a test.", "syntax_highlighting": "text", "expiration": "never", "visibility": "public"}, follow=True) for i in range(0, 5): self.client.post(reverse("home:home"), {"title": "Expiring paste %d" % i, "text": "This is a test", "syntax_highlighting": "text", "expiration": "1h", "visibility": "public"}, follow=True) self.client.post(reverse("home:home"), {"title": "Hidden paste", "text": "This is a test", "syntax_highlighting": "text", "expiration": "1h", "visibility": "hidden"}, follow=True) response = self.client.get(reverse("latest_pastes")) self.assertContains(response, "Normal paste", count=5) self.assertContains(response, "Expiring paste", count=5) self.assertNotContains(response, "Hidden paste") with freeze_time("2015-01-01 13:00:01"): self.clearCache() response = self.client.get(reverse("latest_pastes")) self.assertContains(response, "Normal paste", count=5) self.assertNotContains(response, "Expiring paste") self.assertNotContains(response, "Hidden paste") def test_latest_pastes_redirects_to_last_page(self): """ Try checking a page of latest pastes which doesn't exist User should be redirected to the last page """ self.client.post(reverse("home:home"), {"title": "Test paste", "text": "This is a test.", "syntax_highlighting": "text", "expiration": "never", "visibility": "public"}, follow=True) response = self.client.get(reverse("latest_pastes", kwargs={"page": 2})) self.assertContains(response, "Test paste") self.assertContains(response, "1</span>") self.assertNotContains(response, "2</span>") def test_latest_pastes_doesnt_show_hidden_pastes(self): """ Upload a hidden paste and check that it isn't visible in the latest pastes """ self.client.post(reverse("home:home"), {"title": "Paste paste", "text": "This is a test.", "syntax_highlighting": "text", "expiration": "never", "visibility": "hidden"}, follow=True) response = self.client.get(reverse("latest_pastes")) self.assertContains(response, "No pastes uploaded") def test_latest_pastes_doesnt_show_expired_pastes(self): """ Upload an expiring paste and check that it isn't visible after it has expired """ with freeze_time("2015-01-01 12:00:00"): self.client.post(reverse("home:home"), 
{"title": "Paste paste", "text": "This is a test.", "syntax_highlighting": "text", "expiration": "1h", "visibility": "public"}, follow=True) self.clearCache() response = self.client.get(reverse("home:home")) self.assertContains(response, "Paste paste") with freeze_time("2015-01-01 13:00:01"): self.clearCache() response = self.client.get(reverse("home:home")) self.assertContains(response, "No pastes have been submitted yet") def test_random_with_no_pastes_redirects_to_home(self): """ Try going to a random paste when no pastes have been uploaded User should be redirect to home. """ response = self.client.post(reverse("random_paste"), follow=True) self.assertContains(response, "Upload a new paste") def test_random_with_paste(self): """ Upload one paste and go to a random paste """ self.client.post(reverse("home:home"), { "title": "Test paste", "text": "This is a test.", "syntax_highlighting": "text", "expiration": "never", "visibility": "public"}, follow=True) response = self.client.post(reverse("random_paste"), follow=True) self.assertContains(response, "Test paste")
Matoking/pastebin-django
home/tests.py
Python
unlicense
7,947
from cms.plugin_pool import plugin_pool from cms.plugin_base import CMSPluginBase from django.utils.translation import ugettext_lazy as _ import models from django.conf import settings class FilerTeaserPlugin(CMSPluginBase): """ TODO: this plugin is becoming very similar to the image plugin... code should be re-used somehow. """ model = models.FilerTeaser name = _("Teaser") render_template = "cmsplugin_filer_teaser/teaser.html" def _get_thumbnail_options(self, context, instance): """ Return the size and options of the thumbnail that should be inserted """ width, height = None, None subject_location = False placeholder_width = context.get('width', None) placeholder_height = context.get('height', None) if instance.use_autoscale and placeholder_width: # use the placeholder width as a hint for sizing width = int(placeholder_width) if instance.use_autoscale and placeholder_height: height = int(placeholder_height) elif instance.width: width = instance.width if instance.height: height = instance.height if instance.image: if instance.image.subject_location: subject_location = instance.image.subject_location if not height and width: # height was not externally defined: use ratio to scale it by the width height = int( float(width)*float(instance.image.height)/float(instance.image.width) ) if not width and height: # width was not externally defined: use ratio to scale it by the height width = int( float(height)*float(instance.image.width)/float(instance.image.height) ) if not width: # width is still not defined. fallback the actual image width width = instance.image.width if not height: # height is still not defined. fallback the actual image height height = instance.image.height return {'size': (width, height), 'subject_location': subject_location} def get_thumbnail(self, context, instance): if instance.image: return instance.image.image.file.get_thumbnail(self._get_thumbnail_options(context, instance)) def render(self, context, instance, placeholder): options = self._get_thumbnail_options(context, instance) context.update({ 'instance': instance, 'link': instance.link, 'opts': options, 'size': options.get('size',None), 'placeholder': placeholder }) return context plugin_pool.register_plugin(FilerTeaserPlugin)
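
# Illustrative sketch (added; a hypothetical standalone call, not how the CMS
# invokes plugins) showing how the computed options feed easy_thumbnails,
# mirroring get_thumbnail() above — `plugin` and `instance` are assumed to
# already exist:
#
#   opts = plugin._get_thumbnail_options({'width': 300}, instance)
#   thumb = instance.image.image.file.get_thumbnail(opts)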
mitar/cmsplugin-filer
src/cmsplugin_filer_teaser/cms_plugins.py
Python
mit
2,802
# -*- mode: python; indent-tabs-mode: nil; tab-width: 2 -*- """ aria_api.py - implements handlers which are for the Aria to talk to helvetic. """ from __future__ import absolute_import from base64 import b16encode from crc16 import crc16xmodem from datetime import timedelta from decimal import Decimal from django.contrib.auth.models import User from django.db import transaction from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from django.views.generic import View from string import hexdigits import struct from time import time from ..models import AuthorisationToken, Measurement, Scale, utcnow class ScaleValidateView(View): def get(self, request): # Context: https://github.com/micolous/helvetic/issues/1 # # Sometimes the scale is trying to verify that it authenticated with the # correct token. We don't really care about these requests (it is handled # by /scale/register aka ScaleRegisterView), so we can just always return # "T" (OK). # # The real service returns "F" on error. return HttpResponse('T') class ScaleRegisterView(View): def get(self, request): if 'serialNumber' not in request.GET: return HttpResponseBadRequest('serialNumber missing') if 'token' not in request.GET: return HttpResponseBadRequest('token missing') if 'ssid' not in request.GET: return HttpResponseBadRequest('ssid missing') serial = request.GET['serialNumber'].upper() token = request.GET['token'] ssid = request.GET['ssid'] if len(serial) != 12: return HttpResponseBadRequest('serialNumber must be 12 bytes') if any(((x not in hexdigits) for x in serial)): return HttpResponseBadRequest('serial must only contain hex') # Lookup the authorisation token auth_token = AuthorisationToken.lookup_token(token) if auth_token is None: return HttpResponseForbidden('Bad auth token') owner = auth_token.user # Delete the token. 
auth_token.delete()

    # Register the Aria
    scale = Scale.objects.create(
      hw_address=serial,
      ssid=ssid,
      owner=owner,
    )

    # Only return 200 OK
    return HttpResponse('')


class ScaleUploadView(View):
  @method_decorator(csrf_exempt)
  @method_decorator(transaction.atomic)
  def dispatch(self, *args, **kwargs):
    return super(ScaleUploadView, self).dispatch(*args, **kwargs)

  def post(self, request):
    now = utcnow()
    body = request.body

    # Version 3 protocol
    proto_ver, battery_pc, mac, auth_code = struct.unpack('<LL6s16s', body[:30])
    body = body[30:]

    if proto_ver != 3:
      return HttpResponseBadRequest('Unknown protocol version: %d' % proto_ver)

    if battery_pc > 100 or battery_pc < 0:
      return HttpResponseBadRequest(
        'Battery percentage must be 0..100 (got %d)' % battery_pc)

    mac, auth_code = [b16encode(x) for x in (mac, auth_code)]

    scale = None
    try:
      scale = Scale.objects.get(hw_address=mac)
    except Scale.DoesNotExist:
      return HttpResponseBadRequest('Unknown scale: %s' % mac)

    # Check authcode
    if scale.auth_code is None or scale.auth_code == '':
      scale.auth_code = auth_code
    elif scale.auth_code != auth_code:
      return HttpResponseForbidden('Invalid auth code')

    scale.battery_percent = battery_pc

    fw_ver, unknown2, scale_now, measurement_count = struct.unpack(
      '<LLLL', body[:16])
    body = body[16:]

    scale.fw_version = fw_ver
    scale.save()

    for x in range(measurement_count):
      if len(body) < 32:
        return HttpResponseBadRequest('Measurement truncated.')

      id2, imp, weight, ts, uid, fat1, covar, fat2 = \
        struct.unpack('<LLLLLLLL', body[:32])

      # Record the measurement
      # Look up the owner of this measurement
      if uid == 0:
        measured_user = None
      else:
        try:
          measured_user = User.objects.get(id=uid)
        except User.DoesNotExist:
          measured_user = None

      measurement = Measurement.objects.create(
        user=measured_user,
        scale=scale,
        when=now - timedelta(seconds=scale_now - ts),
        weight=weight,
        body_fat=Decimal(fat1) / Decimal(1000),
      )

      body = body[32:]

    # Formulate a response
    scale_users = scale.users.all()

    response = struct.pack('<LBBBL',
      int(time()),  # Fill with current time, to account for processing delay
      scale.unit,
      0x32,  # status = configured
      0x01,  # unknown
      len(scale_users)
    )

    # Insert user info
    for profile in scale_users:
      last_weight = min_var = max_var = 0
      last_measurement = profile.latest_measurement()
      if last_measurement is not None:
        last_weight = ((last_measurement.weight) // 1000) * 1000
        min_var = last_weight - 4000
        if min_var < 0:
          min_var = 0
        max_var = last_weight + 4000

      response += struct.pack('<L16x20sLLLBLLLLLL',
        profile.user.id,
        profile.short_name_formatted(),
        min_var,
        max_var,
        profile.age(),
        profile.gender,
        profile.height,
        0,  # some weight
        0,  # body fat
        0,  # covariance
        0,  # another weight
        0,  # timestamp
      )

    response = response + struct.pack('<LLL',
      0,  # always 0
      3,  # update status: no
      0,  # unknown
    )

    trailer = 0x19 + (len(scale_users) * 0x4d)

    response = response + struct.pack('<HH',
      crc16xmodem(response),  # checksum
      trailer,
    )

    hr = HttpResponse(response)
    # Content-Length is a required element
    hr['Content-Length'] = str(len(response))
    return hr
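
# Illustrative sketch (added; all values hypothetical) of a minimal
# protocol-v3 upload body matching the struct layout parsed by
# ScaleUploadView.post() above:
#
#   import struct
#   # proto_ver=3, battery=87%, 6-byte MAC, 16-byte auth code
#   body = struct.pack('<LL6s16s', 3, 87, b'\xaa' * 6, b'\x00' * 16)
#   # fw_ver=39, unknown2=0, scale clock, zero measurements follow
#   body += struct.pack('<LLLL', 39, 0, 1451606400, 0)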
micolous/helvetic
helvetic/views/aria_api.py
Python
agpl-3.0
5,411
# -*- coding: utf-8 -*- """ *************************************************************************** ModelerAlgorithmProvider.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.core import QgsApplication from processing.core.AlgorithmProvider import AlgorithmProvider from processing.core.ProcessingConfig import ProcessingConfig, Setting from processing.core.ProcessingLog import ProcessingLog from processing.modeler.ModelerUtils import ModelerUtils from processing.modeler.ModelerAlgorithm import ModelerAlgorithm from processing.modeler.WrongModelException import WrongModelException from processing.modeler.EditModelAction import EditModelAction from processing.modeler.CreateNewModelAction import CreateNewModelAction from processing.modeler.DeleteModelAction import DeleteModelAction from processing.modeler.AddModelFromFileAction import AddModelFromFileAction from processing.gui.GetScriptsAndModels import GetModelsAction pluginPath = os.path.split(os.path.dirname(__file__))[0] class ModelerAlgorithmProvider(AlgorithmProvider): def __init__(self): super().__init__() self.actions = [CreateNewModelAction(), AddModelFromFileAction(), GetModelsAction()] self.contextMenuActions = [EditModelAction(), DeleteModelAction()] def initializeSettings(self): AlgorithmProvider.initializeSettings(self) ProcessingConfig.addSetting(Setting(self.name(), ModelerUtils.MODELS_FOLDER, self.tr('Models folder', 'ModelerAlgorithmProvider'), ModelerUtils.defaultModelsFolder(), valuetype=Setting.MULTIPLE_FOLDERS)) def modelsFolder(self): return ModelerUtils.modelsFolders()[0] def name(self): return self.tr('Models', 'ModelerAlgorithmProvider') def id(self): return 'model' def icon(self): return QgsApplication.getThemeIcon("/processingModel.svg") def svgIconPath(self): return QgsApplication.iconPath("processingModel.svg") def _loadAlgorithms(self): folders = ModelerUtils.modelsFolders() self.algs = [] for f in folders: self.loadFromFolder(f) def loadFromFolder(self, folder): if not os.path.exists(folder): return for path, subdirs, files in os.walk(folder): for descriptionFile in files: if descriptionFile.endswith('model'): try: fullpath = os.path.join(path, descriptionFile) alg = ModelerAlgorithm.fromFile(fullpath) if alg.name: alg.provider = self alg.descriptionFile = fullpath self.algs.append(alg) else: ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, self.tr('Could not load model {0}', 'ModelerAlgorithmProvider').format(descriptionFile)) except WrongModelException as e: ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, self.tr('Could not load model {0}\n{1}', 'ModelerAlgorithmProvider').format(descriptionFile, e.msg))
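
# Illustrative registration sketch (added; hypothetical — mirrors how
# Processing providers of this era were typically wired up by the plugin,
# which is not shown in this file):
#
#   from processing.core.Processing import Processing
#   Processing.addProvider(ModelerAlgorithmProvider())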
myarjunar/QGIS
python/plugins/processing/modeler/ModelerAlgorithmProvider.py
Python
gpl-2.0
4,267
# coding=utf-8 # Copyright 2022 The Tensor2Robot Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tensor2robot.predictors.ensemble_exported_savedmodel_predictor.""" import os from absl import flags from absl.testing import parameterized import gin import numpy as np from tensor2robot.input_generators import default_input_generator from tensor2robot.predictors import ensemble_exported_savedmodel_predictor from tensor2robot.utils import mocks from tensor2robot.utils import tensorspec_utils from tensor2robot.utils import train_eval import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS _EXPORT_DIR = 'asyn_export' _BATCH_SIZE = 2 _MAX_TRAIN_STEPS = 3 _MAX_EVAL_STEPS = 2 class ExportedSavedmodelPredictorTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): super(ExportedSavedmodelPredictorTest, self).setUp() gin.clear_config() gin.parse_config('tf.estimator.RunConfig.save_checkpoints_steps=1') def test_predictor_with_default_exporter(self): input_generator = default_input_generator.DefaultRandomInputGenerator( batch_size=_BATCH_SIZE) model_dir = self.create_tempdir().full_path mock_model = mocks.MockT2RModel() train_eval.train_eval_model( t2r_model=mock_model, input_generator_train=input_generator, input_generator_eval=input_generator, max_train_steps=_MAX_TRAIN_STEPS, eval_steps=_MAX_EVAL_STEPS, model_dir=model_dir, create_exporters_fn=train_eval.create_default_exporters) # Create ensemble by duplicating the same directory multiple times. export_dirs = ','.join( [os.path.join(model_dir, 'export', 'latest_exporter_numpy')] * 2) predictor = ensemble_exported_savedmodel_predictor.EnsembleExportedSavedModelPredictor( export_dirs=export_dirs, local_export_root=None, ensemble_size=2) predictor.resample_ensemble() with self.assertRaises(ValueError): predictor.get_feature_specification() with self.assertRaises(ValueError): predictor.predict({'does_not_matter': np.zeros(1)}) with self.assertRaises(ValueError): _ = predictor.model_version self.assertEqual(predictor.global_step, -1) self.assertTrue(predictor.restore(is_async=False)) self.assertGreater(predictor.model_version, 0) self.assertEqual(predictor.global_step, -1) ref_feature_spec = mock_model.preprocessor.get_in_feature_specification( tf.estimator.ModeKeys.PREDICT) tensorspec_utils.assert_equal(predictor.get_feature_specification(), ref_feature_spec) features = tensorspec_utils.make_random_numpy( ref_feature_spec, batch_size=_BATCH_SIZE) predictions = predictor.predict(features) self.assertLen(predictions, 1) self.assertCountEqual(predictions.keys(), ['logit']) self.assertEqual(predictions['logit'].shape, (2, 1)) if __name__ == '__main__': tf.test.main()
google-research/tensor2robot
predictors/ensemble_exported_savedmodel_predictor_test.py
Python
apache-2.0
3,441
# Copyright (C) 2011 Equinor ASA, Norway. # # This file is part of ERT - Ensemble based Reservoir Tool. # # ERT is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ERT is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html> # for more details. from ecl import EclPrototype __arglist = 'double, double, double, ' __arglist += 'ecl_grid, ecl_file, ' __arglist += 'ecl_kw, ecl_kw, ecl_kw, ecl_kw, ecl_kw, ecl_kw' _phase_deltag = EclPrototype("double ecl_grav_phase_deltag(%s)" % __arglist) def phase_deltag(xyz, grid, init, sat1, rho1, porv1, sat2, rho2, porv2): return _phase_deltag(xyz[0], xyz[1], xyz[2], grid.c_ptr, init.c_ptr, sat1.c_ptr, rho1.c_ptr, porv1.c_ptr, sat2.c_ptr, rho2.c_ptr, porv2.c_ptr) def deltag(xyz, grid, init_file, restart_file1, restart_file2): """ 1. All restart files should have water, i.e. the SWAT keyword. 2. All phases present in the restart file should also be present as densities, in addition the model must contain one additional phase - which should have a density. 3. The restart files can never contain oil saturation. """ swat1 = restart_file1.iget_named_kw("SWAT", 0) swat2 = restart_file2.iget_named_kw("SWAT", 0) phase_list = [(swat1, swat2)] if restart_file1.has_kw("SGAS"): # This is a three phase Water / Gas / Oil system sgas1 = restart_file1.iget_named_kw("SGAS", 0) sgas2 = restart_file2.iget_named_kw("SGAS", 0) soil1 = 1 - (sgas1 + swat1) soil2 = 1 - (sgas2 + swat2) soil1.name = "SOIL" soil2.name = "SOIL" phase_list += [(sgas1, sgas2), (soil1, soil2)] else: # This is a two phase Water / xxx System. Will look for # OIL_DEN and GAS_DEN keywords to determine whether it is a # Water / Oil or Water / Gas system. if restart_file1.has_kw("OIL_DEN"): # Oil / Water system soil1 = 1 - swat1 soil2 = 1 - swat2 soil1.name = "SOIL" soil2.name = "SOIL" phase_list += [(soil1, soil2)] else: # Gas / Water system sgas1 = 1 - swat1 sgas2 = 1 - swat2 sgas1.name = "SGAS" sgas2.name = "SGAS" phase_list += [(sgas1, sgas2)] porv1 = restart_file1.iget_named_kw("RPORV", 0) porv2 = restart_file2.iget_named_kw("RPORV", 0) deltag = 0 for (sat1, sat2) in phase_list: rho_name = "%s_DEN" % sat1.name[1:] rho1 = restart_file1.iget_named_kw(rho_name, 0) rho2 = restart_file2.iget_named_kw(rho_name, 0) deltag += phase_deltag(xyz, grid, init_file, sat1, rho1, porv1, sat2, rho2, porv2) return deltag
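
# Illustrative usage sketch (added; the case file names are hypothetical,
# and the grid/file readers come from the `ecl` package):
#
#   from ecl.grid import EclGrid
#   from ecl.eclfile import EclFile
#
#   grid = EclGrid("CASE.EGRID")
#   init = EclFile("CASE.INIT")
#   r1, r2 = EclFile("CASE.X0000"), EclFile("CASE.X0010")
#   dg = deltag((1000.0, 2000.0, 25.0), grid, init, r1, r2)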
Statoil/libecl
python/ecl/gravimetry/ecl_grav_calc.py
Python
gpl-3.0
3,182
""" Script to autogenerate pyplot wrappers. When this script is run, the current contents of pyplot are split into generatable and non-generatable content (via the magic header :attr:`PYPLOT_MAGIC_HEADER`) and the generatable content is overwritten. Hence, the non-generatable content should be edited in the pyplot.py file itself, whereas the generatable content must be edited via templates in this file. """ # We did try to do the wrapping the smart way, # with callable functions and new.function, but could never get the # docstrings right for python2.2. See # http://groups.google.com/group/comp.lang.python/browse_frm/thread/dcd63ec13096a0f6/1b14640f3a4ad3dc?#1b14640f3a4ad3dc # For some later history, see # http://thread.gmane.org/gmane.comp.python.matplotlib.devel/7068 from __future__ import (absolute_import, division, print_function, unicode_literals) import six import os import inspect import random import types import textwrap # this line imports the installed copy of matplotlib, and not the local copy from matplotlib.axes import Axes # this is the magic line that must exist in pyplot, after which the boilerplate content will be # appended PYPLOT_MAGIC_HEADER = '################# REMAINING CONTENT GENERATED BY boilerplate.py ##############\n' PYPLOT_PATH = os.path.join(os.path.dirname(__file__), 'lib', 'matplotlib', 'pyplot.py') AUTOGEN_MSG = """ # This function was autogenerated by boilerplate.py. Do not edit as # changes will be lost""" PLOT_TEMPLATE = AUTOGEN_MSG + """ @_autogen_docstring(Axes.%(func)s) def %(func)s(%(argspec)s): %(ax)s = gca() # allow callers to override the hold state by passing hold=True|False %(washold)s = %(ax)s.ishold() %(sethold)s if hold is not None: %(ax)s.hold(hold) try: %(ret)s = %(ax)s.%(func)s(%(call)s) draw_if_interactive() finally: %(ax)s.hold(%(washold)s) %(mappable)s return %(ret)s """ # Used for misc functions such as cla/legend etc. MISC_FN_TEMPLATE = AUTOGEN_MSG + """ @docstring.copy_dedent(Axes.%(func)s) def %(func)s(%(argspec)s): %(ret)s = gca().%(func)s(%(call)s) draw_if_interactive() return %(ret)s """ # Used for colormap functions CMAP_TEMPLATE = AUTOGEN_MSG + """ def {name}(): ''' set the default colormap to {name} and apply to current image if any. See help(colormaps) for more information ''' rc('image', cmap='{name}') im = gci() if im is not None: im.set_cmap(cm.{name}) draw_if_interactive() """ def boilerplate_gen(): """Generator of lines for the automated part of pyplot.""" # these methods are all simple wrappers of Axes methods by the same # name. 
_plotcommands = ( 'acorr', 'angle_spectrum', 'arrow', 'axhline', 'axhspan', 'axvline', 'axvspan', 'bar', 'barh', 'broken_barh', 'boxplot', 'cohere', 'clabel', 'contour', 'contourf', 'csd', 'errorbar', 'eventplot', 'fill', 'fill_between', 'fill_betweenx', 'hexbin', 'hist', 'hist2d', 'hlines', 'imshow', 'loglog', 'magnitude_spectrum', 'pcolor', 'pcolormesh', 'phase_spectrum', 'pie', 'plot', 'plot_date', 'psd', 'quiver', 'quiverkey', 'scatter', 'semilogx', 'semilogy', 'specgram', #'spy', 'stackplot', 'stem', 'step', 'streamplot', 'tricontour', 'tricontourf', 'tripcolor', 'triplot', 'violinplot', 'vlines', 'xcorr', 'barbs', ) _misccommands = ( 'cla', 'grid', 'legend', 'table', 'text', 'annotate', 'ticklabel_format', 'locator_params', 'tick_params', 'margins', 'autoscale', ) cmappable = { 'contour' : 'if %(ret)s._A is not None: sci(%(ret)s)', 'contourf': 'if %(ret)s._A is not None: sci(%(ret)s)', 'hexbin' : 'sci(%(ret)s)', 'scatter' : 'sci(%(ret)s)', 'pcolor' : 'sci(%(ret)s)', 'pcolormesh': 'sci(%(ret)s)', 'hist2d' : 'sci(%(ret)s[-1])', 'imshow' : 'sci(%(ret)s)', #'spy' : 'sci(%(ret)s)', ### may return image or Line2D 'quiver' : 'sci(%(ret)s)', 'specgram' : 'sci(%(ret)s[-1])', 'streamplot' : 'sci(%(ret)s.lines)', 'tricontour' : 'if %(ret)s._A is not None: sci(%(ret)s)', 'tricontourf': 'if %(ret)s._A is not None: sci(%(ret)s)', 'tripcolor' : 'sci(%(ret)s)', } def format_value(value): """ Format function default values as needed for inspect.formatargspec. The interesting part is a hard-coded list of functions used as defaults in pyplot methods. """ if isinstance(value, types.FunctionType): if value.__name__ in ('detrend_none', 'window_hanning'): return '=mlab.' + value.__name__ if value.__name__ == 'mean': return '=np.' + value.__name__ raise ValueError(('default value %s unknown to boilerplate.' 
+ \ 'formatvalue') % value) return '='+repr(value) text_wrapper = textwrap.TextWrapper(break_long_words=False) for fmt, cmdlist in [(PLOT_TEMPLATE, _plotcommands), (MISC_FN_TEMPLATE, _misccommands)]: for func in cmdlist: # For some commands, an additional line is needed to set the # color map if func in cmappable: mappable = ' ' + cmappable[func] % locals() else: mappable = '' # Get argspec of wrapped function args, varargs, varkw, defaults = inspect.getargspec(getattr(Axes, func)) args.pop(0) # remove 'self' argument if defaults is None: defaults = () else: def_edited = [] for val in defaults: if isinstance(val, unicode): val = val.encode('ascii', 'ignore') def_edited.append(val) defaults = tuple(def_edited) # How to call the wrapped function call = [] for i, arg in enumerate(args): if len(defaults) < len(args) - i: call.append('%s' % arg) else: call.append('%s=%s' % (arg, arg)) if varargs is not None: call.append('*'+varargs) if varkw is not None: call.append('**'+varkw) call = ', '.join(call) text_wrapper.width = 80 - 19 - len(func) join_with = '\n' + ' ' * (18 + len(func)) call = join_with.join(text_wrapper.wrap(call)) # Add a hold keyword argument if needed (fmt is PLOT_TEMPLATE) and # possible (if *args is used, we can't just add a hold # argument in front of it since it would gobble one of the # arguments the user means to pass via *args) if varargs: sethold = " hold = %(varkw)s.pop('hold', None)" % locals() elif fmt is PLOT_TEMPLATE: args.append('hold') defaults = defaults + (None,) sethold = '' # Now we can build the argspec for defining the wrapper argspec = inspect.formatargspec(args, varargs, varkw, defaults, formatvalue=format_value) argspec = argspec[1:-1] # remove parens text_wrapper.width = 80 - 5 - len(func) join_with = '\n' + ' ' * (5 + len(func)) argspec = join_with.join(text_wrapper.wrap(argspec)) # A gensym-like facility in case some function takes an # argument named washold, ax, or ret washold, ret, ax = 'washold', 'ret', 'ax' bad = set(args) | set((varargs, varkw)) while washold in bad or ret in bad or ax in bad: washold = 'washold' + str(random.randrange(10**12)) ret = 'ret' + str(random.randrange(10**12)) ax = 'ax' + str(random.randrange(10**12)) # Since we can't avoid using some function names, # bail out if they are used as argument names for reserved in ('gca', 'gci', 'draw_if_interactive'): if reserved in bad: msg = 'Axes method %s has kwarg named %s' % (func, reserved) raise ValueError(msg) yield fmt % locals() cmaps = ( 'autumn', 'bone', 'cool', 'copper', 'flag', 'gray' , 'hot', 'hsv', 'jet' , 'pink', 'prism', 'spring', 'summer', 'winter', 'spectral' ) # add all the colormaps (autumn, hsv, ....) for name in cmaps: yield CMAP_TEMPLATE.format(name=name) yield '' yield '_setup_pyplot_info_docstrings()' def build_pyplot(): pyplot_path = os.path.join(os.path.dirname(__file__), 'lib', 'matplotlib', 'pyplot.py') pyplot_orig = open(pyplot_path, 'r').readlines() try: pyplot_orig = pyplot_orig[:pyplot_orig.index(PYPLOT_MAGIC_HEADER)+1] except IndexError: raise ValueError('The pyplot.py file *must* have the exact line: %s' % PYPLOT_MAGIC_HEADER) pyplot = open(pyplot_path, 'w') pyplot.writelines(pyplot_orig) pyplot.write('\n') pyplot.writelines(boilerplate_gen()) pyplot.write('\n') if __name__ == '__main__': # Write the matplotlib.pyplot file build_pyplot()
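
# ----------------------------------------------------------------------
# Illustration (added; not part of the original script): roughly what
# PLOT_TEMPLATE expands to for a simple Axes method such as acorr, whose
# signature is (self, x, **kwargs).  The exact argspec comes from the
# installed matplotlib, so treat this as a sketch.  Note that the wrapper
# consumes `hold` itself (the %(sethold)s line renders empty here) and
# %(mappable)s is empty because acorr is not in the cmappable dict.
#
# @_autogen_docstring(Axes.acorr)
# def acorr(x, hold=None, **kwargs):
#     ax = gca()
#     # allow callers to override the hold state by passing hold=True|False
#     washold = ax.ishold()
#
#     if hold is not None:
#         ax.hold(hold)
#     try:
#         ret = ax.acorr(x, **kwargs)
#         draw_if_interactive()
#     finally:
#         ax.hold(washold)
#
#     return ret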
yavalvas/yav_com
build/matplotlib/boilerplate.py
Python
mit
10,009
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools for interpolating to a vertical slice/cross section through data."""

import numpy as np
import xarray as xr

from ..package_tools import Exporter
from ..units import units
from ..xarray import check_axis

exporter = Exporter(globals())


@exporter.export
def interpolate_to_slice(data, points, interp_type='linear'):
    r"""Obtain an interpolated slice through data using xarray.

    Utilizing the interpolation functionality in `xarray`, this function takes a slice of
    the given data (currently only regular grids are supported), which is given as an
    `xarray.DataArray` so that we can utilize its coordinate metadata.

    Parameters
    ----------
    data: `xarray.DataArray` or `xarray.Dataset`
        Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
        DataArray in the Dataset) must have been parsed by MetPy and include both an x and
        y coordinate dimension.
    points: (N, 2) array_like
        A list of x, y points in the data projection at which to interpolate the data
    interp_type: str, optional
        The interpolation method, either 'linear' or 'nearest' (see
        `xarray.DataArray.interp()` for details). Defaults to 'linear'.

    Returns
    -------
    `xarray.DataArray` or `xarray.Dataset`
        The interpolated slice of data, with new index dimension of size N.

    See Also
    --------
    cross_section

    """
    try:
        x, y = data.metpy.coordinates('x', 'y')
    except AttributeError:
        raise ValueError('Required coordinate information not available. Verify that '
                         'your data has been parsed by MetPy with proper x and y '
                         'dimension coordinates.')

    data_sliced = data.interp({
        x.name: xr.DataArray(points[:, 0], dims='index', attrs=x.attrs),
        y.name: xr.DataArray(points[:, 1], dims='index', attrs=y.attrs)
    }, method=interp_type)
    data_sliced.coords['index'] = range(len(points))

    # Bug in xarray: interp strips units
    if (
        isinstance(data.data, units.Quantity)
        and not isinstance(data_sliced.data, units.Quantity)
    ):
        data_sliced.data = units.Quantity(data_sliced.data, data.data.units)

    return data_sliced


@exporter.export
def geodesic(crs, start, end, steps):
    r"""Construct a geodesic path between two points.

    This function acts as a wrapper for the geodesic construction available in `pyproj`.

    Parameters
    ----------
    crs: `pyproj.CRS`
        PyProj Coordinate Reference System to use for the output
    start: (2, ) array_like
        A latitude-longitude pair designating the start point of the geodesic (units are
        degrees north and degrees east).
    end: (2, ) array_like
        A latitude-longitude pair designating the end point of the geodesic (units are
        degrees north and degrees east).
    steps: int, optional
        The number of points along the geodesic between the start and the end point
        (including the end points).

    Returns
    -------
    `numpy.ndarray`
        The list of x, y points in the given CRS of length `steps` along the geodesic.

    See Also
    --------
    cross_section

    """
    from pyproj import Proj

    g = crs.get_geod()
    p = Proj(crs)

    # Geod.npts only gives points *in between* the start and end, and we want to include
    # the endpoints.
    geodesic = np.concatenate([
        np.array(start[::-1])[None],
        np.array(g.npts(start[1], start[0], end[1], end[0], steps - 2)),
        np.array(end[::-1])[None]
    ]).transpose()
    points = np.stack(p(geodesic[0], geodesic[1], inverse=False, radians=False), axis=-1)

    return points


@exporter.export
def cross_section(data, start, end, steps=100, interp_type='linear'):
    r"""Obtain an interpolated cross-sectional slice through gridded data.

    Utilizing the interpolation functionality in `xarray`, this function takes a vertical
    cross-sectional slice along a geodesic through the given data on a regular grid, which
    is given as an `xarray.DataArray` so that we can utilize its coordinate and projection
    metadata.

    Parameters
    ----------
    data: `xarray.DataArray` or `xarray.Dataset`
        Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
        DataArray in the Dataset) must have been parsed by MetPy and include both an x and
        y coordinate dimension and the added `crs` coordinate.
    start: (2, ) array_like
        A latitude-longitude pair designating the start point of the cross section (units
        are degrees north and degrees east).
    end: (2, ) array_like
        A latitude-longitude pair designating the end point of the cross section (units
        are degrees north and degrees east).
    steps: int, optional
        The number of points along the geodesic between the start and the end point
        (including the end points) to use in the cross section. Defaults to 100.
    interp_type: str, optional
        The interpolation method, either 'linear' or 'nearest' (see
        `xarray.DataArray.interp()` for details). Defaults to 'linear'.

    Returns
    -------
    `xarray.DataArray` or `xarray.Dataset`
        The interpolated cross section, with new index dimension along the cross-section.

    See Also
    --------
    interpolate_to_slice, geodesic

    """
    if isinstance(data, xr.Dataset):
        # Recursively apply to dataset
        return data.map(cross_section, True, (start, end), steps=steps,
                        interp_type=interp_type)
    elif data.ndim == 0:
        # This has no dimensions, so it is likely a projection variable. In any case,
        # there are no data here to take the cross section with. Therefore, do nothing.
        return data
    else:
        # Get the projection and coordinates
        try:
            crs_data = data.metpy.pyproj_crs
            x = data.metpy.x
        except AttributeError:
            raise ValueError('Data missing required coordinate information. Verify that '
                             'your data have been parsed by MetPy with proper x and y '
                             'dimension coordinates and added crs coordinate of the '
                             'correct projection for each variable.')

        # Get the geodesic
        points_cross = geodesic(crs_data, start, end, steps)

        # Patch points_cross to match given longitude range, whether [0, 360) or (-180, 180]
        if check_axis(x, 'longitude') and (x > 180).any():
            points_cross[points_cross[:, 0] < 0, 0] += 360.

        # Return the interpolated data
        return interpolate_to_slice(data, points_cross, interp_type=interp_type)
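
# ----------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The file name and variable name are hypothetical; the input must be a
# CF-compliant grid so that MetPy's `parse_cf` can attach the x/y/crs
# metadata that cross_section relies on.
#
# import xarray as xr
# from metpy.interpolate import cross_section
#
# ds = xr.open_dataset('narr_example.nc').metpy.parse_cf()
# start, end = (37.0, -105.0), (35.5, -65.0)   # (lat, lon) pairs
# cross = cross_section(ds['Temperature'], start, end, steps=100)
# print(cross['index'])   # new dimension of size `steps` along the path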
dopplershift/MetPy
src/metpy/interpolate/slices.py
Python
bsd-3-clause
6,857
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2009-2012:
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#     Gregory Starck, g.starck@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.

from shinken.log import logger

properties = {
    'daemons': ['arbiter'],
    'type': 'ip_tag',
    }


# called by the plugin manager to get a module
def get_instance(plugin):
    logger.info("[IP Tag] Get an IpTag module for plugin %s" % plugin.get_name())

    # First try to import
    try:
        from ip_tag_arbiter import Ip_Tag_Arbiter
    except ImportError, exp:
        logger.warning("[IP Tag] Warning: the plugin type %s is unavailable: %s" % ('ip_tag', exp))
        return None

    # Get the plugin parameters; ip_range, property and value are
    # mandatory, the others fall back to defaults
    ip_range = plugin.ip_range
    prop = plugin.property
    value = plugin.value
    method = getattr(plugin, 'method', 'replace')
    ignore_hosts = getattr(plugin, 'ignore_hosts', None)

    instance = Ip_Tag_Arbiter(plugin, ip_range, prop, value, method, ignore_hosts)
    return instance
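
# ----------------------------------------------------------------------
# Configuration sketch (added for illustration; not part of the module).
# A module definition along these lines in the arbiter configuration
# would activate the tagger; the module name, address range, property
# and value shown here are examples only.
#
#   define module {
#       module_name     DMZ_Tagger
#       module_type     ip_tag
#       ip_range        192.168.1.0/24
#       property        poller_tag
#       value           DMZ
#       method          replace        ; optional, defaults to 'replace'
#       ignore_hosts    gateway-a      ; optional
#   }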
wbsavage/shinken
shinken/modules/ip_tag_arbiter/__init__.py
Python
agpl-3.0
1,728
# -*- coding: utf-8 -*-

# Import required libraries
import os

import pandas as pd
import numpy as np
import plotly.plotly as py
import flask
from flask_cors import CORS
import dash
from dash.dependencies import Input, Output, State, Event
import dash_core_components as dcc
import dash_html_components as html


# Setup the app
app = dash.Dash(__name__)
server = app.server
app.css.append_css({
    'external_url': (
        'https://cdn.rawgit.com/chriddyp/0247653a7c52feb4c48437e1c1837f75'
        '/raw/a68333b876edaf62df2efa7bac0e9b3613258851/dash.css'
    )
})

if 'DYNO' in os.environ:
    app.scripts.append_script({
        'external_url': 'https://cdn.rawgit.com/chriddyp/ca0d8f02a1659981a0ea7f013a378bbd/raw/e79f3f789517deec58f41251f7dbb6bee72c44ab/plotly_ga.js'  # noqa: E501
    })

app.layout = html.Div([
    html.Div(
        [
            dcc.Markdown(
                '''
                ### A View of a Chart That Predicts The Economic Future: The Yield Curve

                This interactive report is a rendition of a
                [New York Times original](https://www.nytimes.com/interactive/2015/03/19/upshot/3d-yield-curve-economic-growth.html).
                '''.replace('  ', ''),
                className='eight columns offset-by-two'
            )
        ],
        className='row',
        style={'text-align': 'center', 'margin-bottom': '15px'}
    ),
    html.Div(
        [
            html.Div(
                [
                    dcc.Slider(
                        min=0,
                        max=5,
                        value=0,
                        marks={i: '{}'.format(i + 1) for i in range(6)},
                        id='slider'
                    ),
                ],
                className='row',
                style={'margin-bottom': '10px'}
            ),
            html.Div(
                [
                    html.Div(
                        [
                            html.Button('Back', id='back',
                                        style={'display': 'inline-block'}),
                            html.Button('Next', id='next',
                                        style={'display': 'inline-block'})
                        ],
                        className='two columns offset-by-two'
                    ),
                    dcc.Markdown(
                        id='text',
                        className='six columns'
                    ),
                ],
                className='row',
                style={'margin-bottom': '10px'}
            ),
            dcc.Graph(
                id='graph',
                style={'height': '60vh'}
            ),
        ],
        id='page'
    ),
])


# Internal logic
last_back = 0
last_next = 0

df = pd.read_csv("data/yield_curve.csv")

xlist = list(df["x"].dropna())
ylist = list(df["y"].dropna())

del df["x"]
del df["y"]

zlist = []
for row in df.iterrows():
    index, data = row
    zlist.append(data.tolist())

UPS = {
    0: dict(x=0, y=0, z=1),
    1: dict(x=0, y=0, z=1),
    2: dict(x=0, y=0, z=1),
    3: dict(x=0, y=0, z=1),
    4: dict(x=0, y=0, z=1),
    5: dict(x=0, y=0, z=1),
}

CENTERS = {
    0: dict(x=0.3, y=0.8, z=-0.5),
    1: dict(x=0, y=0, z=-0.37),
    2: dict(x=0, y=1.1, z=-1.3),
    3: dict(x=0, y=-0.7, z=0),
    4: dict(x=0, y=-0.2, z=0),
    5: dict(x=-0.11, y=-0.5, z=0),
}

EYES = {
    0: dict(x=2.7, y=2.7, z=0.3),
    1: dict(x=0.01, y=3.8, z=-0.37),
    2: dict(x=1.3, y=3, z=0),
    3: dict(x=2.6, y=-1.6, z=0),
    4: dict(x=3, y=-0.2, z=0),
    5: dict(x=-0.1, y=-0.5, z=2.66)
}

TEXTS = {
    0: '''
    #### Yield curve 101
    The yield curve shows how much it costs the federal government to borrow
    money for a given amount of time, revealing the relationship between long-
    and short-term interest rates.

    >> It is, inherently, a forecast for what the economy holds in the future —
    how much inflation there will be, for example, and how healthy growth will
    be over the years ahead — all embodied in the price of money today, tomorrow
    and many years from now.
    '''.replace('  ', ''),
    1: '''
    #### Where we stand
    On Wednesday, both short-term and long-term rates were lower than they have
    been for most of history – a reflection of the continuing hangover from the
    financial crisis.

    >> The yield curve is fairly flat, which is a sign that investors expect
    mediocre growth in the years ahead.
    '''.replace('  ', ''),
    2: '''
    #### Deep in the valley
    In response to the last recession, the Federal Reserve has kept short-term
    rates very low — near zero — since 2008. (Lower interest rates stimulate
    the economy, by making it cheaper for people to borrow money, but also
    spark inflation.)

    >> Now, the Fed is getting ready to raise rates again, possibly as early
    as June.
    '''.replace('  ', ''),
    3: '''
    #### Last time, a puzzle
    The last time the Fed started raising rates was in 2004. From 2004 to 2006,
    short-term rates rose steadily.

    >> But long-term rates didn't rise very much.

    >> The Federal Reserve chairman called this phenomenon a “conundrum,” and
    it raised questions about the ability of the Fed to guide the economy.
    Part of the reason long-term rates failed to rise was because of strong
    foreign demand.
    '''.replace('  ', ''),
    4: '''
    #### Long-term rates are low now, too
    Foreign buyers have helped keep long-term rates low recently, too — as have
    new rules encouraging banks to hold government debt and expectations that
    economic growth could be weak for a long time.

    >> The 10-year Treasury yield was as low as it has ever been in July 2012
    and has risen only modestly since. Some economists refer to the economic
    pessimism as “the new normal.”
    '''.replace('  ', ''),
    5: '''
    #### Long-term rates are low now, too
    Here is the same chart viewed from above.
    '''.replace('  ', '')
}

ANNOTATIONS = {
    0: [],
    1: [dict(
        showarrow=False,
        x="1-month",
        y='2015-03-18',
        z=0.046,
        text="Short-term rates basically <br>follow the interest rates set <br>by the Federal Reserve.",
        xref='x',
        yref='y',
        zref='z',
        xanchor='left',
        yanchor='auto'
    )],
    2: [],
    3: [],
    4: [],
    5: [],
}


# Make 3d graph
@app.callback(Output('graph', 'figure'), [Input('slider', 'value')])
def make_graph(value):

    if value is None:
        value = 0

    if value in [0, 2, 3]:
        z_secondary_beginning = [z[1] for z in zlist if z[0] == 'None']
        z_secondary_end = [z[0] for z in zlist if z[0] != 'None']
        z_secondary = z_secondary_beginning + z_secondary_end
        x_secondary = ['3-month'] * len(z_secondary_beginning) + \
            ['1-month'] * len(z_secondary_end)
        y_secondary = ylist
        opacity = 0.7

    elif value == 1:
        x_secondary = xlist
        y_secondary = [ylist[-1] for i in xlist]
        z_secondary = zlist[-1]
        opacity = 0.7

    elif value == 4:
        z_secondary = [z[8] for z in zlist]
        x_secondary = ['10-year' for i in z_secondary]
        y_secondary = ylist
        opacity = 0.25

    if value in range(0, 5):

        trace1 = dict(
            type="surface",
            x=xlist,
            y=ylist,
            z=zlist,
            hoverinfo='x+y+z',
            lighting={
                "ambient": 0.95,
                "diffuse": 0.99,
                "fresnel": 0.01,
                "roughness": 0.01,
                "specular": 0.01,
            },
            colorscale=[[0, "rgb(230,245,254)"], [0.4, "rgb(123,171,203)"],
                        [0.8, "rgb(40,119,174)"], [1, "rgb(37,61,81)"]],
            opacity=opacity,
            showscale=False,
            zmax=9.18,
            zmin=0,
            scene="scene",
        )

        trace2 = dict(
            type='scatter3d',
            mode='lines',
            x=x_secondary,
            y=y_secondary,
            z=z_secondary,
            hoverinfo='x+y+z',
            line=dict(color='#444444')
        )

        data = [trace1, trace2]

    else:

        trace1 = dict(
            type="contour",
            x=ylist,
            y=xlist,
            z=np.array(zlist).T,
            colorscale=[[0, "rgb(230,245,254)"], [0.4, "rgb(123,171,203)"],
                        [0.8, "rgb(40,119,174)"], [1, "rgb(37,61,81)"]],
            showscale=False,
            zmax=9.18,
            zmin=0,
            line=dict(smoothing=1, color='rgba(40,40,40,0.15)'),
            contours=dict(coloring='heatmap')
        )

        data = [trace1]

    # margin = dict(
    #     t=5,
    #     l=50,
    #     b=50,
    #     r=5,
    # ),

    layout = dict(
        autosize=True,
        font=dict(
            size=12,
            color="#CCCCCC",
        ),
        margin=dict(
            t=5,
            l=5,
            b=5,
            r=5,
        ),
        showlegend=False,
        hovermode='closest',
        scene=dict(
            aspectmode="manual",
            aspectratio=dict(x=2, y=5, z=1.5),
            camera=dict(
                up=UPS[value],
                center=CENTERS[value],
                eye=EYES[value]
            ),
            annotations=[dict(
                showarrow=False,
                y="2015-03-18",
                x="1-month",
                z=0.046,
                text="Point 1",
                xanchor="left",
                xshift=10,
                opacity=0.7
            ), dict(
                y="2015-03-18",
                x="3-month",
                z=0.048,
                text="Point 2",
                textangle=0,
                ax=0,
                ay=-75,
                font=dict(
                    color="black",
                    size=12
                ),
                arrowcolor="black",
                arrowsize=3,
                arrowwidth=1,
                arrowhead=1
            )],
            xaxis={
                "showgrid": True,
                "title": "",
                "type": "category",
                "zeroline": False,
                "categoryorder": 'array',
                "categoryarray": list(reversed(xlist))
            },
            yaxis={
                "showgrid": True,
                "title": "",
                "type": "date",
                "zeroline": False,
            },
        )
    )

    figure = dict(data=data, layout=layout)
    # py.iplot(figure)

    return figure


# Make annotations
@app.callback(Output('text', 'children'), [Input('slider', 'value')])
def make_text(value):
    if value is None:
        value = 0

    return TEXTS[value]


# Button controls
@app.callback(Output('slider', 'value'),
              [Input('back', 'n_clicks'), Input('next', 'n_clicks')],
              [State('slider', 'value')])
def advance_slider(back, nxt, slider):

    if back is None:
        back = 0
    if nxt is None:
        nxt = 0
    if slider is None:
        slider = 0

    global last_back
    global last_next

    if back > last_back:
        last_back = back
        return max(0, slider - 1)
    if nxt > last_next:
        last_next = nxt
        return min(5, slider + 1)


# Run the Dash app
if __name__ == '__main__':
    app.server.run()
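
# ----------------------------------------------------------------------
# Design note (added; not part of the original app): the module-level
# last_back/last_next counters exist only to work out *which* button
# fired, because this version of Dash reports nothing but cumulative
# click counts.  On Dash >= 0.38 the same callback can be written
# without global state by asking dash.callback_context which input
# triggered it -- a sketch, assuming that newer API is available:
#
# @app.callback(Output('slider', 'value'),
#               [Input('back', 'n_clicks'), Input('next', 'n_clicks')],
#               [State('slider', 'value')])
# def advance_slider(back, nxt, slider):
#     slider = slider or 0
#     triggered = dash.callback_context.triggered[0]['prop_id']
#     if triggered.startswith('back'):
#         return max(0, slider - 1)
#     if triggered.startswith('next'):
#         return min(5, slider + 1)
#     return slider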
timkpaine/lantern
experimental/dash/dash-yield-curve-master/app.py
Python
apache-2.0
11,392
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.

Protocol:
each command has a format:
    id\tsequence-num\ttext
    id: protocol command number
    sequence-num: each request has a sequence number. Sequence numbers
    originating at the debugger are odd, sequence numbers originating
    at the daemon are even. Every response uses the same sequence number
    as the request.
    payload: it is protocol dependent. When response is a complex structure, it
    is returned as XML. Each attribute value is urlencoded, and then the whole
    payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings

Commands:

 NUMBER   NAME                       FROM*     ARGUMENTS                     RESPONSE      NOTE
100 series: program execution
 101      RUN                        JAVA      -                             -
 102      LIST_THREADS               JAVA                                    RETURN with XML listing of all threads
 103      THREAD_CREATE              PYDB      -                             XML with thread information
 104      THREAD_KILL                JAVA      id (or * to exit)             kills the thread
                                     PYDB      id                            notifies JAVA that thread was killed
 105      THREAD_SUSPEND             JAVA      XML of the stack,             suspends the thread
                                               reason for suspension
                                     PYDB      id                            notifies JAVA that thread was suspended
 106      CMD_THREAD_RUN             JAVA      id                            resume the thread
                                     PYDB      id \t reason                  notifies JAVA that thread was resumed
 107      STEP_INTO                  JAVA      thread_id
 108      STEP_OVER                  JAVA      thread_id
 109      STEP_RETURN                JAVA      thread_id
 110      GET_VARIABLE               JAVA      thread_id \t frame_id \t      GET_VARIABLE with XML of var content
                                               FRAME|GLOBAL \t attributes*
 111      SET_BREAK                  JAVA      file/line of the breakpoint
 112      REMOVE_BREAK               JAVA      file/line of the return
 113      CMD_EVALUATE_EXPRESSION    JAVA      expression                    result of evaluating the expression
 114      CMD_GET_FRAME              JAVA                                    request for frame contents
 115      CMD_EXEC_EXPRESSION        JAVA
 116      CMD_WRITE_TO_CONSOLE       PYDB
 117      CMD_CHANGE_VARIABLE
 118      CMD_RUN_TO_LINE
 119      CMD_RELOAD_CODE
 120      CMD_GET_COMPLETIONS        JAVA

500 series diagnostics/ok
 501      VERSION                    either    Version string (1.0)          Currently just used at startup
 502      RETURN                     either    Depends on caller             -

900 series: errors
 901      ERROR                      either    -                             This is reserved for unexpected errors.
* JAVA - remote debugger, the java end * PYDB - pydevd, the python end ''' from _pydev_bundle.pydev_imports import _queue from _pydev_imps._pydev_saved_modules import time from _pydev_imps._pydev_saved_modules import thread from _pydev_imps._pydev_saved_modules import threading from _pydev_imps._pydev_saved_modules import socket from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR from _pydevd_bundle.pydevd_constants import * #@UnusedWildImport try: from urllib import quote_plus, unquote, unquote_plus except: from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport import pydevconsole from _pydevd_bundle import pydevd_vars from _pydevd_bundle import pydevd_tracing from _pydevd_bundle import pydevd_vm_type import pydevd_file_utils import traceback from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string from _pydev_bundle import pydev_log from _pydev_bundle import _pydev_completer from _pydevd_bundle.pydevd_tracing import get_exception_traceback_str from _pydevd_bundle import pydevd_console from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules CMD_RUN = 101 CMD_LIST_THREADS = 102 CMD_THREAD_CREATE = 103 CMD_THREAD_KILL = 104 CMD_THREAD_SUSPEND = 105 CMD_THREAD_RUN = 106 CMD_STEP_INTO = 107 CMD_STEP_OVER = 108 CMD_STEP_RETURN = 109 CMD_GET_VARIABLE = 110 CMD_SET_BREAK = 111 CMD_REMOVE_BREAK = 112 CMD_EVALUATE_EXPRESSION = 113 CMD_GET_FRAME = 114 CMD_EXEC_EXPRESSION = 115 CMD_WRITE_TO_CONSOLE = 116 CMD_CHANGE_VARIABLE = 117 CMD_RUN_TO_LINE = 118 CMD_RELOAD_CODE = 119 CMD_GET_COMPLETIONS = 120 # Note: renumbered (conflicted on merge) CMD_CONSOLE_EXEC = 121 CMD_ADD_EXCEPTION_BREAK = 122 CMD_REMOVE_EXCEPTION_BREAK = 123 CMD_LOAD_SOURCE = 124 CMD_ADD_DJANGO_EXCEPTION_BREAK = 125 CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126 CMD_SET_NEXT_STATEMENT = 127 CMD_SMART_STEP_INTO = 128 CMD_EXIT = 129 CMD_SIGNATURE_CALL_TRACE = 130 CMD_SET_PY_EXCEPTION = 131 CMD_GET_FILE_CONTENTS = 132 CMD_SET_PROPERTY_TRACE = 133 # Pydev debug console commands CMD_EVALUATE_CONSOLE_EXPRESSION = 134 CMD_RUN_CUSTOM_OPERATION = 135 CMD_GET_BREAKPOINT_EXCEPTION = 136 CMD_STEP_CAUGHT_EXCEPTION = 137 CMD_SEND_CURR_EXCEPTION_TRACE = 138 CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139 CMD_IGNORE_THROWN_EXCEPTION_AT = 140 CMD_ENABLE_DONT_TRACE = 141 CMD_SHOW_CONSOLE = 142 CMD_GET_ARRAY = 143 CMD_STEP_INTO_MY_CODE = 144 CMD_GET_CONCURRENCY_EVENT = 145 CMD_VERSION = 501 CMD_RETURN = 502 CMD_ERROR = 901 ID_TO_MEANING = { '101': 'CMD_RUN', '102': 'CMD_LIST_THREADS', '103': 'CMD_THREAD_CREATE', '104': 'CMD_THREAD_KILL', '105': 'CMD_THREAD_SUSPEND', '106': 'CMD_THREAD_RUN', '107': 'CMD_STEP_INTO', '108': 'CMD_STEP_OVER', '109': 'CMD_STEP_RETURN', '110': 'CMD_GET_VARIABLE', '111': 'CMD_SET_BREAK', '112': 'CMD_REMOVE_BREAK', '113': 'CMD_EVALUATE_EXPRESSION', '114': 'CMD_GET_FRAME', '115': 'CMD_EXEC_EXPRESSION', '116': 'CMD_WRITE_TO_CONSOLE', '117': 'CMD_CHANGE_VARIABLE', '118': 'CMD_RUN_TO_LINE', '119': 'CMD_RELOAD_CODE', '120': 'CMD_GET_COMPLETIONS', '121': 'CMD_CONSOLE_EXEC', '122': 'CMD_ADD_EXCEPTION_BREAK', '123': 'CMD_REMOVE_EXCEPTION_BREAK', '124': 'CMD_LOAD_SOURCE', '125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK', '126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK', '127': 'CMD_SET_NEXT_STATEMENT', '128': 'CMD_SMART_STEP_INTO', '129': 'CMD_EXIT', '130': 'CMD_SIGNATURE_CALL_TRACE', '131': 'CMD_SET_PY_EXCEPTION', '132': 'CMD_GET_FILE_CONTENTS', '133': 'CMD_SET_PROPERTY_TRACE', '134': 'CMD_EVALUATE_CONSOLE_EXPRESSION', '135': 
'CMD_RUN_CUSTOM_OPERATION', '136': 'CMD_GET_BREAKPOINT_EXCEPTION', '137': 'CMD_STEP_CAUGHT_EXCEPTION', '138': 'CMD_SEND_CURR_EXCEPTION_TRACE', '139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED', '140': 'CMD_IGNORE_THROWN_EXCEPTION_AT', '141': 'CMD_ENABLE_DONT_TRACE', '143': 'CMD_GET_ARRAY', '144': 'CMD_STEP_INTO_MY_CODE', '145': 'CMD_GET_CONCURRENCY_EVENT', '501': 'CMD_VERSION', '502': 'CMD_RETURN', '901': 'CMD_ERROR', } MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive) #this number can be changed if there's need to do so VERSION_STRING = "PY-145.1504.1" from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding file_system_encoding = getfilesystemencoding() #--------------------------------------------------------------------------------------------------- UTILITIES #======================================================================================================================= # pydevd_log #======================================================================================================================= def pydevd_log(level, *args): """ levels are: 0 most serious warnings/errors 1 warnings/significant events 2 informational trace """ if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL: #yes, we can have errors printing if the console of the program has been finished (and we're still trying to print something) try: sys.stderr.write('%s\n' % (args,)) except: pass #======================================================================================================================= # GlobalDebuggerHolder #======================================================================================================================= class GlobalDebuggerHolder: ''' Holder for the global debugger. 
''' global_dbg = None # Note: don't rename (the name is used in our attach to process) #======================================================================================================================= # get_global_debugger #======================================================================================================================= def get_global_debugger(): return GlobalDebuggerHolder.global_dbg GetGlobalDebugger = get_global_debugger # Backward-compatibility #======================================================================================================================= # set_global_debugger #======================================================================================================================= def set_global_debugger(dbg): GlobalDebuggerHolder.global_dbg = dbg #------------------------------------------------------------------- ACTUAL COMM #======================================================================================================================= # PyDBDaemonThread #======================================================================================================================= class PyDBDaemonThread(threading.Thread): created_pydb_daemon_threads = {} def __init__(self): threading.Thread.__init__(self) self.setDaemon(True) self.killReceived = False self.dontTraceMe = True self.is_pydev_daemon_thread = True def run(self): created_pydb_daemon = self.created_pydb_daemon_threads created_pydb_daemon[self] = 1 try: try: if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread): # we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading' # module, and the new instance of main thread is created import org.python.core as PyCore #@UnresolvedImport ss = PyCore.PySystemState() # Note: Py.setSystemState() affects only the current thread. 
PyCore.Py.setSystemState(ss) self._on_run() except: if sys is not None and traceback is not None: traceback.print_exc() finally: del created_pydb_daemon[self] def _on_run(self): raise NotImplementedError('Should be reimplemented by: %s' % self.__class__) def do_kill_pydev_thread(self): #that was not working very well because jython gave some socket errors self.killReceived = True def _stop_trace(self): if self.dontTraceMe: disable_tracing = True if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0: # don't run untraced threads if we're in jython 2.2.1 or lower # jython bug: if we start a thread and another thread changes the tracing facility # it affects other threads (it's not set only for the thread but globally) # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867 disable_tracing = False if disable_tracing: pydevd_tracing.SetTrace(None) # no debugging on this thread #======================================================================================================================= # ReaderThread #======================================================================================================================= class ReaderThread(PyDBDaemonThread): """ reader thread reads and dispatches commands in an infinite loop """ def __init__(self, sock): PyDBDaemonThread.__init__(self) self.sock = sock self.setName("pydevd.Reader") from _pydevd_bundle.pydevd_process_net_command import process_net_command self.process_net_command = process_net_command self.global_debugger_holder = GlobalDebuggerHolder def do_kill_pydev_thread(self): #We must close the socket so that it doesn't stay halted there. self.killReceived = True try: self.sock.shutdown(SHUT_RD) #shutdown the socket for read except: #just ignore that pass def _on_run(self): self._stop_trace() read_buffer = "" try: while not self.killReceived: try: r = self.sock.recv(1024) except: if not self.killReceived: traceback.print_exc() self.handle_except() return #Finished communication. #Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode #internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames #on python 2 may need to be converted to the filesystem encoding). 
if hasattr(r, 'decode'): r = r.decode('utf-8') read_buffer += r if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS: sys.stderr.write('debugger: received >>%s<<\n' % (read_buffer,)) sys.stderr.flush() if len(read_buffer) == 0: self.handle_except() break while read_buffer.find('\n') != -1: command, read_buffer = read_buffer.split('\n', 1) args = command.split('\t', 2) try: cmd_id = int(args[0]) pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,)) self.process_command(cmd_id, int(args[1]), args[2]) except: traceback.print_exc() sys.stderr.write("Can't process net command: %s\n" % command) sys.stderr.flush() except: traceback.print_exc() self.handle_except() def handle_except(self): self.global_debugger_holder.global_dbg.finish_debugging_session() def process_command(self, cmd_id, seq, text): self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text) #----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER #======================================================================================================================= # WriterThread #======================================================================================================================= class WriterThread(PyDBDaemonThread): """ writer thread writes out the commands in an infinite loop """ def __init__(self, sock): PyDBDaemonThread.__init__(self) self.sock = sock self.setName("pydevd.Writer") self.cmdQueue = _queue.Queue() if pydevd_vm_type.get_vm_type() == 'python': self.timeout = 0 else: self.timeout = 0.1 def add_command(self, cmd): """ cmd is NetCommand """ if not self.killReceived: #we don't take new data after everybody die self.cmdQueue.put(cmd) def _on_run(self): """ just loop and write responses """ self._stop_trace() get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it. 
try: while True: try: try: if get_has_timeout: cmd = self.cmdQueue.get(1, 0.1) else: time.sleep(.01) cmd = self.cmdQueue.get(0) except _queue.Empty: if self.killReceived: try: self.sock.shutdown(SHUT_WR) self.sock.close() except: pass return #break if queue is empty and killReceived else: continue except: #pydevd_log(0, 'Finishing debug communication...(1)') #when liberating the thread here, we could have errors because we were shutting down #but the thread was still not liberated return out = cmd.outgoing if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1: out_message = 'sending cmd --> ' out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN') out_message += ' ' out_message += unquote(unquote(out)).replace('\n', ' ') try: sys.stderr.write('%s\n' % (out_message,)) except: pass if IS_PY3K: out = bytearray(out, 'utf-8') self.sock.send(out) #TODO: this does not guarantee that all message are sent (and jython does not have a send all) if cmd.id == CMD_EXIT: break if time is None: break #interpreter shutdown time.sleep(self.timeout) except Exception: GlobalDebuggerHolder.global_dbg.finish_debugging_session() if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0: traceback.print_exc() def empty(self): return self.cmdQueue.empty() #--------------------------------------------------- CREATING THE SOCKET THREADS #======================================================================================================================= # start_server #======================================================================================================================= def start_server(port): """ binds to a port, waits for the debugger to connect """ s = socket(AF_INET, SOCK_STREAM) s.bind(('', port)) s.listen(1) newSock, _addr = s.accept() return newSock #======================================================================================================================= # start_client #======================================================================================================================= def start_client(host, port): """ connects to a host/port """ pydevd_log(1, "Connecting to ", host, ":", str(port)) s = socket(AF_INET, SOCK_STREAM) MAX_TRIES = 100 i = 0 while i<MAX_TRIES: try: s.connect((host, port)) except: i+=1 time.sleep(0.2) continue pydevd_log(1, "Connected.") return s sys.stderr.write("Could not connect to %s: %s\n" % (host, port)) sys.stderr.flush() traceback.print_exc() sys.exit(1) #TODO: is it safe? #------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF #======================================================================================================================= # NetCommand #======================================================================================================================= class NetCommand: """ Commands received/sent over the network. Command can represent command received from the debugger, or one to be sent by daemon. 
""" next_seq = 0 # sequence numbers def __init__(self, id, seq, text): """ smart handling of parameters if sequence is 0, new sequence will be generated if text has carriage returns they'll be replaced""" self.id = id if seq == 0: NetCommand.next_seq += 2 seq = NetCommand.next_seq self.seq = seq self.text = text encoded = quote(to_string(text), '/<>_=" \t') self.outgoing = '%s\t%s\t%s\n' % (id, seq, encoded) #======================================================================================================================= # NetCommandFactory #======================================================================================================================= class NetCommandFactory: def _thread_to_xml(self, thread): """ thread information as XML """ name = pydevd_vars.make_valid_xml_value(thread.getName()) cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread)) return cmdText def make_error_message(self, seq, text): cmd = NetCommand(CMD_ERROR, seq, text) if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2: sys.stderr.write("Error: %s" % (text,)) return cmd def make_thread_created_message(self, thread): cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>" return NetCommand(CMD_THREAD_CREATE, 0, cmdText) def make_custom_frame_created_message(self, frameId, frameDescription): frameDescription = pydevd_vars.make_valid_xml_value(frameDescription) cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId) return NetCommand(CMD_THREAD_CREATE, 0, cmdText) def make_list_threads_message(self, seq): """ returns thread listing as XML """ try: t = threading.enumerate() cmd_text = ["<xml>"] append = cmd_text.append for i in t: if t.isAlive(): append(self._thread_to_xml(i)) append("</xml>") return NetCommand(CMD_RETURN, seq, ''.join(cmd_text)) except: return self.make_error_message(seq, get_exception_traceback_str()) def make_variable_changed_message(self, seq, payload): # notify debugger that value was changed successfully return NetCommand(CMD_RETURN, seq, payload) def make_io_message(self, v, ctx, dbg=None): ''' @param v: the message to pass to the debug server @param ctx: 1 for stdio 2 for stderr @param dbg: If not none, add to the writer ''' try: if len(v) > MAX_IO_MSG_SIZE: v = v[0:MAX_IO_MSG_SIZE] v += '...' v = pydevd_vars.make_valid_xml_value(quote(v, '/>_= \t')) net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx)) except: net = self.make_error_message(0, get_exception_traceback_str()) if dbg: dbg.writer.add_command(net) return net def make_version_message(self, seq): try: return NetCommand(CMD_VERSION, seq, VERSION_STRING) except: return self.make_error_message(seq, get_exception_traceback_str()) def make_thread_killed_message(self, id): try: return NetCommand(CMD_THREAD_KILL, 0, str(id)) except: return self.make_error_message(0, get_exception_traceback_str()) def make_thread_suspend_str(self, thread_id, frame, stop_reason, message): """ <xml> <thread id="id" stop_reason="reason"> <frame id="id" name="functionName " file="file" line="line"> <var variable stuffff.... 
</frame> </thread> """ cmd_text_list = ["<xml>"] append = cmd_text_list.append make_valid_xml_value = pydevd_vars.make_valid_xml_value if message: message = make_valid_xml_value(message) append('<thread id="%s" stop_reason="%s" message="%s">' % (thread_id, stop_reason, message)) curr_frame = frame try: while curr_frame: #print cmdText my_id = id(curr_frame) #print "id is ", my_id if curr_frame.f_code is None: break #Iron Python sometimes does not have it! my_name = curr_frame.f_code.co_name #method name (if in method) or ? if global if my_name is None: break #Iron Python sometimes does not have it! #print "name is ", my_name abs_path_real_path_and_base = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(curr_frame) myFile = pydevd_file_utils.norm_file_to_client(abs_path_real_path_and_base[0]) if file_system_encoding.lower() != "utf-8" and hasattr(myFile, "decode"): # myFile is a byte string encoded using the file system encoding # convert it to utf8 myFile = myFile.decode(file_system_encoding).encode("utf-8") #print "file is ", myFile #myFile = inspect.getsourcefile(curr_frame) or inspect.getfile(frame) myLine = str(curr_frame.f_lineno) #print "line is ", myLine #the variables are all gotten 'on-demand' #variables = pydevd_vars.frame_vars_to_xml(curr_frame.f_locals) variables = '' append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(my_name))) append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine)) append(variables) append("</frame>") curr_frame = curr_frame.f_back except : traceback.print_exc() append("</thread></xml>") return ''.join(cmd_text_list) def make_thread_suspend_message(self, thread_id, frame, stop_reason, message): try: return NetCommand(CMD_THREAD_SUSPEND, 0, self.make_thread_suspend_str(thread_id, frame, stop_reason, message)) except: return self.make_error_message(0, get_exception_traceback_str()) def make_thread_run_message(self, id, reason): try: return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason)) except: return self.make_error_message(0, get_exception_traceback_str()) def make_get_variable_message(self, seq, payload): try: return NetCommand(CMD_GET_VARIABLE, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_get_array_message(self, seq, payload): try: return NetCommand(CMD_GET_ARRAY, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_get_frame_message(self, seq, payload): try: return NetCommand(CMD_GET_FRAME, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_evaluate_expression_message(self, seq, payload): try: return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_get_completions_message(self, seq, payload): try: return NetCommand(CMD_GET_COMPLETIONS, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_get_file_contents(self, seq, payload): try: return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_send_breakpoint_exception_message(self, seq, payload): try: return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, 
trace_obj): try: while trace_obj.tb_next is not None: trace_obj = trace_obj.tb_next exc_type = pydevd_vars.make_valid_xml_value(str(exc_type)).replace('\t', ' ') or 'exception: type unknown' exc_desc = pydevd_vars.make_valid_xml_value(str(exc_desc)).replace('\t', ' ') or 'exception: no description' payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + \ self.make_thread_suspend_str(thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '') return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id): try: return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id)) except: return self.make_error_message(0, get_exception_traceback_str()) def make_send_console_message(self, seq, payload): try: return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_custom_operation_message(self, seq, payload): try: return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload) except Exception: return self.make_error_message(seq, get_exception_traceback_str()) def make_load_source_message(self, seq, source, dbg=None): try: net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source) except: net = self.make_error_message(0, get_exception_traceback_str()) if dbg: dbg.writer.add_command(net) return net def make_show_console_message(self, thread_id, frame): try: return NetCommand(CMD_SHOW_CONSOLE, 0, self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, '')) except: return self.make_error_message(0, get_exception_traceback_str()) def make_exit_message(self): try: net = NetCommand(CMD_EXIT, 0, '') except: net = self.make_error_message(0, get_exception_traceback_str()) return net INTERNAL_TERMINATE_THREAD = 1 INTERNAL_SUSPEND_THREAD = 2 #======================================================================================================================= # InternalThreadCommand #======================================================================================================================= class InternalThreadCommand: """ internal commands are generated/executed by the debugger. The reason for their existence is that some commands have to be executed on specific threads. These are the InternalThreadCommands that get get posted to PyDB.cmdQueue. """ def can_be_executed_by(self, thread_id): '''By default, it must be in the same thread to be executed ''' return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id) def do_it(self, dbg): raise NotImplementedError("you have to override do_it") class ReloadCodeCommand(InternalThreadCommand): def __init__(self, module_name, thread_id): self.thread_id = thread_id self.module_name = module_name self.executed = False self.lock = thread.allocate_lock() def can_be_executed_by(self, thread_id): if self.thread_id == '*': return True #Any thread can execute it! return InternalThreadCommand.can_be_executed_by(self, thread_id) def do_it(self, dbg): self.lock.acquire() try: if self.executed: return self.executed = True finally: self.lock.release() module_name = self.module_name if not dict_contains(sys.modules, module_name): if '.' 
in module_name: new_module_name = module_name.split('.')[-1] if dict_contains(sys.modules, new_module_name): module_name = new_module_name if not dict_contains(sys.modules, module_name): sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n') # Too much info... # sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n') else: sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n') from _pydevd_bundle import pydevd_reload if pydevd_reload.xreload(sys.modules[module_name]): sys.stderr.write('pydev debugger: reload finished\n') else: sys.stderr.write('pydev debugger: reload finished without applying any change\n') #======================================================================================================================= # InternalTerminateThread #======================================================================================================================= class InternalTerminateThread(InternalThreadCommand): def __init__(self, thread_id): self.thread_id = thread_id def do_it(self, dbg): pydevd_log(1, "killing ", str(self.thread_id)) cmd = dbg.cmd_factory.make_thread_killed_message(self.thread_id) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalRunThread #======================================================================================================================= class InternalRunThread(InternalThreadCommand): def __init__(self, thread_id): self.thread_id = thread_id def do_it(self, dbg): t = pydevd_find_thread_by_id(self.thread_id) if t: t.additional_info.pydev_step_cmd = -1 t.additional_info.pydev_step_stop = None t.additional_info.pydev_state = STATE_RUN #======================================================================================================================= # InternalStepThread #======================================================================================================================= class InternalStepThread(InternalThreadCommand): def __init__(self, thread_id, cmd_id): self.thread_id = thread_id self.cmd_id = cmd_id def do_it(self, dbg): t = pydevd_find_thread_by_id(self.thread_id) if t: t.additional_info.pydev_step_cmd = self.cmd_id t.additional_info.pydev_state = STATE_RUN #======================================================================================================================= # InternalSetNextStatementThread #======================================================================================================================= class InternalSetNextStatementThread(InternalThreadCommand): def __init__(self, thread_id, cmd_id, line, func_name): self.thread_id = thread_id self.cmd_id = cmd_id self.line = line if IS_PY2: if isinstance(func_name, unicode): # On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes). 
func_name = func_name.encode('utf-8') self.func_name = func_name def do_it(self, dbg): t = pydevd_find_thread_by_id(self.thread_id) if t: t.additional_info.pydev_step_cmd = self.cmd_id t.additional_info.pydev_next_line = int(self.line) t.additional_info.pydev_func_name = self.func_name t.additional_info.pydev_state = STATE_RUN #======================================================================================================================= # InternalGetVariable #======================================================================================================================= class InternalGetVariable(InternalThreadCommand): """ gets the value of a variable """ def __init__(self, seq, thread_id, frame_id, scope, attrs): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.scope = scope self.attributes = attrs def do_it(self, dbg): """ Converts request into python variable """ try: xml = "<xml>" valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes) if valDict is None: valDict = {} keys = valDict.keys() if hasattr(keys, 'sort'): keys.sort(compare_object_attrs) #Python 3.0 does not have it else: if IS_PY3K: keys = sorted(keys, key=cmp_to_key(compare_object_attrs)) #Jython 2.1 does not have it (and all must be compared as strings). else: keys = sorted(keys, cmp=compare_object_attrs) #Jython 2.1 does not have it (and all must be compared as strings). for k in keys: xml += pydevd_vars.var_to_xml(valDict[k], to_string(k)) xml += "</xml>" cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml) dbg.writer.add_command(cmd) except Exception: cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving variables " + get_exception_traceback_str()) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalGetArray #======================================================================================================================= class InternalGetArray(InternalThreadCommand): def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.scope = scope self.name = attrs.split("\t")[-1] self.attrs = attrs self.roffset = int(roffset) self.coffset = int(coffset) self.rows = int(rows) self.cols = int(cols) self.format = format def do_it(self, dbg): try: frame = pydevd_vars.find_frame(self.thread_id, self.frame_id) var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals) xml = "<xml>" var, metaxml, rows, cols, format = pydevd_vars.array_to_meta_xml(var, self.name, self.format) xml += metaxml self.format = '%' + format if self.rows == -1 and self.cols == -1: self.rows = rows self.cols = cols xml += pydevd_vars.array_to_xml(var, self.roffset, self.coffset, self.rows, self.cols, self.format) xml += "</xml>" cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml) dbg.writer.add_command(cmd) except: cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str()) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalChangeVariable #======================================================================================================================= class InternalChangeVariable(InternalThreadCommand): """ 
changes the value of a variable """ def __init__(self, seq, thread_id, frame_id, scope, attr, expression): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.scope = scope self.attr = attr self.expression = expression def do_it(self, dbg): """ Converts request into python variable """ try: result = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg) xml = "<xml>" xml += pydevd_vars.var_to_xml(result, "") xml += "</xml>" cmd = dbg.cmd_factory.make_variable_changed_message(self.sequence, xml) dbg.writer.add_command(cmd) except Exception: cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str())) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalGetFrame #======================================================================================================================= class InternalGetFrame(InternalThreadCommand): """ gets the value of a variable """ def __init__(self, seq, thread_id, frame_id): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id def do_it(self, dbg): """ Converts request into python variable """ try: frame = pydevd_vars.find_frame(self.thread_id, self.frame_id) if frame is not None: xml = "<xml>" xml += pydevd_vars.frame_vars_to_xml(frame.f_locals) del frame xml += "</xml>" cmd = dbg.cmd_factory.make_get_frame_message(self.sequence, xml) dbg.writer.add_command(cmd) else: #pydevd_vars.dump_frames(self.thread_id) #don't print this error: frame not found: means that the client is not synchronized (but that's ok) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id)) dbg.writer.add_command(cmd) except: cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id)) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalEvaluateExpression #======================================================================================================================= class InternalEvaluateExpression(InternalThreadCommand): """ gets the value of a variable """ def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.expression = expression self.doExec = doExec self.doTrim = doTrim def do_it(self, dbg): """ Converts request into python variable """ try: result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec) xml = "<xml>" xml += pydevd_vars.var_to_xml(result, self.expression, self.doTrim) xml += "</xml>" cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalGetCompletions #======================================================================================================================= class 
InternalGetCompletions(InternalThreadCommand): """ Gets the completions in a given scope """ def __init__(self, seq, thread_id, frame_id, act_tok): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.act_tok = act_tok def do_it(self, dbg): """ Converts request into completions """ try: remove_path = None try: frame = pydevd_vars.find_frame(self.thread_id, self.frame_id) if frame is not None: msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok) cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg) dbg.writer.add_command(cmd) else: cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id)) dbg.writer.add_command(cmd) finally: if remove_path is not None: sys.path.remove(remove_path) except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalGetBreakpointException #======================================================================================================================= class InternalGetBreakpointException(InternalThreadCommand): """ Send details of exception raised while evaluating conditional breakpoint """ def __init__(self, thread_id, exc_type, stacktrace): self.sequence = 0 self.thread_id = thread_id self.stacktrace = stacktrace self.exc_type = exc_type def do_it(self, dbg): try: callstack = "<xml>" makeValid = pydevd_vars.make_valid_xml_value for filename, line, methodname, methodobj in self.stacktrace: if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"): # filename is a byte string encoded using the file system encoding # convert it to utf8 filename = filename.decode(file_system_encoding).encode("utf-8") callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \ % (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj)) callstack += "</xml>" cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalSendCurrExceptionTrace #======================================================================================================================= class InternalSendCurrExceptionTrace(InternalThreadCommand): """ Send details of the exception that was caught and where we've broken in. 
""" def __init__(self, thread_id, arg, curr_frame_id): ''' :param arg: exception type, description, traceback object ''' self.sequence = 0 self.thread_id = thread_id self.curr_frame_id = curr_frame_id self.arg = arg def do_it(self, dbg): try: cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg) del self.arg dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalSendCurrExceptionTraceProceeded #======================================================================================================================= class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand): """ Send details of the exception that was caught and where we've broken in. """ def __init__(self, thread_id): self.sequence = 0 self.thread_id = thread_id def do_it(self, dbg): try: cmd = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalEvaluateConsoleExpression #======================================================================================================================= class InternalEvaluateConsoleExpression(InternalThreadCommand): """ Execute the given command in the debug console """ def __init__(self, seq, thread_id, frame_id, line, buffer_output=True): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.line = line self.buffer_output = buffer_output def do_it(self, dbg): """ Create an XML for console output, error and more (true/false) <xml> <output message=output_message></output> <error message=error_message></error> <more>true/false</more> </xml> """ try: frame = pydevd_vars.find_frame(self.thread_id, self.frame_id) if frame is not None: console_message = pydevd_console.execute_console_command( frame, self.thread_id, self.frame_id, self.line, self.buffer_output) cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml()) else: from _pydevd_bundle.pydevd_console import ConsoleMessage console_message = ConsoleMessage() console_message.add_console_message( pydevd_console.CONSOLE_ERROR, "Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id), ) cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml()) except: exc = get_exception_traceback_str() cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalRunCustomOperation #======================================================================================================================= class InternalRunCustomOperation(InternalThreadCommand): """ Run a custom command on an expression """ def __init__(self, seq, thread_id, 
frame_id, scope, attrs, style, encoded_code_or_file, fnname): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.scope = scope self.attrs = attrs self.style = style self.code_or_file = unquote_plus(encoded_code_or_file) self.fnname = fnname def do_it(self, dbg): try: res = pydevd_vars.custom_operation(self.thread_id, self.frame_id, self.scope, self.attrs, self.style, self.code_or_file, self.fnname) resEncoded = quote_plus(res) cmd = dbg.cmd_factory.make_custom_operation_message(self.sequence, resEncoded) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalConsoleGetCompletions #======================================================================================================================= class InternalConsoleGetCompletions(InternalThreadCommand): """ Fetch the completions in the debug console """ def __init__(self, seq, thread_id, frame_id, act_tok): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.act_tok = act_tok def do_it(self, dbg): """ Get completions and write back to the client """ try: frame = pydevd_vars.find_frame(self.thread_id, self.frame_id) completions_xml = pydevd_console.get_completions(frame, self.act_tok) cmd = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc) dbg.writer.add_command(cmd) #======================================================================================================================= # InternalConsoleExec #======================================================================================================================= class InternalConsoleExec(InternalThreadCommand): """ gets the value of a variable """ def __init__(self, seq, thread_id, frame_id, expression): self.sequence = seq self.thread_id = thread_id self.frame_id = frame_id self.expression = expression def do_it(self, dbg): """ Converts request into python variable """ try: try: #don't trace new threads created by console command disable_trace_thread_modules() result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression) xml = "<xml>" xml += pydevd_vars.var_to_xml(result, "") xml += "</xml>" cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml) dbg.writer.add_command(cmd) except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc) dbg.writer.add_command(cmd) finally: enable_trace_thread_modules() sys.stderr.flush() sys.stdout.flush() #======================================================================================================================= # pydevd_find_thread_by_id #======================================================================================================================= def pydevd_find_thread_by_id(thread_id): try: # there was a deadlock here when I did not remove the tracing function when thread was dead threads = threading.enumerate() for i in threads: tid = get_thread_id(i) if thread_id == tid or thread_id.endswith('|' + tid): return i sys.stderr.write("Could not find thread 
%s\n" % thread_id) sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in threads]) sys.stderr.flush() except: traceback.print_exc() return None
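# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pydevd): thread ids sent by the IDE may be
# prefixed with a process id as "<pid>|<tid>", which is why the lookup above
# accepts either an exact match or an endswith('|' + tid) match. The helper
# below just restates that test on plain strings.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def _matches(requested_id, tid):
        return requested_id == tid or requested_id.endswith('|' + tid)

    assert _matches('thread_7', 'thread_7')
    assert _matches('pid_1|thread_7', 'thread_7')
    assert not _matches('pid_1|thread_7', 'thread_1')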
mrknow/filmkodi
plugin.video.mrknow/mylib/_pydevd_bundle/pydevd_comm.py
Python
apache-2.0
57,299
import select import six import unittest from kazoo.client import KazooClient, KazooState from kazoo.exceptions import NoNodeError, NodeExistsError from kazoo.handlers.threading import SequentialThreadingHandler from kazoo.protocol.states import KeeperState, ZnodeStat from mock import Mock, PropertyMock, patch from patroni.dcs.zookeeper import Cluster, Leader, PatroniKazooClient,\ PatroniSequentialThreadingHandler, ZooKeeper, ZooKeeperError class MockKazooClient(Mock): leader = False exists = True def __init__(self, *args, **kwargs): super(MockKazooClient, self).__init__() self._session_timeout = 30000 @property def client_id(self): return (-1, '') @staticmethod def retry(func, *args, **kwargs): return func(*args, **kwargs) def get(self, path, watch=None): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if path == '/broken/status': return (b'{', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) elif path in ('/no_node', '/legacy/status'): raise NoNodeError elif '/members/' in path: return ( b'postgres://repuser:rep-pass@localhost:5434/postgres?application_name=http://127.0.0.1:8009/patroni', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0 if self.exists else -1, 0, 0, 0) ) elif path.endswith('/optime/leader'): return (b'500', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) elif path.endswith('/leader'): if self.leader: return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) elif path.endswith('/initialize'): return (b'foo', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) elif path.endswith('/status'): return (b'{"optime":500,"slots":{"ls":1234567}}', ZnodeStat(0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0)) return (b'', ZnodeStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) @staticmethod def get_children(path, watch=None, include_data=False): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if path.startswith('/no_node'): raise NoNodeError elif path in ['/service/bla/', '/service/test/']: return ['initialize', 'leader', 'members', 'optime', 'failover', 'sync'] return ['foo', 'bar', 'buzz'] def create(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if not isinstance(value, (six.binary_type,)): raise TypeError("Invalid type for 'value' (must be a byte string)") if b'Exception' in value: raise Exception if path.endswith('/initialize') or path == '/service/test/optime/leader': raise Exception elif b'retry' in value or (b'exists' in value and self.exists): raise NodeExistsError def create_async(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False): return self.create(path, value, acl, ephemeral, sequence, makepath) or Mock() @staticmethod def set(path, value, version=-1): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") if not isinstance(value, (six.binary_type,)): raise TypeError("Invalid type for 'value' (must be a byte string)") if path == '/service/bla/optime/leader': raise Exception if path == '/service/test/members/bar' and b'retry' in value: return if path in ('/service/test/failover', '/service/test/config', '/service/test/sync'): if b'Exception' in value: raise Exception elif value == b'ok': return raise NoNodeError def set_async(self, path, value, version=-1): return self.set(path, value, version) or Mock() def delete(self, path, version=-1, 
recursive=False): if not isinstance(path, six.string_types): raise TypeError("Invalid type for 'path' (string expected)") self.exists = False if path == '/service/test/leader': self.leader = True raise Exception elif path == '/service/test/members/buzz': raise Exception elif path.endswith('/') or path.endswith('/initialize') or path == '/service/test/members/bar': raise NoNodeError def delete_async(self, path, version=-1, recursive=False): return self.delete(path, version, recursive) or Mock() class TestPatroniSequentialThreadingHandler(unittest.TestCase): def setUp(self): self.handler = PatroniSequentialThreadingHandler(10) @patch.object(SequentialThreadingHandler, 'create_connection', Mock()) def test_create_connection(self): self.assertIsNotNone(self.handler.create_connection(())) self.assertIsNotNone(self.handler.create_connection((), 40)) self.assertIsNotNone(self.handler.create_connection(timeout=40)) @patch.object(SequentialThreadingHandler, 'select', Mock(side_effect=ValueError)) def test_select(self): self.assertRaises(select.error, self.handler.select) class TestPatroniKazooClient(unittest.TestCase): def test__call(self): c = PatroniKazooClient() with patch.object(KazooClient, '_call', Mock()): self.assertIsNotNone(c._call(None, Mock())) c._state = KeeperState.CONNECTING self.assertFalse(c._call(None, Mock())) class TestZooKeeper(unittest.TestCase): @patch('patroni.dcs.zookeeper.PatroniKazooClient', MockKazooClient) def setUp(self): self.zk = ZooKeeper({'hosts': ['localhost:2181'], 'scope': 'test', 'name': 'foo', 'ttl': 30, 'retry_timeout': 10, 'loop_wait': 10, 'set_acls': {'CN=principal2': ['ALL']}}) def test_session_listener(self): self.zk.session_listener(KazooState.SUSPENDED) def test_reload_config(self): self.zk.reload_config({'ttl': 20, 'retry_timeout': 10, 'loop_wait': 10}) self.zk.reload_config({'ttl': 20, 'retry_timeout': 10, 'loop_wait': 5}) def test_get_node(self): self.assertIsNone(self.zk.get_node('/no_node')) def test_get_children(self): self.assertListEqual(self.zk.get_children('/no_node'), []) def test__inner_load_cluster(self): self.zk._base_path = self.zk._base_path.replace('test', 'bla') self.zk._inner_load_cluster() self.zk._base_path = self.zk._base_path = '/broken' self.zk._inner_load_cluster() self.zk._base_path = self.zk._base_path = '/legacy' self.zk._inner_load_cluster() self.zk._base_path = self.zk._base_path = '/no_node' self.zk._inner_load_cluster() def test_get_cluster(self): self.assertRaises(ZooKeeperError, self.zk.get_cluster) cluster = self.zk.get_cluster(True) self.assertIsInstance(cluster.leader, Leader) self.zk.status_watcher(None) self.zk.get_cluster() self.zk.touch_member({'foo': 'foo'}) self.zk._name = 'bar' self.zk.status_watcher(None) with patch.object(ZooKeeper, 'get_node', Mock(side_effect=Exception)): self.zk.get_cluster() cluster = self.zk.get_cluster() self.assertEqual(cluster.last_lsn, 500) def test_delete_leader(self): self.assertTrue(self.zk.delete_leader()) def test_set_failover_value(self): self.zk.set_failover_value('') self.zk.set_failover_value('ok') self.zk.set_failover_value('Exception') def test_set_config_value(self): self.zk.set_config_value('', 1) self.zk.set_config_value('ok') self.zk.set_config_value('Exception') def test_initialize(self): self.assertFalse(self.zk.initialize()) def test_cancel_initialization(self): self.zk.cancel_initialization() def test_touch_member(self): self.zk._name = 'buzz' self.zk.get_cluster() self.zk.touch_member({'new': 'new'}) self.zk._name = 'bar' self.zk.touch_member({'new': 'new'}) 
self.zk._name = 'na' self.zk._client.exists = 1 self.zk.touch_member({'Exception': 'Exception'}) self.zk._name = 'bar' self.zk.touch_member({'retry': 'retry'}) self.zk._fetch_cluster = True self.zk.get_cluster() self.zk.touch_member({'retry': 'retry'}) self.zk.touch_member({'conn_url': 'postgres://repuser:rep-pass@localhost:5434/postgres', 'api_url': 'http://127.0.0.1:8009/patroni'}) def test_take_leader(self): self.zk.take_leader() with patch.object(MockKazooClient, 'create', Mock(side_effect=Exception)): self.zk.take_leader() def test_update_leader(self): self.assertTrue(self.zk.update_leader(12345)) @patch.object(Cluster, 'min_version', PropertyMock(return_value=(2, 0))) def test_write_leader_optime(self): self.zk.last_lsn = '0' self.zk.write_leader_optime('1') with patch.object(MockKazooClient, 'create_async', Mock()): self.zk.write_leader_optime('1') with patch.object(MockKazooClient, 'set_async', Mock()): self.zk.write_leader_optime('2') self.zk._base_path = self.zk._base_path.replace('test', 'bla') self.zk.get_cluster() self.zk.write_leader_optime('3') def test_delete_cluster(self): self.assertTrue(self.zk.delete_cluster()) def test_watch(self): self.zk.watch(None, 0) self.zk.event.is_set = Mock(return_value=True) self.zk._fetch_status = False self.zk.watch(None, 0) def test__kazoo_connect(self): self.zk._client._retry.deadline = 1 self.zk._orig_kazoo_connect = Mock(return_value=(0, 0)) self.zk._kazoo_connect(None, None) def test_sync_state(self): self.zk.set_sync_state_value('') self.zk.set_sync_state_value('ok') self.zk.set_sync_state_value('Exception') self.zk.delete_sync_state() def test_set_history_value(self): self.zk.set_history_value('{}')
zalando/patroni
tests/test_zookeeper.py
Python
mit
10,527
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # MDAnalysis --- https://www.mdanalysis.org # Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # doi: 10.25080/majora-629e541a-00e # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # __all__ = [ 'HydrogenBondAnalysis', 'WaterBridgeAnalysis', 'HydrogenBondAutoCorrel', 'find_hydrogen_donors' ] from .hbond_analysis import HydrogenBondAnalysis from .wbridge_analysis import WaterBridgeAnalysis from .hbond_autocorrel import HydrogenBondAutoCorrel, find_hydrogen_donors
MDAnalysis/mdanalysis
package/MDAnalysis/analysis/hydrogenbonds/__init__.py
Python
gpl-2.0
1,353
# -*- coding: utf-8 -*- # Copyright(C) 2016 Julien Veyssier # # This file is part of a weboob module. # # This weboob module is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This weboob module is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this weboob module. If not, see <http://www.gnu.org/licenses/>. from weboob.capabilities.lyrics import CapLyrics, SongLyrics from weboob.tools.backend import Module from .browser import ParolesmusiqueBrowser __all__ = ['ParolesmusiqueModule'] class ParolesmusiqueModule(Module, CapLyrics): NAME = 'parolesmusique' MAINTAINER = u'Julien Veyssier' EMAIL = 'eneiluj@gmx.fr' VERSION = '2.1' DESCRIPTION = 'paroles-musique lyrics website' LICENSE = 'AGPLv3+' BROWSER = ParolesmusiqueBrowser def get_lyrics(self, id): return self.browser.get_lyrics(id) def iter_lyrics(self, criteria, pattern): return self.browser.iter_lyrics(criteria, pattern.encode('utf-8')) def fill_songlyrics(self, songlyrics, fields): if 'content' in fields: sl = self.get_lyrics(songlyrics.id) songlyrics.content = sl.content return songlyrics OBJECTS = { SongLyrics: fill_songlyrics }
laurentb/weboob
modules/parolesmusique/module.py
Python
lgpl-3.0
1,688
import _plotly_utils.basevalidators class ColorValidator(_plotly_utils.basevalidators.ColorValidator): def __init__( self, plotly_name="color", parent_name="surface.hoverlabel.font", **kwargs ): super(ColorValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "none"), **kwargs )
plotly/plotly.py
packages/python/plotly/plotly/validators/surface/hoverlabel/font/_color.py
Python
mit
470
import warnings import chainer import numpy as np import onnx from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE from onnx_chainer.functions.opset_version import support from onnx_chainer import onnx_helper TENSOR_TYPE_TO_NAME = { 0: 'UNDEFINED', 1: 'FLOAT', 2: 'UINT8', 3: 'INT8', 4: 'UINT16', 5: 'INT16', 6: 'INT32', 7: 'INT64', 8: 'STRING', 9: 'BOOL', 10: 'FLOAT16', 11: 'DOUBLE', 12: 'UINT32', 13: 'UINT64', 14: 'COMPLEX64', 15: 'COMPLEX128', } @support((1, 6)) def convert_Cast(func, opset_version, input_names, output_names, context): typ = func.type if isinstance(func.type, np.dtype) else np.dtype(func.type) if opset_version == 1: return onnx_helper.make_node( 'Cast', input_names, output_names, to=TENSOR_TYPE_TO_NAME[NP_TYPE_TO_TENSOR_TYPE[typ]] ), elif opset_version == 6: return onnx_helper.make_node( 'Cast', input_names, output_names, to=NP_TYPE_TO_TENSOR_TYPE[typ] ), @support((1, 4)) def convert_Concat(func, opset_version, input_names, output_names, context): if opset_version == 1: return onnx_helper.make_node( 'Concat', input_names, output_names, axis=func.axis ), elif opset_version == 4: return onnx_helper.make_node( 'Concat', input_names, output_names, axis=func.axis ), def convert_Copy(func, opset_version, input_names, output_names, context): return onnx_helper.make_node( 'Identity', input_names, output_names ), def convert_Depth2Space( func, opset_version, input_names, output_names, context): return onnx_helper.make_node( 'DepthToSpace', input_names, output_names, blocksize=func.r ), def get_slice_node( gb, opset_version, context, input_names, axes, starts, ends, steps): if opset_version < 11 and any([i != 1 for i in steps]): raise ValueError( 'GetItem with n-step slicing is supported from opset11, ' 'opset{} is not supported'.format(opset_version)) if opset_version < 10: return gb.op( 'Slice', input_names, axes=axes, starts=starts, ends=ends) else: inputs = [('starts', starts), ('ends', ends), ('axes', axes)] if opset_version > 10: inputs.append(('steps', steps)) for name, values in inputs: param_name = context.add_const( np.asarray(list(values), dtype=np.int64), name) input_names.append(param_name) return gb.op('Slice', input_names) def _to_ndarray(x, dtype=np.int64): if isinstance(x, list): return np.array(x, dtype=dtype) else: return chainer.cuda.to_cpu(x).astype(dtype) @support((1, 10, 11)) def convert_GetItem(func, opset_version, input_names, output_names, context): x = func.inputs[0] axes, starts, ends, steps = [], [], [], [] squeeze_idxs, unsqueeze_idxs = [], [] skipped = 0 # when set ellipsis, need to skip index rolling prev_gathered_axis = -1 gather_axis = -1 gather_idx = None # when GatherND, set first array for broadcasting gather_nd_idx = None is_used_slice_whole = False # GatherND does not support axis, need to care for i, idx in enumerate(func.slices): # axis means the index of input x, adjust None and Ellipsis counts axis = i - len(unsqueeze_idxs) + skipped if isinstance(idx, slice): if idx.start is None and idx.stop is None and idx.step is None: is_used_slice_whole = True continue axes.append(axis) step = 1 if idx.step is None else idx.step steps.append(step) if step < 0: starts.append( np.iinfo(np.int64).max if idx.start is None else idx.start) ends.append( np.iinfo(np.int64).min if idx.stop is None else idx.stop) else: starts.append(0 if idx.start is None else idx.start) ends.append( np.iinfo(np.int64).max if idx.stop is None else idx.stop) elif isinstance(idx, int): axes.append(axis) steps.append(1) if idx == -1: starts.append(idx) 
ends.append(np.iinfo(np.int64).max) else: starts.append(idx) ends.append(idx+1) squeeze_idxs.append(axis) elif isinstance(idx, np.ndarray) and idx.ndim == 0: scalar_idx = idx.item() axes.append(axis) starts.append(scalar_idx) ends.append(scalar_idx+1) steps.append(1) squeeze_idxs.append(axis) elif idx is None: unsqueeze_idxs.append(i - len(squeeze_idxs) + skipped) elif idx is Ellipsis: # calculate rest slice number except None, GetItem does not allow # multiple Ellipsis, so ignore latter Ellipsis count rest_slice_len = len( [idx_ for idx_ in func.slices[i+1:] if idx_ is not None]) assert skipped == 0 skipped = len(x.shape) - axis - rest_slice_len - 1 elif isinstance(idx, (list,) + chainer.get_array_types()): if prev_gathered_axis >= 0: if (i - 1) != prev_gathered_axis: raise ValueError( 'ONNX-Chainer does not support non-consecutive' 'multiple advanced indexing') if is_used_slice_whole: raise ValueError( 'ONNX-Chainer does not support whole indexing(`[:]`)' 'in front of multiple advanced indexing') if unsqueeze_idxs: raise ValueError( 'ONNX-Chainer does not support new axis in front of ' 'multiple advanced indexing') # multiple advanced index, convert to GatherND idx_array = _to_ndarray(idx) base_idx = gather_idx if gather_nd_idx is None else\ gather_nd_idx gather_nd_idx = np.vstack((base_idx, idx_array)) prev_gathered_axis = i else: # convert to Gather, if next index is also list, change to # GatherND gather_axis = axis - len(squeeze_idxs) + len(unsqueeze_idxs) gather_idx = _to_ndarray(idx) prev_gathered_axis = i else: raise ValueError( 'GetItem with type {} cannot handle in ONNX Slice, so that ' 'ONNX-Chainer does not accept the type'.format(type(idx))) gb = onnx_helper.GraphBuilder() slice_output = input_names if axes: output = get_slice_node( gb, opset_version, context, slice_output, axes, starts, ends, steps) slice_output = [output] if squeeze_idxs: output = gb.op('Squeeze', slice_output, axes=squeeze_idxs) slice_output = [output] if unsqueeze_idxs: output = gb.op('Unsqueeze', slice_output, axes=unsqueeze_idxs) slice_output = [output] if gather_nd_idx is not None: if opset_version < 11: raise ValueError( 'ONNX-Chainer supports multiple advanced indexing from opset11' ', opset{} is not supported'.format(opset_version)) gather_nd_idx_name = context.add_const(gather_nd_idx.T, 'indices') slice_output.append(gather_nd_idx_name) gb.op('GatherND', slice_output) elif gather_idx is not None: gather_idx_name = context.add_const(gather_idx, 'indices') slice_output.append(gather_idx_name) gb.op('Gather', slice_output, axis=gather_axis) return gb.nodes(output_names=output_names) @support((9, 11)) def convert_SelectItem(func, opset_version, input_names, output_names, context): gb = onnx_helper.GraphBuilder() if opset_version >= 11: t = gb.op('Unsqueeze', [input_names[1]], axes=[1]) out = gb.op('GatherElements', [input_names[0], t], axis=1) gb.op('Squeeze', [out], axes=[1]) else: data, target_idxs = input_names target_idxs = gb.op('Cast', [target_idxs], to=NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]) n_rows = gb.op('Shape', [target_idxs]) # This is an equivalent of using Range. 
one_1 = onnx.helper.make_tensor( 'one_1', onnx.TensorProto.FLOAT, [1], [1]) ones = gb.op('ConstantOfShape', [n_rows], value=one_1) row_idxs = gb.op('Squeeze', [gb.op('NonZero', [ones])]) data_shape = gb.op('Shape', [data]) one_2 = context.add_const(np.array([1]), 'one_2') n_cols = gb.op('Gather', [data_shape, one_2], axis=0) data = gb.op('Squeeze', [gb.op('Flatten', [data], axis=2)]) target_idxs = gb.op( 'Add', [target_idxs, gb.op('Mul', [row_idxs, n_cols])]) gb.op('Gather', [data, target_idxs], axis=0) return gb.nodes(output_names) @support((1, 2, 11)) def convert_Pad(func, opset_version, input_names, output_names, context): if func.mode not in ['constant', 'reflect', 'edge']: raise ValueError( '{} mode is not supported in ONNX\'s Pad operation'.format( func.mode)) pad_begin = [] pad_end = [] pad_bw = func.pad_bw if pad_bw.ndim == 1: pad_bw = np.tile(pad_bw, (len(func.inputs[0].shape), 1)) for pp in pad_bw.tolist(): pad_begin.append(pp[0]) pad_end.append(pp[1]) pad = pad_begin + pad_end constant_value = func.keywords.get('constant_values', None) if constant_value is not None: # 'constant_values' only accepts int or array-like on Chainer if not isinstance(constant_value, int) and len(constant_value) > 1: raise ValueError( 'ONNX doesn\'t support multiple constant values for Pad ' 'operation') elif not isinstance(constant_value, int): constant_value = float(constant_value[0]) else: constant_value = float(constant_value) if opset_version == 1: kwargs = { 'mode': func.mode, 'paddings': pad, } if constant_value is not None: kwargs['value'] = constant_value elif opset_version == 2: kwargs = { 'mode': func.mode, 'pads': pad, } if constant_value is not None: kwargs['value'] = constant_value elif opset_version == 11: pads_name = context.add_const(np.array(pad, dtype=np.int64), 'pads') input_names.append(pads_name) if constant_value is not None: constant_value_name = context.add_const( np.array(constant_value, dtype=np.float32), 'constant_value') input_names.append(constant_value_name) kwargs = {'mode': func.mode} return onnx_helper.make_node('Pad', input_names, output_names, **kwargs), @support((9, 11)) def convert_Permutate(func, opset_version, input_names, output_names, context): gb = onnx_helper.GraphBuilder() indices_name = context.get_name(func.indices) if func.inv: empty = context.add_const( np.zeros(dtype=np.int64, shape=func.indices.shape), 'empty') r = context.add_const(np.arange(len(func.indices), dtype=np.int64), 'range') op = 'ScatterElements' if opset_version == 11 else 'Scatter' indices_name = gb.op(op, [empty, indices_name, r]) input_names.append(indices_name) gb.op_output_named('Gather', input_names, output_names, axis=func.axis) return gb.nodes() @support((1, 5)) def convert_Reshape(func, opset_version, input_names, output_names, context): if opset_version == 1: return onnx_helper.make_node( 'Reshape', input_names, output_names, shape=func.shape ), elif opset_version == 5: if hasattr(func, 'shape'): # if the function has shape parameter, means not dynamic assert len(input_names) == 1 shape_name = context.add_const( np.asarray(list(func.shape), dtype=np.int64), 'shape') input_names.append(shape_name) else: if len(input_names) != 2: raise ValueError('shape must be set as parameter or 2nd input') return onnx_helper.make_node( 'Reshape', input_names, output_names, ), def convert_Space2Depth( func, opset_version, input_names, output_names, context): return onnx_helper.make_node( 'SpaceToDepth', input_names, output_names, blocksize=func.r ), @support((1, 2)) def convert_SplitAxis(func, 
opset_version, input_names, output_names, context): if func.indices is not None: indices_or_sections = func.indices else: indices_or_sections = func.sections total = func.inputs[0].shape[func.axis] if hasattr(indices_or_sections, '__iter__'): split = [] prev_i = 0 for i in indices_or_sections: split.append(i - prev_i) prev_i = i split.append(total - prev_i) else: length = total // indices_or_sections split = [length for _ in range(indices_or_sections)] assert len(output_names) == len(split) if opset_version == 1: return onnx_helper.make_node( 'Split', input_names, output_names, axis=func.axis, split=split ), elif opset_version == 2: return onnx_helper.make_node( 'Split', input_names, output_names, axis=func.axis, split=split ), def convert_Squeeze(func, opset_version, input_names, output_names, context): if func.axis is None: axis = [] for i, s in enumerate(func.inputs[0].shape): if s == 1: axis.append(i) else: axis = func.axis return onnx_helper.make_node( 'Squeeze', input_names, output_names, axes=axis ), def convert_Swapaxes(func, opset_version, input_names, output_names, context): perm = list(range(len(func.inputs[0].shape))) perm[func.axis1], perm[func.axis2] = perm[func.axis2], perm[func.axis1] return onnx_helper.make_node( 'Transpose', input_names, output_names, perm=perm ), @support((1, 6)) def convert_Tile(func, opset_version, input_names, output_names, context): # Add tiles and axis to graph if isinstance(func.reps, int): func.reps = [func.reps] tiles_name = context.add_const( np.asarray(func.reps, dtype=np.int64), 'tiles') input_names.append(tiles_name) # In operater version = 1, axis also should be given if opset_version == 1: axis_name = context.add_const( np.array([i for i, _ in enumerate(func.reps)], dtype=np.float32), 'axis') input_names.append(axis_name) return onnx_helper.make_node('Tile', input_names, output_names), def convert_Transpose(func, opset_version, input_names, output_names, context): if func.axes is None: node = onnx_helper.make_node('Transpose', input_names, output_names) else: node = onnx_helper.make_node( 'Transpose', input_names, output_names, perm=func.axes ) return node, def convert_ExpandDims( func, opset_version, input_names, output_names, context): axis = func.axis if axis < 0: axis = len(func.inputs[0].shape) + 1 + axis return onnx_helper.make_node( 'Unsqueeze', input_names, output_names, axes=[axis]), @support((9,)) def convert_Where(func, opset_version, input_names, output_names, context): input_names.insert(0, context.get_name(func.condition)) return onnx_helper.make_node('Where', input_names, output_names), @support((7, 9, 10, 11)) def convert_Repeat(func, opset_version, input_names, output_names, context): repeats = func.repeats if len(repeats) > 1: raise NotImplementedError( 'ONNX-Chainer currently does not support elementwise repeat') gb = onnx_helper.GraphBuilder() inputs = list(input_names) axis = func.axis if axis is None: shape_name = context.add_const(np.array([-1], dtype=np.int64), 'shape') input_names.append(shape_name) inputs = [gb.op('Reshape', input_names)] scales = [float(repeats[0])] else: scales = [1.0] * func.inputs[0].data.ndim scales[axis] = float(repeats[0]) if opset_version == 7: gb.op_output_named('Upsample', inputs, output_names, scales=scales) return gb.nodes() scales_name = context.add_const( np.array(scales, dtype=np.float32), 'scales') if opset_version in [9, 10]: inputs.append(scales_name) op = 'Upsample' if opset_version == 9 else 'Resize' gb.op_output_named(op, inputs, output_names) return gb.nodes() if opset_version 
== 11: roi = context.add_const(np.array([]), 'roi') inputs.extend([roi, scales_name]) gb.op_output_named('Resize', inputs, output_names) return gb.nodes() @support((7, 9, 10, 11)) def convert_ResizeImages( func, opset_version, input_names, output_names, context): warnings.warn( '`resize_images` is mapped to `Upsampling` ONNX op with bilinear ' 'interpolation. ' 'Behavior of bilinear interpolation differs from each implementation. ' 'See the issue https://github.com/chainer/onnx-chainer/issues/147 ' 'for details.', UserWarning) outsize = (func.out_H, func.out_W) h, w = func.inputs[0].shape[2:] # Compute scaling factor. # NOTE(syoyo): Despite of its name, `Upsample` onnx op will downsample # images when scale value is less than 1.0 scales = [1.0, 1.0, float(outsize[0]) / float(h), float(outsize[1]) / float(w)] if (scales[2] < 1.0e-8) and (scales[3] < 1.0e-8): raise ValueError( 'scaling factor is too small or zero. scales for h = {}, scales ' 'for w = {}'.format(scales[2], scales[3])) # resize_images in Chainer only supports bilinear interpolation # Actually this will be mapped to 'bilinear' in onnxruntime mode = 'linear' if opset_version == 7: return onnx_helper.make_node('Upsample', input_names, output_names, scales=scales, mode=mode), scales_name = context.add_const( np.array(scales, dtype=np.float32), 'scales') if opset_version in [9, 10]: input_names.append(scales_name) op = 'Upsample' if opset_version == 9 else 'Resize' return onnx_helper.make_node(op, input_names, output_names, mode=mode), if opset_version == 11: roi_name = context.add_const(np.array([]), 'roi') input_names.extend([roi_name, scales_name]) return onnx_helper.make_node( 'Resize', input_names, output_names, mode=mode), def convert_Stack(func, opset_version, input_names, output_names, context): gb = onnx_helper.GraphBuilder() axis = func.axis if axis < 0: axis = len(func.inputs[0].shape) + 1 + axis # To use concat op, reshape every inputs add new axes inputs = [gb.op('Unsqueeze', [name], axes=[axis]) for name in input_names] gb.op_output_named('Concat', inputs, output_names, axis=axis) return gb.nodes() def convert_Hstack(func, opset_version, input_names, output_names, context): gb = onnx_helper.GraphBuilder() input0_ndim = len(func.inputs[0].shape) inputs = input_names axis = 1 if input0_ndim == 0: inputs = [gb.op('Unsqueeze', [name], axes=[0]) for name in input_names] axis = 0 elif input0_ndim == 1: axis = 0 gb.op_output_named('Concat', inputs, output_names, axis=axis) return gb.nodes() def convert_Vstack(func, opset_version, input_names, output_names, context): gb = onnx_helper.GraphBuilder() input0_ndim = len(func.inputs[0].shape) inputs = input_names if input0_ndim == 0: inputs = [gb.op('Unsqueeze', [name], axes=[0, 1]) for name in input_names] elif input0_ndim == 1: inputs = [gb.op('Unsqueeze', [name], axes=[0]) for name in input_names] gb.op_output_named('Concat', inputs, output_names, axis=0) return gb.nodes() def convert_Dstack(func, opset_version, input_names, output_names, context): gb = onnx_helper.GraphBuilder() input0_ndim = len(func.inputs[0].shape) inputs = input_names if input0_ndim == 0: inputs = [gb.op('Unsqueeze', [name], axes=[0, 1, 2]) for name in input_names] elif input0_ndim == 1: inputs = [gb.op('Unsqueeze', [name], axes=[0, 2]) for name in input_names] elif input0_ndim == 2: inputs = [gb.op('Unsqueeze', [name], axes=[2]) for name in input_names] gb.op_output_named('Concat', inputs, output_names, axis=2) return gb.nodes() def convert_Separate(func, opset_version, input_names, output_names, 
context): gb = onnx_helper.GraphBuilder() split_outs = gb.op( 'Split', input_names, num_outputs=len(output_names), axis=func.axis) if len(output_names) == 1: split_outs = [split_outs] for i, node_name in enumerate(split_outs): gb.op_output_named( 'Squeeze', [node_name], [output_names[i]], axes=[func.axis]) return gb.nodes() def convert_Shape(func, opset_version, input_names, output_names, context): return onnx_helper.make_node('Shape', input_names, output_names), def convert_Moveaxis(func, opset_version, input_names, output_names, context): ndim = len(func.inputs[0].shape) source = [a % ndim for a in func.source] destination = [a % ndim for a in func.destination] order = [n for n in range(ndim) if n not in source] for dest, src in sorted(zip(destination, source)): order.insert(dest, src) node = onnx_helper.make_node('Transpose', input_names, output_names, perm=order) return node, def convert_Rollaxis(func, opset_version, input_names, output_names, context): ndim = len(func.inputs[0].shape) order = list(range(ndim)) order.remove(func.axis) order.insert(func.start, func.axis) node = onnx_helper.make_node('Transpose', input_names, output_names, perm=order) return node, def convert_TransposeSequence( func, opset_version, input_names, output_names, context): if any(x.shape != func.inputs[0].shape for x in func.inputs): raise ValueError( 'ONNX-Chainer can convert TransposeSequence only when all ' 'inputs have same shape') gb = onnx_helper.GraphBuilder() n = func.inputs[0].shape[0] concat_out = gb.op( 'Concat', [gb.op('Unsqueeze', [name], axes=[0]) for name in input_names], axis=0) perm = list(range(len(func.inputs[0].shape) + 1)) perm[0], perm[1] = perm[1], perm[0] transpose_out = gb.op('Transpose', [concat_out], perm=perm) split_outs = gb.op('Split', [transpose_out], axis=0, num_outputs=n) if n == 1: split_outs = [split_outs] for i, name in enumerate(split_outs): gb.op_output_named('Squeeze', [name], [output_names[i]], axes=[0]) return gb.nodes()
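# ---------------------------------------------------------------------------
# Illustrative self-check (not part of onnx-chainer): the axis permutation
# built inside convert_Moveaxis above should agree with numpy.moveaxis. The
# helper repeats that permutation logic on plain Python ints; `np` is already
# imported at the top of this module.
# ---------------------------------------------------------------------------
def _moveaxis_perm(ndim, source, destination):
    source = [a % ndim for a in source]
    destination = [a % ndim for a in destination]
    order = [n for n in range(ndim) if n not in source]
    for dest, src in sorted(zip(destination, source)):
        order.insert(dest, src)
    return order


if __name__ == '__main__':
    _x = np.zeros((2, 3, 4, 5))
    _perm = _moveaxis_perm(_x.ndim, [0], [2])
    # Transposing with the computed permutation matches numpy.moveaxis.
    assert _x.transpose(_perm).shape == np.moveaxis(_x, 0, 2).shape == (3, 4, 2, 5)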
pfnet/chainer
onnx_chainer/functions/array.py
Python
mit
23,717
# -*- coding:utf-8 -*- from yepes.apps import apps AbstractConfiguration = apps.get_class('thumbnails.abstract_models', 'AbstractConfiguration') AbstractSource = apps.get_class('thumbnails.abstract_models', 'AbstractSource') AbstractThumbnail = apps.get_class('thumbnails.abstract_models', 'AbstractThumbnail') class Configuration(AbstractConfiguration): pass class Source(AbstractSource): pass class Thumbnail(AbstractThumbnail): pass
samuelmaudo/yepes
yepes/contrib/thumbnails/models.py
Python
bsd-3-clause
455
# -*- coding: utf-8 -*- # # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2013 Vassilii Khachaturov # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Deeper testing of some DateParser internals. """ from __future__ import print_function, unicode_literals import unittest from ...utils.grampslocale import GrampsLocale from ...lib.date import Date class DateDisplayTest(unittest.TestCase): def setUp(self): from .._datedisplay import DateDisplay self.display = DateDisplay() self.display_RU = GrampsLocale(lang='ru').date_displayer def assert_map_key_val(self, m, k, v): try: self.assertEqual(m[k], v) except KeyError: self.assertTrue(False, list(m.items())) class DateDisplayCalendarTest(DateDisplayTest): def test_calendar_gregorian_is_empty(self): self.assert_map_key_val(self.display.calendar, Date.CAL_GREGORIAN, "") def test_calendar_julian_RU(self): self.assert_map_key_val(self.display_RU.calendar, Date.CAL_JULIAN, 'юлианский') # This class tests common functionality in DateDisplay as applied to RU, # and so it is coupled to translated strings and inflection names # extracted by lexgettext from ru.po class DateDisplayInflectionsTestRU(DateDisplayTest): def setUp(self): DateDisplayTest.setUp(self) self.dd = self.display = self.display_RU self.months = self.dd._ds.long_months # TODO hardwired magic numbers! Bad API smell. self.dd.set_format(4) # day month_name year self.may = self.months[5] def assertInflectionInDate(self, inflection, date, month=None): if month is None: month = date.get_month() month_lexeme = self.months[month] self.assertIn(month_lexeme.f[inflection], self.dd.display(date)) def test_month_only_date_nominative(self): for qual in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED): d1945may = Date(1945, 5, 0) d1945may.set_quality(qual) self.assertInflectionInDate('И', d1945may) def test_day_month_date_genitive(self): d1945may9 = Date(1945, 5, 9) self.assertInflectionInDate('Р', d1945may9) def test_before_month_only_date_genitive(self): d1945may = Date(1945, 5, 0) d1945may.set_modifier(Date.MOD_BEFORE) # TODO hardwired magic numbers! Bad API smell. for inflecting_format in (3,4): self.dd.set_format(inflecting_format) # this depends on the fact that in Russian the short and long forms for May # will be the same! self.assertIn("до мая", self.dd.display(d1945may)) def test_between_month_only_dates_ablative(self): b1945may_1946may = Date() b1945may_1946may.set( modifier=Date.MOD_RANGE, value=(0, 5, 1945, False, 0, 5, 1946, False)) # TODO hardwired magic numbers! Bad API smell. for inflecting_format in (3,4): self.dd.set_format(inflecting_format) # this depends on the fact that in Russian the short and long forms for May # will be the same! 
self.assertIn("между маем", self.dd.display(b1945may_1946may)) self.assertIn("и маем", self.dd.display(b1945may_1946may)) def test_month_only_date_span_from_genitive_to_accusative(self): f1945may_t1946may = Date() f1945may_t1946may.set( modifier=Date.MOD_SPAN, value=(0, 5, 1945, False, 0, 5, 1946, False)) # TODO hardwired magic numbers! Bad API smell. for inflecting_format in (3,4): self.dd.set_format(inflecting_format) # this depends on the fact that in Russian the short and long forms for May # will be the same! self.assertIn("с мая", self.dd.display(f1945may_t1946may)) self.assertIn("по май", self.dd.display(f1945may_t1946may)) if __name__ == "__main__": unittest.main()
pmghalvorsen/gramps_branch
gramps/gen/datehandler/test/datedisplay_test.py
Python
gpl-2.0
4,682
"""卷积操作的空间含义定义如下:如果输入数据是一个四维的 input ,卷积操作的步长stride是一个四维数组. 数据维度是 [batch, in_height, in_width, ...],卷积核也是一个四维的卷积核,数据维度是 [filter_height, filter_width, ...] ,那么: shape(output) = [batch, (in_height - filter_height + 1) / strides[1], (in_width - filter_width + 1) / strides[2], ...] ...中的数据表示通道数,例如对于图像就表示像素点的RGB值 output[b, i, j, :] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, ...] * filter[di, dj, ...] 因为,input 数据是一个四维的,每一个通道上面是一个向量 input[b, i, j, :] 。对于 conv2d ,这些向量将会被卷积核 filter[di, dj, :, :] 相乘而产生一个新的向量。 对于 depthwise_conv_2d ,每个标量分量 input[b, i, j, k] 将在 k 个通道上面独立的被卷积核 filter[di, dj, k] 进行卷积操作,然后把所有得到的向量进行连接组合成一个新的向量。""" """ def conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, data_format=None, name=None): 解释:这个函数的作用是对一个四维的输入数据 input 和四维的卷积核 filter 进行操作,然后对输入数据进行一个二维的卷积操作,最后得到卷积之后的结果。 给定的输入张量的维度是 [batch, in_height, in_width, in_channels], 卷积核张量的维度是 [filter_height, filter_width, in_channels, out_channels],参数分别表示卷积核的[高度,宽度,输入通道数.输出通道数] 具体卷积操作如下: ● 将卷积核的维度转换成一个二维的矩阵形状 [filter_height * filter_width * in_channels, output_channels] ● 对于每个批处理的图片,我们将输入张量转换成一个临时的数据维度 [batch, out_height, out_width, filter_height * filter_width * in_channels] 。 ● 对于每个批处理的图片,我们右乘以卷积核,得到最后的输出结果。 更加具体的表示细节为: output[b, i, j, k] = sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * filter[di, dj, q, k] 注意,必须有 strides[0] = strides[3] = 1。在大部分处理过程中,卷积核的水平移动步数和垂直移动步数是相同的,即 strides = [1, stride, stride, 1] 。""" import numpy as np import tensorflow as tf input_data = tf.Variable(np.random.rand(10, 6, 6, 3), dtype=np.float32) filter_data = tf.Variable(np.random.rand(2, 2, 3, 1), dtype=np.float32) y = tf.nn.conv2d(input_data, filter_data, strides=[1, 1, 1, 1], padding='SAME') with tf.Session() as sess: init = tf.initialize_all_variables() sess.run(init) print(sess.run(y)) print(sess.run(tf.shape(y))) # [10 6 6 1] """输入参数: ● input: 一个Tensor。数据类型必须是float32或者float64。表示输入. ● filter: 一个Tensor。数据类型必须是input相同,表示卷积核. ● strides: 一个长度是4的一维整数类型数组,步长.每一维度对应的是 input 中每一维的对应移动步数,比如,strides[1] 对应 input[1] 的移动步数。 ● padding: 一个字符串,取值为 SAME 或者 VALID 。 SAME:卷积输出与输入的尺寸相同,这里在计算如何跨越图像时,不用考虑滤波器的尺寸. VALID:计算卷积核如何在图像上跨越时,需要考虑滤波器的尺寸,卷积核不能超过图像的尺寸. ● use_cudnn_on_gpu: 一个可选布尔值,默认情况下是 True 。 ● name: (可选)为这个操作取一个名字。 ● data_format:用于修改输入的格式,该参数可取为"NHWC"或"NCHW",默认值是"NHWC"用于指定输入和输出数据的格式, 当取默认格式"NHWC"数据存储的顺序为[batch,in_height,in_width,in_channels] 数据格式 N:批数据中的张量数目,即batch_size H:每个批数据中张量的高度 W:每个批数据中张量的宽度 C:每个批数据中张量的通道数 输出参数: ● 一个Tensor,数据类型是 input 相同。"""
Asurada2015/TFAPI_translation
NeuralNekworks_function/Convolution/tf_nn_conv2d.py
Python
apache-2.0
4,208
master_doc = "index" extensions = ["releases"] releases_github_path = "bitprophet/releases"
bitprophet/releases
integration/_support/conf.py
Python
bsd-2-clause
92
#!/usr/bin/python2.4 # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Tests exercising chromiumsync and SyncDataModel.""" import pickle import unittest import autofill_specifics_pb2 import bookmark_specifics_pb2 import chromiumsync import sync_pb2 import theme_specifics_pb2 class SyncDataModelTest(unittest.TestCase): def setUp(self): self.model = chromiumsync.SyncDataModel() def AddToModel(self, proto): self.model._entries[proto.id_string] = proto def GetChangesFromTimestamp(self, requested_types, timestamp): message = sync_pb2.GetUpdatesMessage() message.from_timestamp = timestamp for data_type in requested_types: message.requested_types.Extensions[ chromiumsync.SYNC_TYPE_TO_EXTENSION[data_type]].SetInParent() return self.model.GetChanges( chromiumsync.UpdateSieve(message, self.model.migration_history)) def FindMarkerByNumber(self, markers, datatype): """Search a list of progress markers and find the one for a datatype.""" for marker in markers: if marker.data_type_id == datatype.number: return marker self.fail('Required marker not found: %s' % datatype.name) def testPermanentItemSpecs(self): specs = chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS declared_specs = set(['0']) for spec in specs: self.assertTrue(spec.parent_tag in declared_specs, 'parent tags must ' 'be declared before use') declared_specs.add(spec.tag) unique_datatypes = set([x.sync_type for x in specs]) self.assertEqual(unique_datatypes, set(chromiumsync.ALL_TYPES), 'Every sync datatype should have a permanent folder ' 'associated with it') def testSaveEntry(self): proto = sync_pb2.SyncEntity() proto.id_string = 'abcd' proto.version = 0 self.assertFalse(self.model._ItemExists(proto.id_string)) self.model._SaveEntry(proto) self.assertEqual(1, proto.version) self.assertTrue(self.model._ItemExists(proto.id_string)) self.model._SaveEntry(proto) self.assertEqual(2, proto.version) proto.version = 0 self.assertTrue(self.model._ItemExists(proto.id_string)) self.assertEqual(2, self.model._entries[proto.id_string].version) def testWritePosition(self): def MakeProto(id_string, parent, position): proto = sync_pb2.SyncEntity() proto.id_string = id_string proto.position_in_parent = position proto.parent_id_string = parent self.AddToModel(proto) MakeProto('a', 'X', 1000) MakeProto('b', 'X', 1800) MakeProto('c', 'X', 2600) MakeProto('a1', 'Z', 1007) MakeProto('a2', 'Z', 1807) MakeProto('a3', 'Z', 2607) MakeProto('s', 'Y', 10000) def AssertPositionResult(my_id, parent_id, prev_id, expected_position): entry = sync_pb2.SyncEntity() entry.id_string = my_id self.model._WritePosition(entry, parent_id, prev_id) self.assertEqual(expected_position, entry.position_in_parent) self.assertEqual(parent_id, entry.parent_id_string) self.assertFalse(entry.HasField('insert_after_item_id')) AssertPositionResult('new', 'new_parent', '', 0) AssertPositionResult('new', 'Y', '', 10000 - (2 ** 20)) AssertPositionResult('new', 'Y', 's', 10000 + (2 ** 20)) AssertPositionResult('s', 'Y', '', 10000) AssertPositionResult('s', 'Y', 's', 10000) AssertPositionResult('a1', 'Z', '', 1007) AssertPositionResult('new', 'X', '', 1000 - (2 ** 20)) AssertPositionResult('new', 'X', 'a', 1100) AssertPositionResult('new', 'X', 'b', 1900) AssertPositionResult('new', 'X', 'c', 2600 + (2 ** 20)) AssertPositionResult('a1', 'X', '', 1000 - (2 ** 20)) AssertPositionResult('a1', 'X', 'a', 1100) AssertPositionResult('a1', 'X', 'b', 1900) AssertPositionResult('a1', 'X', 
'c', 2600 + (2 ** 20)) AssertPositionResult('a', 'X', '', 1000) AssertPositionResult('a', 'X', 'b', 1900) AssertPositionResult('a', 'X', 'c', 2600 + (2 ** 20)) AssertPositionResult('b', 'X', '', 1000 - (2 ** 20)) AssertPositionResult('b', 'X', 'a', 1800) AssertPositionResult('b', 'X', 'c', 2600 + (2 ** 20)) AssertPositionResult('c', 'X', '', 1000 - (2 ** 20)) AssertPositionResult('c', 'X', 'a', 1100) AssertPositionResult('c', 'X', 'b', 2600) def testCreatePermanentItems(self): self.model._CreatePermanentItems(chromiumsync.ALL_TYPES) self.assertEqual(len(chromiumsync.ALL_TYPES) + 2, len(self.model._entries)) def ExpectedPermanentItemCount(self, sync_type): if sync_type == chromiumsync.BOOKMARK: return 4 elif sync_type == chromiumsync.TOP_LEVEL: return 1 else: return 2 def testGetChangesFromTimestampZeroForEachType(self): all_types = chromiumsync.ALL_TYPES[1:] for sync_type in all_types: self.model = chromiumsync.SyncDataModel() request_types = [sync_type] version, changes, remaining = ( self.GetChangesFromTimestamp(request_types, 0)) expected_count = self.ExpectedPermanentItemCount(sync_type) self.assertEqual(expected_count, version) self.assertEqual(expected_count, len(changes)) self.assertEqual('google_chrome', changes[0].server_defined_unique_tag) for change in changes: self.assertTrue(change.HasField('server_defined_unique_tag')) self.assertEqual(change.version, change.sync_timestamp) self.assertTrue(change.version <= version) # Test idempotence: another GetUpdates from ts=0 shouldn't recreate. version, changes, remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(expected_count, version) self.assertEqual(expected_count, len(changes)) self.assertEqual(0, remaining) # Doing a wider GetUpdates from timestamp zero shouldn't recreate either. new_version, changes, remaining = ( self.GetChangesFromTimestamp(all_types, 0)) self.assertEqual(len(chromiumsync.SyncDataModel._PERMANENT_ITEM_SPECS), new_version) self.assertEqual(new_version, len(changes)) self.assertEqual(0, remaining) version, changes, remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(new_version, version) self.assertEqual(expected_count, len(changes)) self.assertEqual(0, remaining) def testBatchSize(self): for sync_type in chromiumsync.ALL_TYPES[1:]: specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type) self.model = chromiumsync.SyncDataModel() request_types = [sync_type] for i in range(self.model._BATCH_SIZE*3): entry = sync_pb2.SyncEntity() entry.id_string = 'batch test %d' % i entry.specifics.CopyFrom(specifics) self.model._SaveEntry(entry) last_bit = self.ExpectedPermanentItemCount(sync_type) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(self.model._BATCH_SIZE, version) self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE*2, version) self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE*3, version) self.assertEqual(last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE*3 + last_bit, version) self.assertEqual(0, changes_remaining) # Now delete a third of the items. 
for i in xrange(self.model._BATCH_SIZE*3 - 1, 0, -3): entry = sync_pb2.SyncEntity() entry.id_string = 'batch test %d' % i entry.deleted = True self.model._SaveEntry(entry) # The batch counts shouldn't change. version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, 0)) self.assertEqual(self.model._BATCH_SIZE, len(changes)) self.assertEqual(self.model._BATCH_SIZE*2 + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE, len(changes)) self.assertEqual(self.model._BATCH_SIZE + last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(self.model._BATCH_SIZE, len(changes)) self.assertEqual(last_bit, changes_remaining) version, changes, changes_remaining = ( self.GetChangesFromTimestamp(request_types, version)) self.assertEqual(last_bit, len(changes)) self.assertEqual(self.model._BATCH_SIZE*4 + last_bit, version) self.assertEqual(0, changes_remaining) def testCommitEachDataType(self): for sync_type in chromiumsync.ALL_TYPES[1:]: specifics = chromiumsync.GetDefaultEntitySpecifics(sync_type) self.model = chromiumsync.SyncDataModel() my_cache_guid = '112358132134' parent = 'foobar' commit_session = {} # Start with a GetUpdates from timestamp 0, to populate permanent items. original_version, original_changes, changes_remaining = ( self.GetChangesFromTimestamp([sync_type], 0)) def DoCommit(original=None, id_string='', name=None, parent=None, prev=None): proto = sync_pb2.SyncEntity() if original is not None: proto.version = original.version proto.id_string = original.id_string proto.parent_id_string = original.parent_id_string proto.name = original.name else: proto.id_string = id_string proto.version = 0 proto.specifics.CopyFrom(specifics) if name is not None: proto.name = name if parent: proto.parent_id_string = parent.id_string if prev: proto.insert_after_item_id = prev.id_string else: proto.insert_after_item_id = '' proto.folder = True proto.deleted = False result = self.model.CommitEntry(proto, my_cache_guid, commit_session) self.assertTrue(result) return (proto, result) # Commit a new item. proto1, result1 = DoCommit(name='namae', id_string='Foo', parent=original_changes[-1]) # Commit an item whose parent is another item (referenced via the # pre-commit ID). proto2, result2 = DoCommit(name='Secondo', id_string='Bar', parent=proto1) # Commit a sibling of the second item. 
proto3, result3 = DoCommit(name='Third!', id_string='Baz', parent=proto1, prev=proto2) self.assertEqual(3, len(commit_session)) for p, r in [(proto1, result1), (proto2, result2), (proto3, result3)]: self.assertNotEqual(r.id_string, p.id_string) self.assertEqual(r.originator_client_item_id, p.id_string) self.assertEqual(r.originator_cache_guid, my_cache_guid) self.assertTrue(r is not self.model._entries[r.id_string], "Commit result didn't make a defensive copy.") self.assertTrue(p is not self.model._entries[r.id_string], "Commit result didn't make a defensive copy.") self.assertEqual(commit_session.get(p.id_string), r.id_string) self.assertTrue(r.version > original_version) self.assertEqual(result1.parent_id_string, proto1.parent_id_string) self.assertEqual(result2.parent_id_string, result1.id_string) version, changes, remaining = ( self.GetChangesFromTimestamp([sync_type], original_version)) self.assertEqual(3, len(changes)) self.assertEqual(0, remaining) self.assertEqual(original_version + 3, version) self.assertEqual([result1, result2, result3], changes) for c in changes: self.assertTrue(c is not self.model._entries[c.id_string], "GetChanges didn't make a defensive copy.") self.assertTrue(result2.position_in_parent < result3.position_in_parent) self.assertEqual(0, result2.position_in_parent) # Now update the items so that the second item is the parent of the # first; with the first sandwiched between two new items (4 and 5). # Do this in a new commit session, meaning we'll reference items from # the first batch by their post-commit, server IDs. commit_session = {} old_cache_guid = my_cache_guid my_cache_guid = 'A different GUID' proto2b, result2b = DoCommit(original=result2, parent=original_changes[-1]) proto4, result4 = DoCommit(id_string='ID4', name='Four', parent=result2, prev=None) proto1b, result1b = DoCommit(original=result1, parent=result2, prev=proto4) proto5, result5 = DoCommit(id_string='ID5', name='Five', parent=result2, prev=result1) self.assertEqual(2, len(commit_session), 'Only new items in second ' 'batch should be in the session') for p, r, original in [(proto2b, result2b, proto2), (proto4, result4, proto4), (proto1b, result1b, proto1), (proto5, result5, proto5)]: self.assertEqual(r.originator_client_item_id, original.id_string) if original is not p: self.assertEqual(r.id_string, p.id_string, 'Ids should be stable after first commit') self.assertEqual(r.originator_cache_guid, old_cache_guid) else: self.assertNotEqual(r.id_string, p.id_string) self.assertEqual(r.originator_cache_guid, my_cache_guid) self.assertEqual(commit_session.get(p.id_string), r.id_string) self.assertTrue(r is not self.model._entries[r.id_string], "Commit result didn't make a defensive copy.") self.assertTrue(p is not self.model._entries[r.id_string], "Commit didn't make a defensive copy.") self.assertTrue(r.version > p.version) version, changes, remaining = ( self.GetChangesFromTimestamp([sync_type], original_version)) self.assertEqual(5, len(changes)) self.assertEqual(0, remaining) self.assertEqual(original_version + 7, version) self.assertEqual([result3, result2b, result4, result1b, result5], changes) for c in changes: self.assertTrue(c is not self.model._entries[c.id_string], "GetChanges didn't make a defensive copy.") self.assertTrue(result4.parent_id_string == result1b.parent_id_string == result5.parent_id_string == result2b.id_string) self.assertTrue(result4.position_in_parent < result1b.position_in_parent < result5.position_in_parent) def testUpdateSieve(self): # from_timestamp, legacy mode 
autofill = autofill_specifics_pb2.autofill theme = theme_specifics_pb2.theme msg = sync_pb2.GetUpdatesMessage() msg.from_timestamp = 15412 msg.requested_types.Extensions[autofill].SetInParent() msg.requested_types.Extensions[theme].SetInParent() sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15412, chromiumsync.AUTOFILL: 15412, chromiumsync.THEME: 15412}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15412, response) self.assertEqual(0, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15413, response) self.assertEqual(0, len(response.new_progress_marker)) self.assertTrue(response.HasField('new_timestamp')) self.assertEqual(15413, response.new_timestamp) # Existing tokens msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 1)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15412, chromiumsync.AUTOFILL: 15412, chromiumsync.THEME: 15413}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15413, response) self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = response.new_progress_marker[0] self.assertEqual(marker.data_type_id, autofill.number) self.assertEqual(pickle.loads(marker.token), (15413, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) # Empty tokens indicating from timestamp = 0 msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((412, 1)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = '' sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 0, chromiumsync.AUTOFILL: 412, chromiumsync.THEME: 0}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(1, response) self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = response.new_progress_marker[0] self.assertEqual(marker.data_type_id, theme.number) self.assertEqual(pickle.loads(marker.token), (1, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(412, response) self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = response.new_progress_marker[0] self.assertEqual(marker.data_type_id, theme.number) self.assertEqual(pickle.loads(marker.token), (412, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(413, response) self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (413, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (413, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) # Migration token timestamps (client gives timestamp, server returns token) # 
These are for migrating from the old 'timestamp' protocol to the # progressmarker protocol, and have nothing to do with the MIGRATION_DONE # error code. msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.timestamp_token_for_migration = 15213 marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.timestamp_token_for_migration = 15211 sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15211, chromiumsync.AUTOFILL: 15213, chromiumsync.THEME: 15211}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(16000, response) # There were updates self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (16000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (16000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.timestamp_token_for_migration = 3000 marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.timestamp_token_for_migration = 3000 sieve = chromiumsync.UpdateSieve(msg) self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 3000, chromiumsync.AUTOFILL: 3000, chromiumsync.THEME: 3000}) response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(3000, response) # Already up to date self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (3000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (3000, 1)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) def testUpdateSieveStoreMigration(self): autofill = autofill_specifics_pb2.autofill theme = theme_specifics_pb2.theme migrator = chromiumsync.MigrationHistory() msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 1)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() migrator.Bump([chromiumsync.BOOKMARK, chromiumsync.PASSWORD]) # v=2 sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() self.assertEqual(sieve._state, {chromiumsync.TOP_LEVEL: 15412, chromiumsync.AUTOFILL: 15412, chromiumsync.THEME: 15413}) migrator.Bump([chromiumsync.AUTOFILL, chromiumsync.PASSWORD]) # v=3 sieve = chromiumsync.UpdateSieve(msg, migrator) try: sieve.CheckMigrationState() self.fail('Should have raised.') except chromiumsync.MigrationDoneError, error: # We want this to happen. 
self.assertEqual([chromiumsync.AUTOFILL], error.datatypes) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = '' marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15412, response) # There were updates self.assertEqual(1, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (15412, 3)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 3)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() migrator.Bump([chromiumsync.THEME, chromiumsync.AUTOFILL]) # v=4 migrator.Bump([chromiumsync.AUTOFILL]) # v=5 sieve = chromiumsync.UpdateSieve(msg, migrator) try: sieve.CheckMigrationState() self.fail("Should have raised.") except chromiumsync.MigrationDoneError, error: # We want this to happen. self.assertEqual(set([chromiumsync.THEME, chromiumsync.AUTOFILL]), set(error.datatypes)) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = '' marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 1)) sieve = chromiumsync.UpdateSieve(msg, migrator) try: sieve.CheckMigrationState() self.fail("Should have raised.") except chromiumsync.MigrationDoneError, error: # We want this to happen. self.assertEqual([chromiumsync.THEME], error.datatypes) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = '' marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = '' sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() response = sync_pb2.GetUpdatesResponse() sieve.SaveProgress(15412, response) # There were updates self.assertEqual(2, len(response.new_progress_marker)) self.assertFalse(response.HasField('new_timestamp')) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, autofill) self.assertEqual(pickle.loads(marker.token), (15412, 5)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) marker = self.FindMarkerByNumber(response.new_progress_marker, theme) self.assertEqual(pickle.loads(marker.token), (15412, 4)) self.assertFalse(marker.HasField('timestamp_token_for_migration')) msg = sync_pb2.GetUpdatesMessage() marker = msg.from_progress_marker.add() marker.data_type_id = autofill.number marker.token = pickle.dumps((15412, 5)) marker = msg.from_progress_marker.add() marker.data_type_id = theme.number marker.token = pickle.dumps((15413, 4)) sieve = chromiumsync.UpdateSieve(msg, migrator) sieve.CheckMigrationState() if __name__ == '__main__': unittest.main()
Crystalnix/house-of-life-chromium
net/tools/testserver/chromiumsync_test.py
Python
bsd-3-clause
26,921
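
# Illustrative sketch (not from the Chromium sources; written for Python 3):
# the progress-marker token convention the sieve tests above exercise. Each
# token is a pickled (timestamp, migration_version) pair, and an empty token
# means "give me everything from timestamp zero". Names here are invented.
import pickle

def make_token(timestamp, migration_version):
    # Serialize the pair exactly the way the tests do with pickle.dumps.
    return pickle.dumps((timestamp, migration_version))

def parse_token(token):
    # An empty token is how a client asks for updates from ts=0; version 1
    # is assumed here as the initial migration version.
    if not token:
        return (0, 1)
    return pickle.loads(token)

assert parse_token(make_token(15412, 3)) == (15412, 3)
assert parse_token(b'') == (0, 1)
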
# -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-11-13 16:28 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20171112_1806'), ] operations = [ migrations.AddField( model_name='user', name='is_verified', field=models.BooleanField(default=False, verbose_name='Is Verified'), ), migrations.AddField( model_name='user', name='profile_image', field=models.ImageField(null=True, upload_to='profile_images/%Y/%m/%d', verbose_name='Profile Image'), ), migrations.AlterField( model_name='user', name='facebook_token', field=models.TextField(blank=True, verbose_name='Facebook Token'), ), ]
we-inc/mms-snow-white-and-the-seven-pandas
webserver/apps/users/migrations/0004_auto_20171113_2328.py
Python
mit
918
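
# Hedged sketch of the model state this migration produces. The app's actual
# User class (users/models.py) is not included in this dump, so the
# AbstractUser base and module placement are assumptions; the field
# definitions are read directly off the operations above.
from django.contrib.auth.models import AbstractUser
from django.db import models

class User(AbstractUser):
    is_verified = models.BooleanField('Is Verified', default=False)
    profile_image = models.ImageField('Profile Image', null=True,
                                      upload_to='profile_images/%Y/%m/%d')
    facebook_token = models.TextField('Facebook Token', blank=True)
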
## begin license ## # # "Meresco Lucene" is a set of components and tools to integrate Lucene into Meresco # # Copyright (C) 2013-2016, 2019, 2021 Seecr (Seek You Too B.V.) https://seecr.nl # Copyright (C) 2013-2014 Stichting Bibliotheek.nl (BNL) http://www.bibliotheek.nl # Copyright (C) 2015-2016 Koninklijke Bibliotheek (KB) http://www.kb.nl # Copyright (C) 2016, 2021 Stichting Kennisnet https://www.kennisnet.nl # Copyright (C) 2021 Data Archiving and Network Services https://dans.knaw.nl # Copyright (C) 2021 SURF https://www.surf.nl # Copyright (C) 2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl # # This file is part of "Meresco Lucene" # # "Meresco Lucene" is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # "Meresco Lucene" is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with "Meresco Lucene"; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # ## end license ## from meresco.core import Observable from meresco.lucene import SORTED_PREFIX from itertools import chain from .fieldregistry import IDFIELD class Fields2LuceneDoc(Observable): def __init__(self, transactionName, fieldRegistry, identifierRewrite=None, rewriteFields=None): Observable.__init__(self) self._transactionName = transactionName self._identifierRewrite = (lambda identifier: identifier) if identifierRewrite is None else identifierRewrite self._rewriteFields = (lambda fields: fields) if rewriteFields is None else rewriteFields self._fieldRegistry = fieldRegistry def begin(self, name): if name != self._transactionName: return tx = self.ctx.tx tx.join(self) def addField(self, name, value): tx = self.ctx.tx valueList = tx.objectScope(self).setdefault('fields', {}).setdefault(name, []) if name.startswith(SORTED_PREFIX) and valueList: return valueList.append(value) def addFacetField(self, name, value): tx = self.ctx.tx valueList = tx.objectScope(self).setdefault('facet_fields', {}).setdefault(name, []) valueList.append(value) def commit(self, id): tx = self.ctx.tx fieldValues = tx.objectScope(self).get('fields', {}) facet_fields = tx.objectScope(self).get('facet_fields', {}) if not (fieldValues or facet_fields): return identifier = self._identifierRewrite(tx.locals['id']) fieldValues = self._rewriteFields(fieldValues) yield self.all.addDocument( identifier=identifier, fields=self._createFields(fieldValues, facet_fields), ) def _createFields(self, fieldValues, facet_fields=None): facet_fields = facet_fields or {} fields = [] for field, values in chain(fieldValues.items(), facet_fields.items()): if self._fieldRegistry.isDrilldownField(field): for value in values: if hasattr(value, 'extend'): path = [str(category) for category in value] else: path = [str(value)] fields.append(self._fieldRegistry.createFacetField(field, path)) else: for value in values: if field == IDFIELD: raise ValueError("Field '%s' is protected and created by Meresco Lucene" % IDFIELD) fields.append(self._fieldRegistry.createField(field, value)) return fields
seecr/meresco-lucene
meresco/lucene/fields2lucenedoc.py
Python
gpl-2.0
4,022
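
# Illustration only (not part of Meresco): the field/path shapes that
# _createFields derives for drilldown fields. A list-like value becomes one
# multi-step facet path; a scalar becomes a single-step path. This helper
# mimics that branch with plain tuples so it can run standalone.
def facet_paths(field, values):
    paths = []
    for value in values:
        if hasattr(value, 'extend'):           # list-like: multi-step path
            paths.append((field, [str(c) for c in value]))
        else:                                  # scalar: one-step path
            paths.append((field, [str(value)]))
    return paths

print(facet_paths('dc:subject', ['physics', ['science', 'physics']]))
# [('dc:subject', ['physics']), ('dc:subject', ['science', 'physics'])]
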
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.from_tensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import ragged from tensorflow.python.platform import googletest class RaggedFromTensorOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testDocStringExamples(self): # The examples from ragged.from_tensor.__doc__. dt = constant_op.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]]) with self.test_session(): self.assertEqual( ragged.from_tensor(dt).eval().tolist(), [[5, 7, 0], [0, 3, 0], [6, 0, 0]]) self.assertEqual( ragged.from_tensor(dt, lengths=[1, 0, 3]).eval().tolist(), [[5], [], [6, 0, 0]]) self.assertEqual( ragged.from_tensor(dt, padding=0).eval().tolist(), [[5, 7], [0, 3], [6]]) @parameterized.parameters( # 2D test cases, no length or padding. { 'tensor': [[]], 'expected': [[]], }, { 'tensor': [[1]], 'expected': [[1]], }, { 'tensor': [[1, 2]], 'expected': [[1, 2]], }, { 'tensor': [[1], [2], [3]], 'expected': [[1], [2], [3]], }, { 'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'expected': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], }, # 3D test cases, no length or padding { 'tensor': [[[]]], 'expected': [[[]]], }, { 'tensor': [[[]]], 'expected': [[[]]], 'ragged_rank': 1, }, { 'tensor': [[[1]]], 'expected': [[[1]]], }, { 'tensor': [[[1, 2]]], 'expected': [[[1, 2]]], }, { 'tensor': [[[1, 2], [3, 4]]], 'expected': [[[1, 2], [3, 4]]], }, { 'tensor': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]], 'expected': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]], }, { 'tensor': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]], 'expected': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]], }, # 2D test cases, with length { 'tensor': [[1]], 'lengths': [1], 'expected': [[1]] }, { 'tensor': [[1]], 'lengths': [0], 'expected': [[]] }, { 'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'lengths': [0, 1, 2], 'expected': [[], [4], [7, 8]] }, { 'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'lengths': [0, 0, 0], 'expected': [[], [], []] }, { 'tensor': [[1, 2], [3, 4]], 'lengths': [2, 2], 'expected': [[1, 2], [3, 4]] }, { 'tensor': [[1, 2], [3, 4]], 'lengths': [7, 8], # lengths > ncols: truncated to ncols 'expected': [[1, 2], [3, 4]] }, { 'tensor': [[1, 2], [3, 4]], 'lengths': [-2, -1], # lengths < 0: treated as zero 'expected': [[], []] }, # 3D test cases, with length { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'lengths': [0, 0], 'expected': [[], []] }, { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'lengths': [1, 2], 'expected': [[[1, 2]], [[5, 6], [7, 8]]] }, { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'lengths': [2, 2], 'expected': [[[1, 2], [3, 
4]], [[5, 6], [7, 8]]] }, # 2D test cases, with padding { 'tensor': [[1]], 'padding': 0, 'expected': [[1]] }, { 'tensor': [[0]], 'padding': 0, 'expected': [[]] }, { 'tensor': [[0, 1]], 'padding': 0, 'expected': [[0, 1]] }, { 'tensor': [[1, 0]], 'padding': 0, 'expected': [[1]] }, { 'tensor': [[1, 0, 1, 0, 0, 1, 0, 0]], 'padding': 0, 'expected': [[1, 0, 1, 0, 0, 1]] }, { 'tensor': [[3, 7, 0, 0], [2, 0, 0, 0], [5, 0, 0, 0]], 'padding': 0, 'expected': [[3, 7], [2], [5]] }, # 3D test cases, with padding { 'tensor': [[[1]]], 'padding': [0], 'expected': [[[1]]] }, { 'tensor': [[[0]]], 'padding': [0], 'expected': [[]] }, { 'tensor': [[[0, 0], [1, 2]], [[3, 4], [0, 0]]], 'padding': [0, 0], 'expected': [[[0, 0], [1, 2]], [[3, 4]]] }, # 4D test cases, with padding { 'tensor': [ [[[1, 2], [3, 4]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]] ], 'padding': [[0, 0], [0, 0]], 'expected': [ [[[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]], [] ] }, # 3D test cases, with ragged_rank=2. { 'tensor': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]], 'ragged_rank': 2, 'expected': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]] }, { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'ragged_rank': 2, 'lengths': [2, 0, 2, 1], 'expected': [[[1, 2], []], [[5, 6], [7]]] }, { 'tensor': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]], 'ragged_rank': 2, 'padding': 0, 'expected': [[[1], [2, 3]], [[], [4]]] }, # 4D test cases, with ragged_rank>1 { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 2, 'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]] }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 3, 'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]] }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 2, 'padding': [0, 0], 'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8]]]] }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 3, 'padding': 0, 'expected': [[[[1], [2, 3]], [[], [4]]], [[[5, 6], [7]], [[0, 8], []]]] }, ) # pyformat: disable def testRaggedFromTensor(self, tensor, expected, lengths=None, padding=None, ragged_rank=1): dt = constant_op.constant(tensor) rt = ragged.from_tensor(dt, lengths, padding, ragged_rank) self.assertEqual(type(rt), ragged.RaggedTensor) self.assertEqual(rt.ragged_rank, ragged_rank) self.assertTrue( dt.shape.is_compatible_with(rt.shape), '%s is incompatible with %s' % (dt.shape, rt.shape)) with self.test_session(): self.assertEqual(rt.eval().tolist(), expected) def testHighDimensions(self): # Use distinct prime numbers for all dimension shapes in this test, so # we can see any errors that are caused by mixing up dimension sizes. 
dt = array_ops.reshape( math_ops.range(3 * 5 * 7 * 11 * 13 * 17), [3, 5, 7, 11, 13, 17]) for ragged_rank in range(1, 4): rt = ragged.from_tensor(dt, ragged_rank=ragged_rank) self.assertEqual(type(rt), ragged.RaggedTensor) self.assertEqual(rt.ragged_rank, ragged_rank) self.assertTrue( dt.shape.is_compatible_with(rt.shape), '%s is incompatible with %s' % (dt.shape, rt.shape)) with self.test_session(): self.assertEqual(rt.eval().tolist(), self.evaluate(dt).tolist()) @parameterized.parameters( # With no padding or lengths { 'dt_shape': [0, 0], 'expected': [] }, { 'dt_shape': [0, 3], 'expected': [] }, { 'dt_shape': [3, 0], 'expected': [[], [], []] }, { 'dt_shape': [0, 2, 3], 'expected': [] }, { 'dt_shape': [2, 0, 3], 'expected': [[], []] }, { 'dt_shape': [2, 3, 0], 'expected': [[[], [], []], [[], [], []]] }, { 'dt_shape': [2, 3, 0, 1], 'expected': [[[], [], []], [[], [], []]] }, { 'dt_shape': [2, 3, 1, 0], 'expected': [[[[]], [[]], [[]]], [[[]], [[]], [[]]]] }, # With padding { 'dt_shape': [0, 0], 'padding': 0, 'expected': [] }, { 'dt_shape': [0, 3], 'padding': 0, 'expected': [] }, { 'dt_shape': [3, 0], 'padding': 0, 'expected': [[], [], []] }, { 'dt_shape': [0, 2, 3], 'padding': [0, 0, 0], 'expected': [] }, { 'dt_shape': [2, 0, 3], 'padding': [0, 0, 0], 'expected': [[], []] }, { 'dt_shape': [2, 3, 0], 'padding': [], 'expected': [[], []] }, # With lengths { 'dt_shape': [0, 0], 'lengths': [], 'expected': [] }, { 'dt_shape': [0, 3], 'lengths': [], 'expected': [] }, { 'dt_shape': [3, 0], 'lengths': [0, 0, 0], 'expected': [[], [], []] }, { 'dt_shape': [3, 0], 'lengths': [2, 3, 4], # lengths > ncols: truncated to ncols 'expected': [[], [], []] }, { 'dt_shape': [0, 2, 3], 'lengths': [], 'expected': [] }, { 'dt_shape': [2, 0, 3], 'lengths': [0, 0], 'expected': [[], []] }, { 'dt_shape': [2, 3, 0], 'lengths': [0, 0], 'expected': [[], []] }, ) def testEmpty(self, dt_shape, expected, lengths=None, padding=None): dt = array_ops.zeros(dt_shape) rt = ragged.from_tensor(dt, lengths, padding) self.assertEqual(type(rt), ragged.RaggedTensor) self.assertEqual(rt.ragged_rank, 1) self.assertTrue(dt.shape.is_compatible_with(rt.shape)) with self.test_session(): self.assertEqual(rt.eval().tolist(), expected) @parameterized.parameters( { 'tensor': [[1]], 'lengths': [0], 'padding': 0, 'error': (ValueError, 'Specify lengths or padding, but not both') }, { 'tensor': [[1]], 'lengths': [0.5], 'error': (TypeError, 'lengths must be an integer tensor') }, { 'tensor': [[1]], 'padding': 'a', 'error': (TypeError, "Expected int32, got 'a'.*") }, { 'tensor': [[1]], 'padding': [1], 'error': (ValueError, r'Shapes \(1,\) and \(\) are incompatible') }, { 'tensor': [[[1]]], 'padding': 1, 'error': (ValueError, r'Shapes \(\) and \(1,\) are incompatible') }, { 'tensor': [[1]], 'ragged_rank': 'bad', 'error': (TypeError, r'ragged_rank expected int, got \'bad\'') }, { 'tensor': [[1]], 'ragged_rank': 0, 'error': (ValueError, r'ragged_rank must be greater than 0; got 0') }, { 'tensor': [[1]], 'ragged_rank': -1, 'error': (ValueError, r'ragged_rank must be greater than 0; got -1') }, ) def testErrors(self, tensor, lengths=None, padding=None, ragged_rank=1, error=None): dt = constant_op.constant(tensor) self.assertRaisesRegexp(error[0], error[1], ragged.from_tensor, dt, lengths, padding, ragged_rank) if __name__ == '__main__': googletest.main()
hehongliang/tensorflow
tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py
Python
apache-2.0
13,524
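
# The same three conversions as the doc-string test above, written against
# the public TensorFlow 2.x API (tf.RaggedTensor.from_tensor); the internal
# `ragged.from_tensor` module under test predates that spelling. Requires an
# installed tensorflow package.
import tensorflow as tf

dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
print(tf.RaggedTensor.from_tensor(dt).to_list())
# [[5, 7, 0], [0, 3, 0], [6, 0, 0]]
print(tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3]).to_list())
# [[5], [], [6, 0, 0]]
print(tf.RaggedTensor.from_tensor(dt, padding=0).to_list())
# [[5, 7], [0, 3], [6]]
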
from difflib import unified_diff import filecmp import glob import hashlib from optparse import OptionParser import os import shutil import sys import numpy as np import openmc from openmc.examples import pwr_core from tests.regression_tests import config class TestHarness(object): """General class for running OpenMC regression tests.""" def __init__(self, statepoint_name): self._sp_name = statepoint_name def main(self): """Accept commandline arguments and either run or update tests.""" if config['update']: self.update_results() else: self.execute_test() def execute_test(self): """Run OpenMC with the appropriate arguments and check the outputs.""" try: self._run_openmc() self._test_output_created() results = self._get_results() self._write_results(results) self._compare_results() finally: self._cleanup() def update_results(self): """Update the results_true using the current version of OpenMC.""" try: self._run_openmc() self._test_output_created() results = self._get_results() self._write_results(results) self._overwrite_results() finally: self._cleanup() def _run_openmc(self): if config['mpi']: mpi_args = [config['mpiexec'], '-n', config['mpi_np']] openmc.run(openmc_exec=config['exe'], mpi_args=mpi_args) else: openmc.run(openmc_exec=config['exe']) def _test_output_created(self): """Make sure statepoint.* and tallies.out have been created.""" statepoint = glob.glob(self._sp_name) assert len(statepoint) == 1, 'Either multiple or no statepoint files' \ ' exist.' assert statepoint[0].endswith('h5'), \ 'Statepoint file is not a HDF5 file.' if os.path.exists('tallies.xml'): assert os.path.exists('tallies.out'), \ 'Tally output file does not exist.' def _get_results(self, hash_output=False): """Digest info in the statepoint and return as a string.""" # Read the statepoint file. statepoint = glob.glob(self._sp_name)[0] with openmc.StatePoint(statepoint) as sp: # Write out k-combined. outstr = 'k-combined:\n' form = '{0:12.6E} {1:12.6E}\n' outstr += form.format(sp.k_combined.n, sp.k_combined.s) # Write out tally data. for i, tally_ind in enumerate(sp.tallies): tally = sp.tallies[tally_ind] results = np.zeros((tally.sum.size * 2, )) results[0::2] = tally.sum.ravel() results[1::2] = tally.sum_sq.ravel() results = ['{0:12.6E}'.format(x) for x in results] outstr += 'tally {}:\n'.format(i + 1) outstr += '\n'.join(results) + '\n' # Hash the results if necessary. if hash_output: sha512 = hashlib.sha512() sha512.update(outstr.encode('utf-8')) outstr = sha512.hexdigest() return outstr def _write_results(self, results_string): """Write the results to an ASCII file.""" with open('results_test.dat', 'w') as fh: fh.write(results_string) def _overwrite_results(self): """Overwrite the results_true with the results_test.""" shutil.copyfile('results_test.dat', 'results_true.dat') def _compare_results(self): """Make sure the current results agree with the _true standard.""" compare = filecmp.cmp('results_test.dat', 'results_true.dat') if not compare: os.rename('results_test.dat', 'results_error.dat') assert compare, 'Results do not agree.' 
    def _cleanup(self):
        """Delete statepoints, tally, and test files."""
        output = glob.glob('statepoint.*.h5')
        output += ['tallies.out', 'results_test.dat', 'summary.h5']
        output += glob.glob('volume_*.h5')
        for f in output:
            if os.path.exists(f):
                os.remove(f)


class HashedTestHarness(TestHarness):
    """Specialized TestHarness that hashes the results."""

    def _get_results(self):
        """Digest info in the statepoint and return as a string."""
        return super()._get_results(True)


class CMFDTestHarness(TestHarness):
    """Specialized TestHarness for running OpenMC CMFD tests."""

    def __init__(self, statepoint_name, cmfd_run):
        self._sp_name = statepoint_name
        self._create_cmfd_result_str(cmfd_run)

    def _create_cmfd_result_str(self, cmfd_run):
        """Create CMFD result string from variables of CMFDRun instance"""
        outstr = 'cmfd indices\n'
        outstr += '\n'.join(['{0:12.6E}'.format(x) for x in cmfd_run.indices])
        outstr += '\nk cmfd\n'
        outstr += '\n'.join(['{0:12.6E}'.format(x) for x in cmfd_run.k_cmfd])
        outstr += '\ncmfd entropy\n'
        outstr += '\n'.join(['{0:12.6E}'.format(x) for x in cmfd_run.entropy])
        outstr += '\ncmfd balance\n'
        outstr += '\n'.join(['{0:12.6E}'.format(x) for x in cmfd_run.balance])
        outstr += '\ncmfd dominance ratio\n'
        outstr += '\n'.join(['{0:10.3E}'.format(x) for x in cmfd_run.dom])
        outstr += '\ncmfd openmc source comparison\n'
        outstr += '\n'.join(['{0:12.6E}'.format(x) for x in cmfd_run.src_cmp])
        outstr += '\ncmfd source\n'
        cmfdsrc = np.reshape(cmfd_run.cmfd_src, np.product(cmfd_run.indices),
                             order='F')
        outstr += '\n'.join(['{0:12.6E}'.format(x) for x in cmfdsrc])
        outstr += '\n'
        self._cmfdrun_results = outstr

    def execute_test(self):
        """Don't call _run_openmc as OpenMC will be called through the C API
        for CMFD tests; write the CMFD results that were passed as an argument.
        """
        try:
            self._test_output_created()
            results = self._get_results()
            results += self._cmfdrun_results
            self._write_results(results)
            self._compare_results()
        finally:
            self._cleanup()

    def update_results(self):
        """Don't call _run_openmc as OpenMC will be called through the C API
        for CMFD tests; write the CMFD results that were passed as an argument.
        """
        try:
            self._test_output_created()
            results = self._get_results()
            results += self._cmfdrun_results
            self._write_results(results)
            self._overwrite_results()
        finally:
            self._cleanup()

    def _cleanup(self):
        """Delete output files for numpy matrices and flux vectors."""
        super()._cleanup()
        output = ['loss.npz', 'loss.dat', 'prod.npz', 'prod.dat',
                  'fluxvec.npy', 'fluxvec.dat']
        for f in output:
            if os.path.exists(f):
                os.remove(f)


class ParticleRestartTestHarness(TestHarness):
    """Specialized TestHarness for running OpenMC particle restart tests."""

    def _run_openmc(self):
        # Set arguments
        args = {'openmc_exec': config['exe']}
        if config['mpi']:
            args['mpi_args'] = [config['mpiexec'], '-n', config['mpi_np']]

        # Initial run
        openmc.run(**args)

        # Run particle restart
        args.update({'restart_file': self._sp_name})
        openmc.run(**args)

    def _test_output_created(self):
        """Make sure the restart file has been created."""
        particle = glob.glob(self._sp_name)
        assert len(particle) == 1, 'Either multiple or no particle restart ' \
            'files exist.'
        assert particle[0].endswith('h5'), \
            'Particle restart file is not a HDF5 file.'

    def _get_results(self):
        """Digest info in the statepoint and return as a string."""
        # Read the particle restart file.
        particle = glob.glob(self._sp_name)[0]
        p = openmc.Particle(particle)

        # Write out the properties.
outstr = '' outstr += 'current batch:\n' outstr += "{0:12.6E}\n".format(p.current_batch) outstr += 'current generation:\n' outstr += "{0:12.6E}\n".format(p.current_generation) outstr += 'particle id:\n' outstr += "{0:12.6E}\n".format(p.id) outstr += 'run mode:\n' outstr += "{0}\n".format(p.run_mode) outstr += 'particle weight:\n' outstr += "{0:12.6E}\n".format(p.weight) outstr += 'particle energy:\n' outstr += "{0:12.6E}\n".format(p.energy) outstr += 'particle xyz:\n' outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.xyz[0], p.xyz[1], p.xyz[2]) outstr += 'particle uvw:\n' outstr += "{0:12.6E} {1:12.6E} {2:12.6E}\n".format(p.uvw[0], p.uvw[1], p.uvw[2]) return outstr class PyAPITestHarness(TestHarness): def __init__(self, statepoint_name, model=None): super().__init__(statepoint_name) if model is None: self._model = pwr_core() else: self._model = model self._model.plots = [] def main(self): """Accept commandline arguments and either run or update tests.""" if config['build_inputs']: self._build_inputs() elif config['update']: self.update_results() else: self.execute_test() def execute_test(self): """Build input XMLs, run OpenMC, and verify correct results.""" try: self._build_inputs() inputs = self._get_inputs() self._write_inputs(inputs) self._compare_inputs() self._run_openmc() self._test_output_created() results = self._get_results() self._write_results(results) self._compare_results() finally: self._cleanup() def update_results(self): """Update results_true.dat and inputs_true.dat""" try: self._build_inputs() inputs = self._get_inputs() self._write_inputs(inputs) self._overwrite_inputs() self._run_openmc() self._test_output_created() results = self._get_results() self._write_results(results) self._overwrite_results() finally: self._cleanup() def _build_inputs(self): """Write input XML files.""" self._model.export_to_xml() def _get_inputs(self): """Return a hash digest of the input XML files.""" xmls = ['geometry.xml', 'materials.xml', 'settings.xml', 'tallies.xml', 'plots.xml'] return ''.join([open(fname).read() for fname in xmls if os.path.exists(fname)]) def _write_inputs(self, input_digest): """Write the digest of the input XMLs to an ASCII file.""" with open('inputs_test.dat', 'w') as fh: fh.write(input_digest) def _overwrite_inputs(self): """Overwrite inputs_true.dat with inputs_test.dat""" shutil.copyfile('inputs_test.dat', 'inputs_true.dat') def _compare_inputs(self): """Make sure the current inputs agree with the _true standard.""" compare = filecmp.cmp('inputs_test.dat', 'inputs_true.dat') if not compare: os.rename('inputs_test.dat', 'inputs_error.dat') for line in unified_diff(open('inputs_true.dat', 'r').readlines(), open('inputs_error.dat', 'r').readlines(), 'inputs_true.dat', 'inputs_error.dat'): print(line, end='') assert compare, 'Input files are broken.' def _cleanup(self): """Delete XMLs, statepoints, tally, and test files.""" super()._cleanup() output = ['materials.xml', 'geometry.xml', 'settings.xml', 'tallies.xml', 'plots.xml', 'inputs_test.dat'] for f in output: if os.path.exists(f): os.remove(f) class HashedPyAPITestHarness(PyAPITestHarness): def _get_results(self): """Digest info in the statepoint and return as a string.""" return super()._get_results(True)
wbinventor/openmc
tests/testing_harness.py
Python
mit
12,485
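
# How a regression test typically drives the harness above: a minimal,
# hypothetical test module. The statepoint filename must match what the
# model's settings produce; ten batches is an assumption here, not a given.
from tests.testing_harness import PyAPITestHarness

def test_pwr_core():
    harness = PyAPITestHarness('statepoint.10.h5')
    harness.main()  # runs the test, or updates results when config['update'] is set
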
from __future__ import unicode_literals import base64 import logging import pickle try: from django.utils.timezone import now as datetime_now datetime_now # workaround for pyflakes except ImportError: from datetime import datetime datetime_now = datetime.now from django.core.mail import EmailMessage from django.db import models from django.utils.translation import ugettext_lazy as _ PRIORITY_HIGH = "1" PRIORITY_MEDIUM = "2" PRIORITY_LOW = "3" PRIORITY_DEFERRED = "4" PRIORITIES = [ (PRIORITY_HIGH, "high"), (PRIORITY_MEDIUM, "medium"), (PRIORITY_LOW, "low"), (PRIORITY_DEFERRED, "deferred"), ] PRIORITY_MAPPING = dict((label, v) for (v, label) in PRIORITIES) class MessageManager(models.Manager): def high_priority(self): """ the high priority messages in the queue """ return self.filter(priority=PRIORITY_HIGH) def medium_priority(self): """ the medium priority messages in the queue """ return self.filter(priority=PRIORITY_MEDIUM) def low_priority(self): """ the low priority messages in the queue """ return self.filter(priority=PRIORITY_LOW) def non_deferred(self): """ the messages in the queue not deferred """ return self.exclude(priority=PRIORITY_DEFERRED) def deferred(self): """ the deferred messages in the queue """ return self.filter(priority=PRIORITY_DEFERRED) def retry_deferred(self, new_priority=PRIORITY_MEDIUM): count = 0 for message in self.deferred(): if message.retry(new_priority): count += 1 return count base64_encode = base64.encodebytes if hasattr(base64, 'encodebytes') else base64.encodestring base64_decode = base64.decodebytes if hasattr(base64, 'decodebytes') else base64.decodestring def email_to_db(email): # pickle.dumps returns essentially binary data which we need to encode # to store in a unicode field. return base64_encode(pickle.dumps(email)) def db_to_email(data): if data == "": return None else: try: data = data.encode("ascii") except AttributeError: pass try: return pickle.loads(base64_decode(data)) except (TypeError, pickle.UnpicklingError, base64.binascii.Error): try: # previous method was to just do pickle.dumps(val) return pickle.loads(data) except (TypeError, pickle.UnpicklingError): return None class Message(models.Model): # The actual data - a pickled EmailMessage message_data = models.TextField() when_added = models.DateTimeField(default=datetime_now) priority = models.CharField(max_length=1, choices=PRIORITIES, default=PRIORITY_MEDIUM) # @@@ campaign? # @@@ content_type? objects = MessageManager() class Meta: verbose_name = _("message") verbose_name_plural = _("messages") def defer(self): self.priority = PRIORITY_DEFERRED self.save() def retry(self, new_priority=PRIORITY_MEDIUM): if self.priority == PRIORITY_DEFERRED: self.priority = new_priority self.save() return True else: return False def _get_email(self): return db_to_email(self.message_data) def _set_email(self, val): self.message_data = email_to_db(val) email = property( _get_email, _set_email, doc="""EmailMessage object. 
If this is mutated, you will need to set the attribute again to cause the underlying serialised data to be updated.""") @property def to_addresses(self): email = self.email if email is not None: return email.to else: return [] @property def subject(self): email = self.email if email is not None: return email.subject else: return "" def filter_recipient_list(lst): if lst is None: return None retval = [] for e in lst: if DontSendEntry.objects.has_address(e): logging.info("skipping email to %s as on don't send list " % e.encode("utf-8")) else: retval.append(e) return retval def make_message(subject="", body="", from_email=None, to=None, bcc=None, attachments=None, headers=None, priority=None): """ Creates a simple message for the email parameters supplied. The 'to' and 'bcc' lists are filtered using DontSendEntry. If needed, the 'email' attribute can be set to any instance of EmailMessage if e-mails with attachments etc. need to be supported. Call 'save()' on the result when it is ready to be sent, and not before. """ to = filter_recipient_list(to) bcc = filter_recipient_list(bcc) core_msg = EmailMessage( subject=subject, body=body, from_email=from_email, to=to, bcc=bcc, attachments=attachments, headers=headers ) db_msg = Message(priority=priority) db_msg.email = core_msg return db_msg class DontSendEntryManager(models.Manager): def has_address(self, address): """ is the given address on the don't send list? """ queryset = self.filter(to_address__iexact=address) return queryset.exists() class DontSendEntry(models.Model): to_address = models.EmailField(max_length=254) when_added = models.DateTimeField() # @@@ who added? # @@@ comment field? objects = DontSendEntryManager() class Meta: verbose_name = _("don't send entry") verbose_name_plural = _("don't send entries") RESULT_SUCCESS = "1" RESULT_DONT_SEND = "2" RESULT_FAILURE = "3" RESULT_CODES = ( (RESULT_SUCCESS, "success"), (RESULT_DONT_SEND, "don't send"), (RESULT_FAILURE, "failure"), # @@@ other types of failure? ) class MessageLogManager(models.Manager): def log(self, message, result_code, log_message=""): """ create a log entry for an attempt to send the given message and record the given result and (optionally) a log message """ return self.create( message_data=message.message_data, when_added=message.when_added, priority=message.priority, # @@@ other fields from Message result=result_code, log_message=log_message, ) class MessageLog(models.Model): # fields from Message message_data = models.TextField() when_added = models.DateTimeField(db_index=True) priority = models.CharField(max_length=1, choices=PRIORITIES, db_index=True) # @@@ campaign? # additional logging fields when_attempted = models.DateTimeField(default=datetime_now) result = models.CharField(max_length=1, choices=RESULT_CODES) log_message = models.TextField() objects = MessageLogManager() class Meta: verbose_name = _("message log") verbose_name_plural = _("message logs") @property def email(self): return db_to_email(self.message_data) @property def to_addresses(self): email = self.email if email is not None: return email.to else: return [] @property def subject(self): email = self.email if email is not None: return email.subject else: return ""
jawed123/django-mailer
mailer/models.py
Python
mit
7,585
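
# A short usage sketch for the queue above (illustrative, not from the
# package docs): build a message, save it so a send loop can pick it up,
# and later re-queue anything that was deferred.
from mailer.models import Message, make_message, PRIORITY_HIGH

msg = make_message(subject="Welcome",
                   body="Hello!",
                   from_email="noreply@example.com",
                   to=["user@example.com"],
                   priority=PRIORITY_HIGH)
msg.save()                        # queue it; recipients on the don't-send list
                                  # were already filtered out by make_message

Message.objects.retry_deferred()  # re-queue deferred mail at medium priority
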
from django.conf.urls import url

from .views import *
from . import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^contact/$', views.contact, name='contact'),
    url(r'^register/$', RegisterView.as_view(), name='register'),  # registration page
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', views.LogoutView, name='logout'),
    url(r'^profile/$', views.profile, name='profile'),  # the logged-in user's own profile
    url(r'^(?P<user>[\w\-]+)/$', views.detail, name='detail'),
    url(r'^(?P<user>[\w\-]+)/images/$', ImageManagementView.as_view(), name='image_listing'),  # image management for the given user
    url(r'^(?P<user>[\w\-]+)/edit/$', PortfolioUpdate.as_view(), name='portfolio_update'),
    url(r'^(?P<user>[\w\-]+)/project/new/$', ProjectCreate.as_view(), name='project_create'),
]
TobiasKundig/CSCI491Project
portfolio/urls.py
Python
mit
950
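
# Reversing the named routes above (a sketch only: it assumes this urlconf is
# mounted at the project root, since the patterns themselves are unprefixed,
# and it needs a configured Django settings module to actually run).
from django.urls import reverse

reverse('index')                                         # -> '/'
reverse('detail', kwargs={'user': 'jane-doe'})           # -> '/jane-doe/'
reverse('project_create', kwargs={'user': 'jane-doe'})   # -> '/jane-doe/project/new/'
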
# -*- coding: utf-8 -*- # # COPYRIGHT (C) 2018 Mutnick <mutnick@techie.com> # COPYRIGHT (C) 2016-2017 Michael Labouebe <gfarmerfr@free.fr> # COPYRIGHT (C) 2008-2011 Quinox <quinox@users.sf.net> # COPYRIGHT (C) 2009 Hedonist <ak@sensi.org> # COPYRIGHT (C) 2006-2009 Daelstorm <daelstorm@gmail.com> # COPYRIGHT (C) 2003-2004 Hyriand <hyriand@thegraveyard.org> # # GNU GENERAL PUBLIC LICENSE # Version 3, 29 June 2007 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from gettext import gettext as _ from math import ceil from time import time import gi from gi.repository import GObject as gobject from gi.repository import Gtk as gtk from pynicotine.gtkgui.utils import HumanSize from pynicotine.gtkgui.utils import HumanSpeed from pynicotine.gtkgui.utils import InitialiseColumns from pynicotine.gtkgui.utils import float_sort_func from pynicotine.gtkgui.utils import int_sort_func from pynicotine.utils import cmp gi.require_version('Gtk', '3.0') class TransferList: MINIMUM_GUI_DELAY = 1 # in seconds MINIMUM_GUI_DELAY_SLEEP = int(ceil(MINIMUM_GUI_DELAY * 2000)) # in ms status_tab = [ _("Getting status"), _("Waiting for download"), _("Waiting for upload"), _("Getting address"), _("Connecting"), _("Waiting for peer to connect"), _("Cannot connect"), _("User logged off"), _("Requesting file"), _("Initializing transfer"), _("Filtered"), _("Download directory error"), _("Local file error"), _("File not shared"), _("Aborted"), _("Paused"), _("Queued"), _("Transferring"), _("Finished") ] def __init__(self, frame, widget, type): self.frame = frame self.widget = widget self.type = type self.transfers = [] self.list = None self.selected_transfers = [] self.selected_users = [] self.users = {} self.lastupdate = 0 self.finalupdatetimerid = None widget.get_selection().set_mode(gtk.SelectionMode.MULTIPLE) columntypes = [ gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_UINT64, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_INT, gobject.TYPE_UINT64, gobject.TYPE_UINT64, gobject.TYPE_BOOLEAN, gobject.TYPE_STRING ] self.transfersmodel = gtk.TreeStore(*columntypes) widths = self.frame.np.config.sections["columns"]["{}_widths".format(type)] self.cols = cols = InitialiseColumns( widget, [_("User"), widths[0], "text", self.CellDataFunc], [_("Filename"), widths[1], "text", self.CellDataFunc], [_("Status"), widths[2], "text", self.CellDataFunc], [_("Queue Position"), widths[3], "text", self.CellDataFunc], [_("Percent"), widths[4], "progress"], [_("Size"), widths[5], "text", self.CellDataFunc], [_("Speed"), widths[6], "text", self.CellDataFunc], [_("Time elapsed"), widths[7], "text", self.CellDataFunc], [_("Time left"), widths[8], "text", self.CellDataFunc], [_("Path"), widths[9], "text", self.CellDataFunc] ) self.col_user, self.col_filename, self.col_status, self.col_position, self.col_percent, self.col_human_size, 
self.col_human_speed, self.col_time_elapsed, self.col_time_left, self.col_path = cols self.col_user.set_sort_column_id(0) self.col_filename.set_sort_column_id(1) self.col_status.set_sort_column_id(2) # Only view progress renderer on transfers, not user tree parents self.transfersmodel.set_sort_func(2, self.status_sort_func, 2) self.col_position.set_sort_column_id(3) self.transfersmodel.set_sort_func(3, int_sort_func, 3) self.col_percent.set_sort_column_id(11) self.col_percent.set_attributes(self.col_percent.get_cells()[0], value=4, visible=14) self.col_human_size.set_sort_column_id(12) self.col_human_speed.set_sort_column_id(6) self.col_time_elapsed.set_sort_column_id(7) self.col_time_left.set_sort_column_id(8) self.col_path.set_sort_column_id(9) self.transfersmodel.set_sort_func(11, self.progress_sort_func, 4) self.transfersmodel.set_sort_func(6, float_sort_func, 6) widget.set_model(self.transfersmodel) self.UpdateColours() def UpdateColours(self): self.frame.SetTextBG(self.widget) self.frame.ChangeListFont(self.widget, self.frame.np.config.sections["ui"]["transfersfont"]) def CellDataFunc(self, column, cellrenderer, model, iter, dummy="dummy"): colour = self.frame.np.config.sections["ui"]["search"] if colour == "": colour = None cellrenderer.set_property("foreground", colour) def get_status_index(self, val): try: return int(val) except Exception: if val in self.status_tab: return self.status_tab.index(val) else: return -len(self.status_tab) def status_sort_func(self, model, iter1, iter2, column): val1 = self.get_status_index(model.get_value(iter1, column)) val2 = self.get_status_index(model.get_value(iter2, column)) return cmp(val1, val2) def progress_sort_func(self, model, iter1, iter2, column): # We want 0% to be always below anything else, # so we have to look up whether we are ascending or descending ascending = True if model.get_sort_column_id()[1] == gtk.SortType.DESCENDING: ascending = False val1 = self.get_status_index(model.get_value(iter1, column)) val2 = self.get_status_index(model.get_value(iter2, column)) if val1 == 0 and val2 == 0: return 0 if val1 == 0: return -1 + (ascending * 2) if val2 == 0: return 1 - (ascending * 2) return cmp(val1, val2) def InitInterface(self, list): self.list = list self.update() self.widget.set_sensitive(True) def ConnClose(self): self.widget.set_sensitive(False) self.list = None self.Clear() self.transfersmodel.clear() self.transfers = [] self.users.clear() self.selected_transfers = [] self.selected_users = [] def SelectedTransfersCallback(self, model, path, iter): user = model.get_value(iter, 0) file = model.get_value(iter, 10) for i in self.list: if i.user == user and i.filename == file: self.selected_transfers.append(i) break if user not in self.selected_users: self.selected_users.append(user) def SelectCurrentRow(self, event, kind): # If nothing is selected (first click was right-click?) 
try to select the current row if self.selected_transfers == [] and self.selected_users == [] and kind == "mouse": d = self.widget.get_path_at_pos(int(event.x), int(event.y)) if d: path, column, x, y = d iter = self.transfersmodel.get_iter(path) user = self.transfersmodel.get_value(iter, 0) file = self.transfersmodel.get_value(iter, 10) if path is not None: sel = self.widget.get_selection() sel.unselect_all() sel.select_path(path) for i in self.list: if i.user == user and i.filename == file: self.selected_transfers.append(i) break if user not in self.selected_users: self.selected_users.append(user) def TranslateStatus(self, status): if status == "Waiting for download": newstatus = _("Waiting for download") elif status == "Waiting for upload": newstatus = _("Waiting for upload") elif status == "Requesting file": newstatus = _("Requesting file") elif status == "Initializing transfer": newstatus = _("Initializing transfer") elif status == "Cannot connect": newstatus = _("Cannot connect") elif status == "Waiting for peer to connect": newstatus = _("Waiting for peer to connect") elif status == "Connecting": newstatus = _("Connecting") elif status == "Getting address": newstatus = _("Getting address") elif status == "Getting status": newstatus = _("Getting status") elif status == "Queued": newstatus = _("Queued") elif status == "User logged off": newstatus = _("User logged off") elif status == "Aborted": newstatus = _("Aborted") elif status == "Finished": newstatus = _("Finished") elif status == "Paused": newstatus = _("Paused") elif status == "Transferring": newstatus = _("Transferring") elif status == "Filtered": newstatus = _("Filtered") elif status == "Connection closed by peer": newstatus = _("Connection closed by peer") elif status == "File not shared": newstatus = _("File not shared") elif status == "Establishing connection": newstatus = _("Establishing connection") elif status == "Download directory error": newstatus = _("Download directory error") elif status == "Local file error": newstatus = _("Local file error") else: newstatus = status return newstatus def finalupdate(self, func): now = time() # I had a logical explanation about why it has to be 3*delay, but I # forgot. Something to do with the timeout being 2*delay if (now - self.lastupdate) < (3 * self.MINIMUM_GUI_DELAY): # The list has been updated recently, # trying again later. return True self.update(forced=True) # delayed updates can never trigger a new timer self.finalupdatetimerid = None return False # Stopping timeout def replace(self, oldtransfer, newtransfer): for i in self.transfers: if i[2] == oldtransfer: i[2] = newtransfer self.update_specific(newtransfer) return else: print(("WARNING: Could not find transfer %s." % oldtransfer)) def update(self, transfer=None, forced=False): current_page = self.frame.MainNotebook.get_current_page() my_page = self.frame.MainNotebook.page_num(self.myvbox) if (current_page == my_page): self._update(transfer, forced) self.frame.UpdateBandwidth() def _update(self, transfer=None, forced=True): now = time() if forced: self.lastupdate = time() # ...we're working... if transfer is not None: self.update_specific(transfer) elif self.list is not None: # This seems to me to be O(n^2), perhaps constructing a temp. dict # from self.list would be better? 
for i in self.transfers[:]: for j in self.list: if [j.user, j.filename] == i[0]: break else: # Remove transfers from treeview that aren't in the transfer list self.transfersmodel.remove(i[1]) self.transfers.remove(i) for i in self.list: self.update_specific(i) # The rest is just summarizing so it's not too important. # It's fairly CPU intensive though, so we only do it if we haven't updated it recently if not forced and (now - self.lastupdate) < self.MINIMUM_GUI_DELAY: if not self.finalupdatetimerid: self.finalupdatetimerid = True # I'm not sure if gobject returns fast enough self.finalupdatetimerid = gobject.timeout_add(self.MINIMUM_GUI_DELAY_SLEEP, self.finalupdate, self.update) return self.lastupdate = time() # ...we're working... # Remove empty parent rows for (username, user) in [x for x in self.users.items()]: if not self.transfersmodel.iter_has_child(user): self.transfersmodel.remove(user) del self.users[username] else: files = self.transfersmodel.iter_n_children(user) ispeed = 0.0 percent = totalsize = position = 0 elapsed = left = "" elap = 0 salientstatus = "" extensions = {} for f in range(files): iter = self.transfersmodel.iter_nth_child(user, f) filename = self.transfersmodel.get_value(iter, 10) parts = filename.rsplit('.', 1) if len(parts) == 2: ext = parts[1] try: extensions[ext.lower()] += 1 except KeyError: extensions[ext.lower()] = 1 for transfer in self.list: if [transfer.user, transfer.filename] == [username, filename] and transfer.timeelapsed is not None: elap += transfer.timeelapsed break totalsize += self.transfersmodel.get_value(iter, 12) position += self.transfersmodel.get_value(iter, 13) status = self.transfersmodel.get_value(iter, 2) if status == _("Transferring"): str_speed = self.transfersmodel.get_value(iter, 15) if str_speed != "": ispeed += float(str_speed) left = self.transfersmodel.get_value(iter, 8) if salientstatus in ('', _("Finished")): # we prefer anything over ''/finished salientstatus = status if status in (_("Transferring"), _("Banned"), _("Getting address"), _("Establishing connection")): salientstatus = status try: speed = "%.1f" % ispeed except TypeError: speed = str(ispeed) if totalsize > 0: percent = ((100 * position) / totalsize) if ispeed <= 0.0: left = "∞" else: left = self.frame.np.transfers.getTime((totalsize - position) / ispeed / 1024) elapsed = self.frame.np.transfers.getTime(elap) if len(extensions) == 0: extensions = "Unknown" elif len(extensions) == 1: extensions = _("All %(ext)s") % {'ext': list(extensions.keys())[0]} else: extensionlst = [(extensions[key], key) for key in extensions] extensionlst.sort(reverse=True) extensions = ", ".join([str(count) + " " + ext for (count, ext) in extensionlst]) self.transfersmodel.set( user, 1, _("%(number)2s files ") % {'number': files} + " (" + extensions + ")", 2, salientstatus, 4, percent, 5, "%s / %s" % (HumanSize(position), HumanSize(totalsize)), 6, HumanSpeed(speed), 7, elapsed, 8, left, 12, ispeed, 14, True, 15, speed ) self.lastupdate = time() # ...and we're done def update_specific(self, transfer=None): if transfer not in self.list: return fn = transfer.filename user = transfer.user shortfn = fn.split("\\")[-1] currentbytes = transfer.currentbytes place = transfer.place if currentbytes is None: currentbytes = 0 key = [user, fn] status = HumanSize(self.TranslateStatus(transfer.status)) istatus = self.get_status_index(transfer.status) try: size = int(transfer.size) except TypeError: size = 0 hsize = "%s / %s" % (HumanSize(currentbytes), HumanSize(size)) if transfer.modifier: hsize += " 
(%s)" % transfer.modifier try: speed = "%.1f" % transfer.speed except TypeError: speed = str(transfer.speed) elap = transfer.timeelapsed left = str(transfer.timeleft) if speed == "None": speed = "" else: # transfer.speed is in KB speed = float(speed) * 1024 if elap is None: elap = 0 elap = self.frame.np.transfers.getTime(elap) if left == "None": left = "" try: icurrentbytes = int(currentbytes) if icurrentbytes == int(transfer.size): percent = 100 else: percent = ((100 * icurrentbytes) / int(size)) except Exception as e: # noqa: F841 icurrentbytes = 0 percent = 0 # Modify old transfer for i in self.transfers: if i[0] != key: continue if i[2] != transfer: continue self.transfersmodel.set( i[1], 1, shortfn, 2, status, 3, str(place), 4, percent, 5, str(hsize), 6, HumanSpeed(speed), 7, elap, 8, left, 9, transfer.path, 11, istatus, 12, size, 13, currentbytes, 15, str(speed) ) break else: newparent = False if self.TreeUsers: if user not in self.users: # Create Parent if it doesn't exist # ProgressRender not visible (last column sets 4th column) self.users[user] = self.transfersmodel.append( None, [user, "", "", "", 0, "", "", "", "", "", "", 0, 0, 0, False, ""] ) newparent = True parent = self.users[user] else: parent = None # Add a new transfer path = transfer.path iter = self.transfersmodel.append( parent, [user, shortfn, status, str(place), percent, str(hsize), HumanSpeed(speed), elap, left, path, fn, istatus, size, icurrentbytes, True, str(speed)] ) # Expand path path = self.transfersmodel.get_path(iter) self.transfers.append([key, iter, transfer]) if newparent: self.expandcollapse(self.transfersmodel.get_path(parent)) def Clear(self): self.users.clear() self.transfers = [] self.selected_transfers = [] self.selected_users = [] self.transfersmodel.clear() def OnCopyURL(self, widget): i = self.selected_transfers[0] self.frame.SetClipboardURL(i.user, i.filename) def OnCopyDirURL(self, widget): i = self.selected_transfers[0] path = "\\".join(i.filename.split("\\")[:-1]) + "\\" if path[:-1] != "/": path += "/" self.frame.SetClipboardURL(i.user, path) def OnAbortTransfer(self, widget, remove=False, clear=False): transfers = self.selected_transfers for i in transfers: self.frame.np.transfers.AbortTransfer(i, remove) i.status = "Aborted" i.req = None if clear: for t in self.list[:]: if i.user == t.user and i.filename == t.filename: self.list.remove(t) self.update() def OnClearTransfer(self, widget): self.OnAbortTransfer(widget, False, True) def ClearTransfers(self, status): for i in self.list[:]: if i.status in status: if i.transfertimer is not None: i.transfertimer.cancel() self.list.remove(i) self.update() def OnClearFinished(self, widget): self.ClearTransfers(["Finished"]) def OnClearAborted(self, widget): statuslist = ["Aborted", "Cancelled"] self.ClearTransfers(statuslist) def OnClearFiltered(self, widget): statuslist = ["Filtered"] self.ClearTransfers(statuslist) def OnClearFailed(self, widget): statuslist = ["Cannot connect", "Connection closed by peer", "Local file error", "Getting address", "Waiting for peer to connect", "Initializing transfer"] self.ClearTransfers(statuslist) def OnClearPaused(self, widget): statuslist = ["Paused"] self.ClearTransfers(statuslist) def OnClearFinishedAborted(self, widget): statuslist = ["Aborted", "Cancelled", "Finished", "Filtered"] self.ClearTransfers(statuslist) def OnClearFinishedErred(self, widget): statuslist = ["Aborted", "Cancelled", "Finished", "Filtered", "Cannot connect", "Connection closed by peer", "Local file error"] 
self.ClearTransfers(statuslist) def OnClearQueued(self, widget): self.ClearTransfers(["Queued"])
eLvErDe/nicotine-plus
pynicotine/gtkgui/transferlist.py
Python
gpl-3.0
22,723
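The _update()/finalupdate() pair in transferlist.py above implements a simple update coalescer: cheap per-row changes are applied immediately, while the expensive summary pass is deferred until MINIMUM_GUI_DELAY has elapsed. A minimal, toolkit-free sketch of the same pattern, assuming nothing from nicotine-plus (the class and names below are illustrative only, with threading.Timer standing in for gobject.timeout_add):

import threading
import time


class CoalescedUpdater(object):
    """Run an expensive refresh at most once per delay window."""

    MINIMUM_GUI_DELAY = 0.5  # seconds; illustrative value

    def __init__(self, refresh):
        self.refresh = refresh  # the expensive summary pass
        self.lastupdate = 0.0
        self.timer = None

    def update(self):
        now = time.monotonic()
        if now - self.lastupdate >= self.MINIMUM_GUI_DELAY:
            self.lastupdate = now
            self.refresh()
        elif self.timer is None:
            # Too soon: schedule a single trailing refresh instead of many.
            self.timer = threading.Timer(self.MINIMUM_GUI_DELAY, self._flush)
            self.timer.start()

    def _flush(self):
        self.timer = None
        self.lastupdate = time.monotonic()
        self.refresh()

Calling update() in a tight loop then produces one immediate refresh plus at most one trailing refresh, rather than one refresh per call.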
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sahara.exceptions as e from sahara.i18n import _ from sahara.utils import api_validator as a def check_job_binary_internal(data, **kwargs): if not (type(data) is str and len(data) > 0): raise e.BadJobBinaryInternalException() if "name" in kwargs: name = kwargs["name"] if not a.validate_name_format(name): raise e.BadJobBinaryInternalException(_("%s is not a valid name") % name)
ekasitk/sahara
sahara/service/validations/edp/job_binary_internal.py
Python
apache-2.0
1,066
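A quick usage sketch of the validator above. The call sites are hypothetical and assume sahara is importable; the import path follows this file's location:

from sahara.service.validations.edp import job_binary_internal as v
import sahara.exceptions as ex

# A non-empty string with a valid name passes silently.
v.check_job_binary_internal("#!/bin/sh\necho ok\n", name="run.sh")

try:
    # An empty payload is rejected before it ever reaches storage.
    v.check_job_binary_internal("")
except ex.BadJobBinaryInternalException:
    print("empty job binary rejected")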
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from heatclient import client as hc from heatclient import exc from heat.engine.clients import client_plugin CLIENT_NAME = 'heat' class HeatClientPlugin(client_plugin.ClientPlugin): exceptions_module = exc service_types = [ORCHESTRATION, CLOUDFORMATION] = ['orchestration', 'cloudformation'] def _create(self): endpoint = self.get_heat_url() args = {} if self._get_client_option(CLIENT_NAME, 'url'): # assume that the heat API URL is manually configured because # it is not in the keystone catalog, so include the credentials # for the standalone auth_password middleware args['username'] = self.context.username args['password'] = self.context.password args['connect_retries'] = cfg.CONF.client_retry_limit return hc.Client('1', endpoint_override=endpoint, session=self.context.keystone_session, **args) def is_not_found(self, ex): return isinstance(ex, exc.HTTPNotFound) def is_over_limit(self, ex): return isinstance(ex, exc.HTTPOverLimit) def is_conflict(self, ex): return isinstance(ex, exc.HTTPConflict) def get_heat_url(self): heat_url = self._get_client_option(CLIENT_NAME, 'url') if heat_url: tenant_id = self.context.tenant_id heat_url = heat_url % {'tenant_id': tenant_id} else: endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type') heat_url = self.url_for(service_type=self.ORCHESTRATION, endpoint_type=endpoint_type) return heat_url def get_heat_cfn_url(self): endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type') heat_cfn_url = self.url_for(service_type=self.CLOUDFORMATION, endpoint_type=endpoint_type) return heat_cfn_url def get_cfn_metadata_server_url(self): # Historically, we've required heat_metadata_server_url set in # heat.conf, which simply points to the heat-api-cfn endpoint in # most cases, so fall back to looking in the catalog when not set config_url = cfg.CONF.heat_metadata_server_url if config_url is None: config_url = self.get_heat_cfn_url() # Backwards compatibility, previous heat_metadata_server_url # values didn't have to include the version path suffix # Also, we always added a trailing "/" in nova/server.py, # which looks not required by os-collect-config, but maintain # to avoid any risk other folks have scripts which expect it. if '/v1' not in config_url: config_url += '/v1' if config_url and config_url[-1] != "/": config_url += '/' return config_url def get_insecure_option(self): return self._get_client_option(CLIENT_NAME, 'insecure')
openstack/heat
heat/engine/clients/os/heat_plugin.py
Python
apache-2.0
3,700
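get_heat_url() above relies on old-style %-formatting with a named placeholder, so a manually configured `url` client option must contain %(tenant_id)s. A standalone illustration of the substitution (the URL and tenant id are made up):

template = "http://heat.example.com:8004/v1/%(tenant_id)s"
print(template % {"tenant_id": "d3adb33f"})
# -> http://heat.example.com:8004/v1/d3adb33f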
from typing import Any import swagger_client as saltedge_client def create_customer_in_saltedge( profile: Any, api: saltedge_client.CustomersApi ) -> None: data = saltedge_client.CustomerRequestBodyData( identifier=str(profile.user.username) ) body = saltedge_client.CustomerRequestBody(data) response = api.customers_post(body=body) profile.external_id = int(response.data.id) profile.save() def remove_customer_from_saltedge( profile: Any, api: saltedge_client.CustomersApi ) -> None: api.customers_customer_id_delete(str(profile.external_id)) profile.external_id = None profile.save()
ltowarek/budget-supervisor
budgetsupervisor/users/services.py
Python
mit
645
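A sketch of how these helpers might be exercised outside Django. FakeProfile is a stand-in for the real profile model (only the attributes the helpers touch), the import path is inferred from this file's location, and constructing CustomersApi() with no arguments assumes credentials are configured elsewhere:

import swagger_client as saltedge_client

from users.services import create_customer_in_saltedge, remove_customer_from_saltedge


class FakeUser(object):
    username = "alice"


class FakeProfile(object):
    """Stand-in for the profile model: just what the helpers need."""
    user = FakeUser()
    external_id = None

    def save(self):
        pass  # a real profile would persist to the database here


api = saltedge_client.CustomersApi()
profile = FakeProfile()

create_customer_in_saltedge(profile, api)  # registers and stores the Salt Edge id
remove_customer_from_saltedge(profile, api)  # deletes it and clears the id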
from core.moduleguessbase import ModuleGuessBase
from core.moduleexception import ModuleException, ProbeException, ExecutionException, ProbeSucceed


class ModuleGuess(ModuleGuessBase):
    '''Generic ModuleGuess class to inherit from.

    A ModuleGuess object is a dynamically loaded Weevely extension that
    automatically guesses the best way to accomplish a task on the remote
    target. Vector objects contain the code to run on the remote target.

    To create a new module, define a class that inherits from ModuleGuess
    (e.g. 'class MyModule(ModuleGuess)') in a Python file located at
    'modules/mygroup/mymodule.py'. The class needs the same name as the
    Python file, with the first letter capitalized.

    At first run (e.g. running ':mygroup.mymodule' from the terminal for the
    first time), the module constructor executes the following main tasks:

    A) Defines module arguments (method _set_args(), overriding is recommended)
    B) Defines module vectors (method _set_vectors(), overriding is recommended)

    At every call (e.g. at every ':mygroup.mymodule' run), the run() method
    parses the passed arguments and executes the following main tasks:

    1) Optionally prepares the environment (method _prepare(), overriding is optional)
    2) Runs every vector to guess the best way to accomplish the task.
       Guessing stops as soon as the first vector returns good results.
       These three methods are executed for every vector:
       2.1) Formats the passed arguments to simplify the current_vector run
            (method _prepare_vector(), overriding is recommended)
       2.2) Runs current_vector and saves the results
            (method _execute_vector(), overriding is optional)
       2.3) Verifies the probe execution
            (method _verify_vector_execution(), overriding is optional)
    3) Optionally verifies the probe execution (method _verify(), overriding is optional)

    Example of a basic module that downloads files from the web onto the target:

==================================== webdownload.py ===================================

from core.moduleguess import ModuleGuess
from core.moduleexception import ProbeException, ProbeSucceed

WARN_DOWNLOAD_OK = 'Download succeeded'

class Webdownload(ModuleGuess):

    def _set_args(self):
        # Declare the accepted module parameters. The '-vector' parameter lets
        # the user pick a specific vector and skip guessing. Parameters passed
        # at run time are stored in the self.args dictionary.
        self.argparser.add_argument('url')
        self.argparser.add_argument('rpath')
        self.argparser.add_argument('-vector', choices=self.vectors.keys())

    def _set_vectors(self):
        # Declare the vectors to execute.
        # Vectors defined in self.vectors are three different ways to
        # accomplish the task. They are executed in succession: the first
        # vector that returns a positive result breaks the probe.
        # Vectors defined in self.support_vectors are support vectors,
        # executed manually.
        # Payload variable fields '$rpath' and '$url' are replaced at vector
        # execution. Because those fields correspond to the module arguments,
        # it is not necessary to override _prepare_vector() and
        # _execute_vector().

        self.vectors.add_vector(name='putcontent', interpreter='shell.php', payloads=[ 'file_put_contents("$rpath", file_get_contents("$url"));' ])
        self.vectors.add_vector(name='wget', interpreter='shell.sh', payloads=[ 'wget $url -O $rpath' ])
        self.vectors.add_vector(name='curl', interpreter='shell.sh', payloads=[ 'curl -o $rpath $url' ])
        self.support_vectors.add_vector(name='check_download', interpreter='file.check', payloads=[ '$rpath', 'exists' ])

    def _verify_vector_execution(self):
        # Verify the downloaded file. Save the vector return value in
        # self._result and eventually raise ProbeSucceed to stop module
        # execution. If not even one vector raises a ProbeSucceed or
        # ProbeException to break the flow, the probe ends with an error
        # due to the negative value of self._result.

        self._result = self.support_vectors.get('check_download').execute({ 'rpath': self.args['rpath'] })
        if self._result == True:
            raise ProbeSucceed(self.name, WARN_DOWNLOAD_OK)

=============================================================================

    '''

    def _set_vectors(self):
        """Override this method to add vectors to the self.vectors and
        self.support_vectors lists, easily callable in the _probe() function.
        This method is called by the module constructor.

        Example of a vector declaration:

        > self.support_vectors.add_vector(name='vector_name', interpreter='module_name', payloads=[ 'module_param1', '$module_param2', .. ])

        Template fields like '$rpath' are replaced at vector execution.
        """
        pass

    def _set_args(self):
        """Override this method to set self.argparser arguments. Set new
        arguments following the official Python argparse documentation.
        This method is called by the module constructor. Arguments passed
        at module run are stored in the Module.args dictionary.
        """
        pass

    def _init_module(self):
        """Override this method to set eventual additional variables.
        Called by the module constructor.
        """

    def _prepare(self):
        """Override this method to prepare the environment for the probe.
        This method is called at every module run.
        Throws ModuleException, ProbeException.
        """
        pass

    def _prepare_vector(self):
        """Override this method to properly prepare self.formatted_args for
        the self.current_vector execution. This method is called for every
        vector. Throws ProbeException to break the module run with an error,
        ProbeSucceed to break the module run in case of success, and
        ExecutionException to skip the single self.current_vector execution.
        """
        self.formatted_args = self.args

    def _execute_vector(self):
        """This method executes self.current_vector. It is recommended not to
        override it, but to prepare the arguments properly via
        self.formatted_args in ModuleGuess._prepare_vector(). Vector execution
        results should be stored in self._result. This method is called for
        every vector. Throws ProbeException to break the module run with an
        error, ProbeSucceed to break the module run in case of success, and
        ExecutionException to skip the single self.current_vector execution.
        """
        self._result = self.current_vector.execute(self.formatted_args)

    def _verify_vector_execution(self):
        """This method verifies the vector execution results. It is
        recommended not to override it, but just to fill self._result properly
        in ModuleGuess._execute_vector(). This method is called for every
        vector. Throws ProbeException to break the module run with an error,
        ProbeSucceed to break the module run in case of success, and
        ExecutionException to skip the single self.current_vector execution.
        """
        # Any set result counts as success; even False can be a meaningful
        # return value.
        if self._result or self._result == False:
            raise ProbeSucceed(self.name, 'Command succeeded')

    def _verify(self):
        """Override this method to check the probe result. Results to print
        and return after module execution should be stored in self._result.
        It is called at every module run.
        Throws ModuleException, ProbeException, ProbeSucceed.
        """
        pass
jorik041/Weevely
core/moduleguess.py
Python
gpl-3.0
8,074
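The run flow described in the ModuleGuess docstring boils down to "try each vector in order until one succeeds". A framework-free sketch of that control flow; the exception classes are stubbed here for illustration (the real ones live in core.moduleexception), and vectors are modeled as plain callables:

class ExecutionException(Exception):
    """Skip the current vector and try the next one."""

class ProbeSucceed(Exception):
    """Stop guessing: a vector accomplished the task."""


def guess(vectors, args):
    """Try each vector in order; the first usable result wins."""
    result = None
    for vector in vectors:
        try:
            result = vector(args)   # stands in for _prepare_vector + _execute_vector
            if result is not None:  # stands in for _verify_vector_execution
                raise ProbeSucceed
        except ExecutionException:
            continue                # this vector cannot run here
        except ProbeSucceed:
            break
    return result


# The second "vector" succeeds, so the third is never tried.
print(guess([lambda a: None, lambda a: "hit", lambda a: "never"], {}))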
""" You are given a square matrix of size N×N. Can you calculate the absolute difference of the sums across the main diagonal and the secondary diagonal? """ #!/bin/python n = int(raw_input().strip()) mat = [] diagMain = 0 diagMinor = 0 # O(N) for a_i in xrange(n): a_temp = map(int,raw_input().strip().split(' ')) mat.append(a_temp) if mat: # O(N) for index, eachList in enumerate(mat): diagMain += eachList[index] diagMinor += eachList[(n-1)-index] print abs(diagMain-diagMinor)
codecakes/algorithms
algorithms/practice/diagonalMatrixDiff.py
Python
mit
521
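A worked example of the diagonal difference computed by the script above, written as a self-contained snippet (the matrix is illustrative, not part of the original input):

# Main diagonal: 11 + 5 + (-12) = 4; secondary: 4 + 5 + 10 = 19; |4 - 19| = 15.
mat = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]
n = len(mat)
main = sum(mat[i][i] for i in range(n))
minor = sum(mat[i][n - 1 - i] for i in range(n))
print(abs(main - minor))  # 15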
# The MIT License (MIT)
#
# Copyright (c) 2015 Brian Wray (brian@wrocket.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import subprocess
import json
import unittest


def call_tulip(args):
    cmd = ['../../src/tulip']
    cmd.extend(args)
    out = subprocess.check_output(cmd)
    return out.decode('utf-8')


class SearchResult:
    def __init__(self, move, score, move_scores=None):
        self.move = move
        self.score = score
        # Avoid the shared-mutable-default-argument pitfall.
        self.move_scores = move_scores if move_scores is not None else []


class TestSearch(unittest.TestCase):
    def setUp(self):
        pass

    def get_result(self, fen):
        result = call_tulip(['-simplesearch', fen])
        parsed_output = json.loads(result)
        json_obj = parsed_output['searchResult']
        parsed_scores = [(x['move'], x['score']) for x in json_obj['rootNodeScores']]
        sorted_scores = sorted(parsed_scores, key=lambda x: -x[1])  # sort move scores best-first
        return SearchResult(move=json_obj['move'], score=json_obj['score'], move_scores=sorted_scores)

    def test_easy_skewer(self):
        result = self.get_result('4q3/8/8/8/4k3/8/8/2KR4 w - - 0 1')
        self.assertEqual('Re1+', result.move)

    def test_friend_liver_qs(self):
        result = self.get_result('rnbqkb1r/1pp1ppp1/p4n1p/1N1p4/3P1B2/8/PPP1PPPP/R2QKBNR w KQkq - 0 5')
        self.assertEqual('Nxc7+', result.move)

    def test_simple_fork(self):
        result = self.get_result('7k/8/2n4p/8/2K3Q1/8/8/8 b - - 0 1')
        self.assertEqual('Ne5+', result.move)

    def test_play_for_draw(self):
        result = self.get_result('7k/8/2n5/8/2K3Q1/8/8/8 b - - 0 1')
        self.assertEqual('Ne5+', result.move)

    def test_short_mate(self):
        result = self.get_result('1rb2rk1/p1q1ppbp/n2p3B/3n1P2/1ppP4/3B1N2/PPPQN1PP/K2R3R w - - 0 1')
        self.assertEqual('Qg5', result.move)


if __name__ == '__main__':
    unittest.main()
wrocket/Tulip-Chess
tests/search_tests/test_simple_attacks.py
Python
mit
2,888
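get_result() above depends on a particular JSON shape emitted by the engine. Here is a made-up payload showing only the fields the test reads; the moves and scores are illustrative, not real engine output:

import json

sample = json.loads("""
{"searchResult": {"move": "Re1+", "score": 950,
  "rootNodeScores": [{"move": "Rd2", "score": 10},
                     {"move": "Re1+", "score": 950}]}}
""")
scores = [(x["move"], x["score"]) for x in sample["searchResult"]["rootNodeScores"]]
scores.sort(key=lambda x: -x[1])  # best move first
print(scores[0])                  # ('Re1+', 950)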
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Liblarch - a library to handle directed acyclic graphs
# Copyright (c) 2011-2012 - Lionel Dricot & Izidor Matušov
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------

from distutils.core import setup
import codecs
import os


def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file. Assume UTF-8 encoding.
    """
    HERE = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
        return f.read()


setup(
    version='3.2.0',
    url='https://wiki.gnome.org/Projects/liblarch',
    author='Lionel Dricot & Izidor Matušov',
    author_email='gtg-contributors@lists.launchpad.net',
    license='LGPLv3',
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    name='liblarch',
    packages=['liblarch', 'liblarch_gtk'],
    python_requires=">=3.5",
    keywords=["gtk", "treeview", "treemodel"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: X11 Applications :: GTK",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.5",
        "Topic :: Desktop Environment :: Gnome",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: User Interfaces",
    ],
    description=(
        'LibLarch is a python library built to easily handle '
        'data structures such as lists, trees and directed acyclic graphs '
        'and represent them as a GTK TreeWidget or in other forms.'
    ),
)
getting-things-gnome/liblarch
setup.py
Python
lgpl-3.0
2,629
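The read() helper in setup.py anchors paths to the directory containing the script itself, not the current working directory, so builds started from anywhere still find README.md. The same idiom in isolation (README.md is assumed to sit next to the script):

import codecs
import os

HERE = os.path.abspath(os.path.dirname(__file__))


def read(*parts):
    with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
        return f.read()


os.chdir("/tmp")               # simulate a build tool running elsewhere
print(read("README.md")[:40])  # still resolves next to this file, not the CWD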
import io
import json


class Stream(object):
    """
    This is a base class that should be inherited when implementing
    different stream types. Should only be created by plugins.
    """

    __shortname__ = "stream"

    def __init__(self, session):
        self.session = session

    def __repr__(self):
        return "<Stream()>"

    def __json__(self):
        return dict(type=type(self).shortname())

    def open(self):
        """
        Attempts to open a connection to the stream.
        Returns a file-like object that can be used to read the stream data.

        Raises :exc:`StreamError` on failure.
        """
        raise NotImplementedError

    @property
    def json(self):
        obj = self.__json__()
        return json.dumps(obj)

    @classmethod
    def shortname(cls):
        return cls.__shortname__


class StreamIO(io.IOBase):
    pass


__all__ = ["Stream", "StreamIO"]
ethanhlc/streamlink
src/streamlink/stream/stream.py
Python
bsd-2-clause
915
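A minimal sketch of how a plugin-side subclass of Stream might look, given only the base class above. FileStream and its path argument are illustrative inventions, not part of streamlink; they just exercise the documented contract (override open(), extend __json__(), set __shortname__):

class FileStream(Stream):
    __shortname__ = "file"

    def __init__(self, session, path):
        super(FileStream, self).__init__(session)
        self.path = path

    def __json__(self):
        # Extend the base serialization with subclass-specific fields.
        d = super(FileStream, self).__json__()
        d["path"] = self.path
        return d

    def open(self):
        # Return a file-like object, as the base class's open() contract asks.
        return open(self.path, "rb")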