repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
EdLeafe/pyrax
samples/images/export_task.py
13
2383
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c)2014 Rackspace US, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import os import six import pyrax pyrax.set_setting("identity_type", "rackspace") creds_file = os.path.expanduser("~/.rackspace_cloud_credentials") pyrax.set_credential_file(creds_file) imgs = pyrax.images cf = pyrax.cloudfiles print("You will need to select an image to export, and a Container into which " "the exported image will be placed.") images = imgs.list(visibility="private") print() print("Select an image to export:") for pos, image in enumerate(images): print("[%s] %s" % (pos, image.name)) snum = six.moves.input("Enter the number of the image you want to share: ") if not snum: exit() try: num = int(snum) except ValueError: print("'%s' is not a valid number." % snum) exit() if not 0 <= num < len(images): print("'%s' is not a valid image number." % snum) exit() image = images[num] conts = cf.list() print() print("Select the target container to place the exported image:") for pos, cont in enumerate(conts): print("[%s] %s" % (pos, cont.name)) snum = six.moves.input("Enter the number of the container: ") if not snum: exit() try: num = int(snum) except ValueError: print("'%s' is not a valid number." % snum) exit() if not 0 <= num < len(conts): print("'%s' is not a valid container number." 
% snum) exit() cont = conts[num] task = imgs.export_task(image, cont) print("Task ID=%s" % task.id) print() answer = six.moves.input("Do you want to track the task until completion? This " "may take several minutes. [y/N]: ") if answer and answer[0].lower() == "y": pyrax.utils.wait_until(task, "status", ["success", "failure"], verbose=True, interval=30)
apache-2.0
tafaRU/account-financial-tools
__unported__/account_move_template/wizard/select_template.py
7
10189
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>) # Copyright (C) 2011 Domsense srl (<http://www.domsense.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, orm import time from openerp.tools.translate import _ class wizard_select_template(orm.TransientModel): _name = "wizard.select.move.template" _columns = { 'template_id': fields.many2one( 'account.move.template', 'Move Template', required=True ), 'partner_id': fields.many2one('res.partner', 'Partner'), 'line_ids': fields.one2many( 'wizard.select.move.template.line', 'template_id', 'Lines' ), 'state': fields.selection( [ ('template_selected', 'Template selected'), ], 'State' ), } def on_change_template_id(self, cr, uid, ids, template_id): res = {} if template_id: res['value'] = {'line_ids': []} template_pool = self.pool.get('account.move.template') template = template_pool.browse(cr, uid, template_id) for line in template.template_line_ids: if line.type == 'input': res['value']['line_ids'].append({ 'sequence': line.sequence, 'name': line.name, 'account_id': line.account_id.id, 'move_line_type': line.move_line_type, }) return res def load_lines(self, cr, uid, ids, context=None): 
wizard = self.browse(cr, uid, ids, context=context)[0] template_pool = self.pool.get('account.move.template') wizard_line_pool = self.pool.get('wizard.select.move.template.line') model_data_obj = self.pool.get('ir.model.data') template = template_pool.browse(cr, uid, wizard.template_id.id) for line in template.template_line_ids: if line.type == 'input': wizard_line_pool.create(cr, uid, { 'template_id': wizard.id, 'sequence': line.sequence, 'name': line.name, 'amount': 0.0, 'account_id': line.account_id.id, 'move_line_type': line.move_line_type, }) if not wizard.line_ids: return self.load_template(cr, uid, ids) wizard.write({'state': 'template_selected'}) view_rec = model_data_obj.get_object_reference( cr, uid, 'account_move_template', 'wizard_select_template') view_id = view_rec and view_rec[1] or False return { 'view_type': 'form', 'view_id': [view_id], 'view_mode': 'form', 'res_model': 'wizard.select.move.template', 'res_id': wizard.id, 'type': 'ir.actions.act_window', 'target': 'new', 'context': context, } def load_template(self, cr, uid, ids, context=None): template_obj = self.pool.get('account.move.template') account_period_obj = self.pool.get('account.period') wizard = self.browse(cr, uid, ids, context=context)[0] if not template_obj.check_zero_lines(cr, uid, wizard): raise orm.except_orm( _('Error !'), _('At least one amount has to be non-zero!') ) input_lines = {} for template_line in wizard.line_ids: input_lines[template_line.sequence] = template_line.amount period_id = account_period_obj.find(cr, uid, context=context) if not period_id: raise orm.except_orm( _('No period found !'), _('Unable to find a valid period !') ) period_id = period_id[0] computed_lines = template_obj.compute_lines( cr, uid, wizard.template_id.id, input_lines) moves = {} for line in wizard.template_id.template_line_ids: if line.journal_id.id not in moves: moves[line.journal_id.id] = self._make_move( cr, uid, wizard.template_id.name, period_id, line.journal_id.id, wizard.partner_id.id 
) self._make_move_line( cr, uid, line, computed_lines, moves[line.journal_id.id], period_id, wizard.partner_id.id ) if wizard.template_id.cross_journals: trans_account_id = wizard.template_id.transitory_acc_id.id self._make_transitory_move_line( cr, uid, line, computed_lines, moves[line.journal_id.id], period_id, trans_account_id, wizard.partner_id.id ) return { 'domain': "[('id','in', " + str(moves.values()) + ")]", 'name': 'Entries', 'view_type': 'form', 'view_mode': 'tree,form', 'res_model': 'account.move', 'type': 'ir.actions.act_window', 'target': 'current', } def _make_move(self, cr, uid, ref, period_id, journal_id, partner_id): account_move_obj = self.pool.get('account.move') move_id = account_move_obj.create(cr, uid, { 'ref': ref, 'period_id': period_id, 'journal_id': journal_id, 'partner_id': partner_id, }) return move_id def _make_move_line(self, cr, uid, line, computed_lines, move_id, period_id, partner_id): account_move_line_obj = self.pool.get('account.move.line') analytic_account_id = False if line.analytic_account_id: if not line.journal_id.analytic_journal_id: raise orm.except_orm( _('No Analytic Journal !'), _("You have to define an analytic " "journal on the '%s' journal!") % (line.journal_id.name,) ) analytic_account_id = line.analytic_account_id.id val = { 'name': line.name, 'move_id': move_id, 'journal_id': line.journal_id.id, 'period_id': period_id, 'analytic_account_id': analytic_account_id, 'account_id': line.account_id.id, 'date': time.strftime('%Y-%m-%d'), 'account_tax_id': line.account_tax_id.id, 'credit': 0.0, 'debit': 0.0, 'partner_id': partner_id, } if line.move_line_type == 'cr': val['credit'] = computed_lines[line.sequence] if line.move_line_type == 'dr': val['debit'] = computed_lines[line.sequence] id_line = account_move_line_obj.create(cr, uid, val) return id_line def _make_transitory_move_line(self, cr, uid, line, computed_lines, move_id, period_id, trans_account_id, partner_id): account_move_line_obj = 
self.pool.get('account.move.line') analytic_account_id = False if line.analytic_account_id: if not line.journal_id.analytic_journal_id: raise orm.except_orm( _('No Analytic Journal !'), _("You have to define an analytic journal " "on the '%s' journal!") % (line.template_id.journal_id.name,) ) analytic_account_id = line.analytic_account_id.id val = { 'name': 'transitory', 'move_id': move_id, 'journal_id': line.journal_id.id, 'period_id': period_id, 'analytic_account_id': analytic_account_id, 'account_id': trans_account_id, 'date': time.strftime('%Y-%m-%d'), 'partner_id': partner_id, } if line.move_line_type != 'cr': val['credit'] = computed_lines[line.sequence] if line.move_line_type != 'dr': val['debit'] = computed_lines[line.sequence] id_line = account_move_line_obj.create(cr, uid, val) return id_line class wizard_select_template_line(orm.TransientModel): _description = 'Template Lines' _name = "wizard.select.move.template.line" _columns = { 'template_id': fields.many2one('wizard.select.move.template', 'Template'), 'sequence': fields.integer('Number', required=True), 'name': fields.char('Name', size=64, required=True, readonly=True), 'account_id': fields.many2one( 'account.account', 'Account', required=True, readonly=True ), 'move_line_type': fields.selection( [('cr', 'Credit'), ('dr', 'Debit')], 'Move Line Type', required=True, readonly=True ), 'amount': fields.float('Amount', required=True), }
agpl-3.0
aganzha/tweepy
tweepy/streaming.py
1
16268
# Tweepy # Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. # Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets from __future__ import absolute_import, print_function import logging import requests from requests.exceptions import Timeout from threading import Thread from time import sleep import six import ssl from tweepy.models import Status from tweepy.api import API from tweepy.error import TweepError from tweepy.utils import import_simplejson json = import_simplejson() STREAM_VERSION = '1.1' class StreamListener(object): def __init__(self, api=None): self.api = api or API() def on_connect(self): """Called once connected to streaming server. This will be invoked once a successful response is received from the server. Allows the listener to perform some work prior to entering the read loop. """ pass def on_data(self, raw_data): """Called when raw data is received from connection. Override this method if you wish to manually handle the stream data. Return False to stop stream and close connection. 
""" data = json.loads(raw_data) if 'in_reply_to_status_id' in data: status = Status.parse(self.api, data) if self.on_status(status) is False: return False elif 'delete' in data: delete = data['delete']['status'] if self.on_delete(delete['id'], delete['user_id']) is False: return False elif 'event' in data: status = Status.parse(self.api, data) if self.on_event(status) is False: return False elif 'direct_message' in data: status = Status.parse(self.api, data) if self.on_direct_message(status) is False: return False elif 'friends' in data: if self.on_friends(data['friends']) is False: return False elif 'limit' in data: if self.on_limit(data['limit']['track']) is False: return False elif 'disconnect' in data: if self.on_disconnect(data['disconnect']) is False: return False elif 'warning' in data: if self.on_warning(data['warning']) is False: return False else: logging.error("Unknown message type: " + str(raw_data)) def keep_alive(self): """Called when a keep-alive arrived""" return def on_status(self, status): """Called when a new status arrives""" return def on_exception(self, exception): """Called when an unhandled exception occurs.""" return def on_delete(self, status_id, user_id): """Called when a delete notice arrives for a status""" return def on_event(self, status): """Called when a new event arrives""" return def on_direct_message(self, status): """Called when a new direct message arrives""" return def on_friends(self, friends): """Called when a friends list arrives. 
friends is a list that contains user_id """ return def on_limit(self, track): """Called when a limitation notice arrives""" return def on_error(self, status_code): """Called when a non-200 status code is returned""" return False def on_timeout(self): """Called when stream connection times out""" return def on_disconnect(self, notice): """Called when twitter sends a disconnect notice Disconnect codes are listed here: https://dev.twitter.com/docs/streaming-apis/messages#Disconnect_messages_disconnect """ return def on_warning(self, notice): """Called when a disconnection warning message arrives""" return class ReadBuffer(object): """Buffer data from the response in a smarter way than httplib/requests can. Tweets are roughly in the 2-12kb range, averaging around 3kb. Requests/urllib3/httplib/socket all use socket.read, which blocks until enough data is returned. On some systems (eg google appengine), socket reads are quite slow. To combat this latency we can read big chunks, but the blocking part means we won't get results until enough tweets have arrived. That may not be a big deal for high throughput systems. For low throughput systems we don't want to sacrafice latency, so we use small chunks so it can read the length and the tweet in 2 read calls. 
""" def __init__(self, stream, chunk_size): self._stream = stream self._buffer = '' self._chunk_size = chunk_size def read_len(self, length): while not self._stream.closed: if len(self._buffer) >= length: return self._pop(length) read_len = max(self._chunk_size, length - len(self._buffer)) self._buffer += self._stream.read(read_len) def read_line(self, sep='\n'): start = 0 while not self._stream.closed: loc = self._buffer.find(sep, start) if loc >= 0: return self._pop(loc + len(sep)) else: start = len(self._buffer) self._buffer += self._stream.read(self._chunk_size) def _pop(self, length): r = self._buffer[:length] self._buffer = self._buffer[length:] return r class Stream(object): host = 'stream.twitter.com' def __init__(self, auth, listener, **options): self.auth = auth self.listener = listener self.running = False self.timeout = options.get("timeout", 300.0) self.retry_count = options.get("retry_count") # values according to # https://dev.twitter.com/docs/streaming-apis/connecting#Reconnecting self.retry_time_start = options.get("retry_time", 5.0) self.retry_420_start = options.get("retry_420", 60.0) self.retry_time_cap = options.get("retry_time_cap", 320.0) self.snooze_time_step = options.get("snooze_time", 0.25) self.snooze_time_cap = options.get("snooze_time_cap", 16) # The default socket.read size. Default to less than half the size of # a tweet so that it reads tweets with the minimal latency of 2 reads # per tweet. Values higher than ~1kb will increase latency by waiting # for more data to arrive but may also increase throughput by doing # fewer socket read calls. 
self.chunk_size = options.get("chunk_size", 512) self.verify = options.get("verify", True) self.api = API() self.headers = options.get("headers") or {} self.new_session() self.body = None self.retry_time = self.retry_time_start self.snooze_time = self.snooze_time_step def new_session(self): self.session = requests.Session() self.session.headers = self.headers self.session.params = None def _run(self): # Authenticate url = "https://%s%s" % (self.host, self.url) # Connect and process the stream error_counter = 0 resp = None exception = None while self.running: if self.retry_count is not None: if error_counter > self.retry_count: # quit if error count greater than retry count break try: auth = self.auth.apply_auth() resp = self.session.request('POST', url, data=self.body, timeout=self.timeout, stream=True, auth=auth, verify=self.verify) if resp.status_code != 200: if self.listener.on_error(resp.status_code) is False: break error_counter += 1 if resp.status_code == 420: self.retry_time = max(self.retry_420_start, self.retry_time) sleep(self.retry_time) self.retry_time = min(self.retry_time * 2, self.retry_time_cap) else: error_counter = 0 self.retry_time = self.retry_time_start self.snooze_time = self.snooze_time_step self.listener.on_connect() self._read_loop(resp) except (Timeout, ssl.SSLError) as exc: # This is still necessary, as a SSLError can actually be # thrown when using Requests # If it's not time out treat it like any other exception if isinstance(exc, ssl.SSLError): if not (exc.args and 'timed out' in str(exc.args[0])): exception = exc break if self.listener.on_timeout() is False: break if self.running is False: break sleep(self.snooze_time) self.snooze_time = min(self.snooze_time + self.snooze_time_step, self.snooze_time_cap) except Exception as exc: exception = exc # any other exception is fatal, so kill loop break # cleanup self.running = False if resp: resp.close() self.new_session() if exception: # call a handler first so that the exception can be 
logged. self.listener.on_exception(exception) raise def _data(self, data): if self.listener.on_data(data) is False: self.running = False def _read_loop(self, resp): buf = ReadBuffer(resp.raw, self.chunk_size) while self.running and not resp.raw.closed: length = 0 while not resp.raw.closed: line = buf.read_line().strip() if not line: self.listener.keep_alive() # keep-alive new lines are expected elif line.isdigit(): length = int(line) break else: raise TweepError('Expecting length, unexpected value found') next_status_obj = buf.read_len(length) if self.running: self._data(next_status_obj) # # Note: keep-alive newlines might be inserted before each length value. # # read until we get a digit... # c = b'\n' # for c in resp.iter_content(decode_unicode=True): # if c == b'\n': # continue # break # # delimited_string = c # # # read rest of delimiter length.. # d = b'' # for d in resp.iter_content(decode_unicode=True): # if d != b'\n': # delimited_string += d # continue # break # # # read the next twitter status object # if delimited_string.decode('utf-8').strip().isdigit(): # status_id = int(delimited_string) # next_status_obj = resp.raw.read(status_id) # if self.running: # self._data(next_status_obj.decode('utf-8')) if resp.raw.closed: self.on_closed(resp) def _start(self, async): self.running = True if async: self._thread = Thread(target=self._run) self._thread.start() else: self._run() def on_closed(self, resp): """ Called when the response has been closed by Twitter """ pass def userstream(self, stall_warnings=False, _with=None, replies=None, track=None, locations=None, async=False, encoding='utf8'): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/user.json' % STREAM_VERSION self.host = 'userstream.twitter.com' if stall_warnings: self.session.params['stall_warnings'] = stall_warnings if _with: self.session.params['with'] = _with if replies: self.session.params['replies'] = replies if 
locations and len(locations) > 0: if len(locations) % 4 != 0: raise TweepError("Wrong number of locations points, " "it has to be a multiple of 4") self.session.params['locations'] = ','.join(['%.2f' % l for l in locations]) if track: self.session.params['track'] = u','.join(track).encode(encoding) self._start(async) def firehose(self, count=None, async=False): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/firehose.json' % STREAM_VERSION if count: self.url += '&count=%s' % count self._start(async) def retweet(self, async=False): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/retweet.json' % STREAM_VERSION self._start(async) def sample(self, async=False, languages=None): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/sample.json' % STREAM_VERSION if languages: self.session.params['language'] = ','.join(map(str, languages)) self._start(async) def filter(self, follow=None, track=None, async=False, locations=None, stall_warnings=False, languages=None, encoding='utf8'): self.body = {} self.session.headers['Content-type'] = "application/x-www-form-urlencoded" if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/filter.json' % STREAM_VERSION if follow: self.body['follow'] = u','.join(follow).encode(encoding) if track: self.body['track'] = u','.join(track).encode(encoding) if locations and len(locations) > 0: if len(locations) % 4 != 0: raise TweepError("Wrong number of locations points, " "it has to be a multiple of 4") self.body['locations'] = u','.join(['%.4f' % l for l in locations]) if stall_warnings: self.body['stall_warnings'] = stall_warnings if languages: self.body['language'] = u','.join(map(str, languages)) self.session.params = {'delimited': 
'length'} self.host = 'stream.twitter.com' self._start(async) def sitestream(self, follow, stall_warnings=False, with_='user', replies=False, async=False): self.body = {} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/site.json' % STREAM_VERSION self.body['follow'] = u','.join(map(six.text_type, follow)) self.body['delimited'] = 'length' if stall_warnings: self.body['stall_warnings'] = stall_warnings if with_: self.body['with'] = with_ if replies: self.body['replies'] = replies self._start(async) def disconnect(self): if self.running is False: return self.running = False
mit
rosarior/rua
rua/apps/permissions/migrations/0001_initial.py
4
5607
# encoding: utf-8 from south.db import db from south.v2 import SchemaMigration class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Permission' db.create_table('permissions_permission', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('namespace', self.gf('django.db.models.fields.CharField')(max_length=64)), ('name', self.gf('django.db.models.fields.CharField')(max_length=64)), ('label', self.gf('django.db.models.fields.CharField')(max_length=96)), )) db.send_create_signal('permissions', ['Permission']) # Adding unique constraint on 'Permission', fields ['namespace', 'name'] db.create_unique('permissions_permission', ['namespace', 'name']) # Adding model 'PermissionHolder' db.create_table('permissions_permissionholder', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('permission', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['permissions.Permission'])), ('holder_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='permission_holder', to=orm['contenttypes.ContentType'])), ('holder_id', self.gf('django.db.models.fields.PositiveIntegerField')()), )) db.send_create_signal('permissions', ['PermissionHolder']) # Adding model 'Role' db.create_table('permissions_role', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)), ('label', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)), )) db.send_create_signal('permissions', ['Role']) # Adding model 'RoleMember' db.create_table('permissions_rolemember', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('role', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['permissions.Role'])), ('member_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='role_member', to=orm['contenttypes.ContentType'])), ('member_id', 
self.gf('django.db.models.fields.PositiveIntegerField')()), )) db.send_create_signal('permissions', ['RoleMember']) def backwards(self, orm): # Removing unique constraint on 'Permission', fields ['namespace', 'name'] db.delete_unique('permissions_permission', ['namespace', 'name']) # Deleting model 'Permission' db.delete_table('permissions_permission') # Deleting model 'PermissionHolder' db.delete_table('permissions_permissionholder') # Deleting model 'Role' db.delete_table('permissions_role') # Deleting model 'RoleMember' db.delete_table('permissions_rolemember') models = { 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'permissions.permission': { 'Meta': {'ordering': "('namespace', 'label')", 'unique_together': "(('namespace', 'name'),)", 'object_name': 'Permission'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '96'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'namespace': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'permissions.permissionholder': { 'Meta': {'object_name': 'PermissionHolder'}, 'holder_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'holder_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permission_holder'", 'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['permissions.Permission']"}) 
}, 'permissions.role': { 'Meta': {'ordering': "('label',)", 'object_name': 'Role'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}) }, 'permissions.rolemember': { 'Meta': {'object_name': 'RoleMember'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'member_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'member_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'role_member'", 'to': "orm['contenttypes.ContentType']"}), 'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['permissions.Role']"}) } } complete_apps = ['permissions']
gpl-3.0
s20121035/rk3288_android5.1_repo
external/chromium_org/tools/telemetry/telemetry/core/backends/chrome/tab_list_backend.py
46
2790
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import urllib2 from telemetry.core import tab from telemetry.core import util from telemetry.core.backends.chrome import inspector_backend_list class TabListBackend(inspector_backend_list.InspectorBackendList): """A dynamic sequence of tab.Tabs in UI order.""" def __init__(self, browser_backend): super(TabListBackend, self).__init__(browser_backend, backend_wrapper=tab.Tab) def New(self, timeout): assert self._browser_backend.supports_tab_control self._browser_backend.Request('new', timeout=timeout) return self[-1] def CloseTab(self, debugger_url, timeout=None): assert self._browser_backend.supports_tab_control tab_id = inspector_backend_list.DebuggerUrlToId(debugger_url) # TODO(dtu): crbug.com/160946, allow closing the last tab on some platforms. # For now, just create a new tab before closing the last tab. if len(self) <= 1: self.New(timeout) try: response = self._browser_backend.Request('close/%s' % tab_id, timeout=timeout, throw_network_exception=True) except urllib2.HTTPError: raise Exception('Unable to close tab, tab id not found: %s' % tab_id) assert response == 'Target is closing' util.WaitFor(lambda: tab_id not in self, timeout=5) def ActivateTab(self, debugger_url, timeout=None): assert self._browser_backend.supports_tab_control tab_id = inspector_backend_list.DebuggerUrlToId(debugger_url) assert tab_id in self try: response = self._browser_backend.Request('activate/%s' % tab_id, timeout=timeout, throw_network_exception=True) except urllib2.HTTPError: raise Exception('Unable to activate tab, tab id not found: %s' % tab_id) assert response == 'Target activated' def GetTabUrl(self, debugger_url): tab_id = inspector_backend_list.DebuggerUrlToId(debugger_url) tab_info = self.GetContextInfo(tab_id) assert tab_info is not None return tab_info['url'] def Get(self, index, ret): """Returns self[index] if 
it exists, or ret if index is out of bounds.""" if len(self) <= index: return ret return self[index] def ShouldIncludeContext(self, context): if 'type' in context: return context['type'] == 'page' # TODO: For compatibility with Chrome before r177683. # This check is not completely correct, see crbug.com/190592. return not context['url'].startswith('chrome-extension://')
gpl-3.0
jetty840/ReplicatorG
skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/analyze_plugins/skeiniso.py
6
43737
""" This page is in the table of contents. Skeiniso is an analysis script to display a gcode file in an isometric view. The skeiniso manual page is at: http://www.bitsfrombytes.com/wiki/index.php?title=Skeinforge_Skeiniso ==Operation== The default 'Activate Skeiniso' checkbox is on. When it is on, the functions described below will work when called from the skeinforge toolchain, when it is off, the functions will not be called from the toolchain. The functions will still be called, whether or not the 'Activate Skeiniso' checkbox is on, when skeiniso is run directly. Skeiniso can not separate the layers when it reads gcode without comments. The viewer is simple, the viewpoint can only be moved in a sphere around the center of the model by changing the viewpoint latitude and longitude. Different regions of the model can be hidden by setting the width of the thread to zero. The alternating bands act as contour bands and their brightness and width can be changed. ==Settings== ===Animation=== ====Animation Line Quickening==== Default is one. The quickness of the tool animation over the quickness of the actual tool. ====Animation Slide Show Rate==== Default is two layers per second. The rate, in layers per second, at which the layer changes when the soar or dive button is pressed.. ===Axis Rulings=== Default is on. When selected, rulings will be drawn on the axis lines. ===Banding=== ====Band Height==== Default is five layers. Defines the height of the band in layers, a pair of bands is twice that height. ====Bottom Band Brightness==== Default is 0.7. Defines the ratio of the brightness of the bottom band over the brightness of the top band. The higher it is the brighter the bottom band will be. ====Bottom Layer Brightness==== Default is one. Defines the ratio of the brightness of the bottom layer over the brightness of the top layer. 
With a low bottom layer brightness ratio the bottom of the model will be darker than the top of the model, as if it was being illuminated by a light just above the top. ====Bright Band Start==== Default choice is 'From the Top'. The button group that determines where the bright band starts from. =====From the Bottom===== When selected, the bright bands will start from the bottom. =====From the Top===== When selected, the bright bands will start from the top. ===Draw Arrows=== Default is on. When selected, arrows will be drawn at the end of each line segment. ===Export Menu=== When the submenu in the export menu item in the file menu is clicked, an export canvas dialog will be displayed, which can export the canvas to a file. ===Go Around Extruder Off Travel=== Default is off. When selected, the display will include the travel when the extruder is off, which means it will include the nozzle wipe path if any. ===Layers=== ====Layer==== Default is zero. On the display window, the Up button increases the 'Layer' by one, and the Down button decreases the layer by one. When the layer displayed in the layer spin box is changed then <Return> is hit, the layer shown will be set to the spin box, to a mimimum of zero and to a maximum of the highest index layer.The Soar button increases the layer at the 'Animation Slide Show Rate', and the Dive (double left arrow button beside the layer field) button decreases the layer at the slide show rate. ====Layer Extra Span==== Default is a huge number. The viewer will draw the layers in the range including the 'Layer' index and the 'Layer' index plus the 'Layer Extra Span'. If the 'Layer Extra Span' is negative, the layers viewed will start at the 'Layer' index, plus the 'Layer Extra Span', and go up to and include the 'Layer' index. If the 'Layer Extra Span' is zero, only the 'Layer' index layer will be displayed. 
If the 'Layer Extra Span' is positive, the layers viewed will start at the 'Layer' index, and go up to and include the 'Layer' index plus the 'Layer Extra Span'.

===Line===
Default is zero.

The index of the selected line on the layer that is highlighted when the 'Display Line' mouse tool is chosen.  The line spin box up button increases the 'Line' by one.  If the line index of the layer goes over the index of the last line, the layer index will be increased by one and the new line index will be zero.  The down button decreases the line index by one.  If the line index goes below the index of the first line, the layer index will be decreased by one and the new line index will be at the last line.  When the line displayed in the line field is changed then <Return> is hit, the line shown will be set to the line field, to a minimum of zero and to a maximum of the highest index line.  The Soar button increases the line at the speed at which the extruder would move, times the 'Animation Line Quickening' ratio, and the Dive (double left arrow button beside the line field) button decreases the line at the animation line quickening ratio.

===Mouse Mode===
Default is 'Display Line'.

The mouse tool can be changed from the 'Mouse Mode' menu button or picture button.  The mouse tools listen to the arrow keys when the canvas has the focus.  Clicking in the canvas gives the canvas the focus, and when the canvas has the focus a thick black border is drawn around the canvas.

====Display Line====
The 'Display Line' tool will highlight the selected line, and display the file line count, counting from one, and the gcode line itself.  When the 'Display Line' tool is active, clicking the canvas will select the nearest line to the mouse click.

====Viewpoint Move====
The 'Viewpoint Move' tool will move the viewpoint in the xy plane when the mouse is clicked and dragged on the canvas.
====Viewpoint Rotate==== The 'Viewpoint Rotate' tool will rotate the viewpoint around the origin, when the mouse is clicked and dragged on the canvas, or the arrow keys have been used and <Return> is pressed. The viewpoint can also be moved by dragging the mouse. The viewpoint latitude will be increased when the mouse is dragged from the center towards the edge. The viewpoint longitude will be changed by the amount around the center the mouse is dragged. This is not very intuitive, but I don't know how to do this the intuitive way and I have other stuff to develop. If the shift key is pressed; if the latitude is changed more than the longitude, only the latitude will be changed, if the longitude is changed more only the longitude will be changed. ===Number of Fill Layers=== ====Number of Fill Bottom Layers==== Default is one. The "Number of Fill Bottom Layers" is the number of layers at the bottom which will be colored olive. ===Number of Fill Top Layers=== Default is one. The "Number of Fill Top Layers" is the number of layers at the top which will be colored blue. ===Scale=== Default is ten. The scale setting is the scale of the image in pixels per millimeter, the higher the number, the greater the size of the display. The zoom in mouse tool will zoom in the display at the point where the mouse was clicked, increasing the scale by a factor of two. The zoom out tool will zoom out the display at the point where the mouse was clicked, decreasing the scale by a factor of two. ===Screen Inset=== ====Screen Horizontal Inset==== Default is one hundred. The "Screen Horizontal Inset" determines how much the canvas will be inset in the horizontal direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be. ====Screen Vertical Inset==== Default is two hundred and twenty. 
The "Screen Vertical Inset" determines how much the canvas will be inset in the vertical direction from the edge of screen, the higher the number the more it will be inset and the smaller it will be.. ===Viewpoint=== ====Viewpoint Latitude==== Default is fifteen degrees. The "Viewpoint Latitude" is the latitude of the viewpoint, a latitude of zero is the top pole giving a top view, a latitude of ninety gives a side view and a latitude of 180 gives a bottom view. ====Viewpoint Longitude==== Default is 210 degrees. The "Viewpoint Longitude" is the longitude of the viewpoint. ===Width=== The width of each type of thread and of each axis can be changed. If the width is set to zero, the thread will not be visible. ====Width of Axis Negative Side==== Default is two. Defines the width of the negative side of the axis. ====Width of Axis Positive Side==== Default is six. Defines the width of the positive side of the axis. ====Width of Infill Thread==== Default is one. The "Width of Infill Thread" sets the width of the green extrusion threads, those threads which are not loops and not part of the raft. ====Width of Fill Bottom Thread==== Default is two. The "Width of Fill Bottom Thread" sets the width of the olive extrusion threads at the bottom of the model. ====Width of Fill Top Thread==== Default is two. The "Width of Fill Top Thread" sets the width of the blue extrusion threads at the top of the model. ====Width of Loop Thread==== Default is three. The "Width of Loop Thread" sets the width of the yellow loop threads, which are not perimeters. ====Width of Perimeter Inside Thread==== Default is eight. The "Width of Perimeter Inside Thread" sets the width of the orange inside perimeter threads. ====Width of Perimeter Outside Thread==== Default is eight. The "Width of Perimeter Outside Thread" sets the width of the red outside perimeter threads. ====Width of Raft Thread==== Default is one. The "Width of Raft Thread" sets the width of the brown raft threads. 
====Width of Selection Thread====
Default is six.

The "Width of Selection Thread" sets the width of the selected line.

====Width of Travel Thread====
Default is zero.

The "Width of Travel Thread" sets the width of the grey extruder off travel threads.

==Icons==
The dive, soar and zoom icons are from Mark James' soarSilk icon set 1.3 at:
http://www.famfamfam.com/lab/icons/silk/

==Gcodes==
An explanation of the gcodes is at:
http://reprap.org/bin/view/Main/Arduino_GCode_Interpreter

and at:
http://reprap.org/bin/view/Main/MCodeReference

A gcode example is at:
http://forums.reprap.org/file.php?12,file=565

==Examples==
Below are examples of skeiniso being used.  These examples are run in a terminal in the folder which contains Screw Holder_penultimate.gcode and skeiniso.py.

> python skeiniso.py
This brings up the skeiniso dialog.

> python skeiniso.py Screw Holder_penultimate.gcode
This brings up the skeiniso viewer to view the gcode file.

> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import skeiniso
>>> skeiniso.main()
This brings up the skeiniso dialog.

>>> skeiniso.getWindowAnalyzeFile('Screw Holder_penultimate.gcode')
This brings up the skeiniso viewer to view the gcode file.

"""

from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__ from fabmetheus_utilities.vector3 import Vector3 from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import display_line from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import tableau from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import view_move from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import view_rotate from skeinforge_application.skeinforge_utilities import skeinforge_polyfile import math import sys __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GPL 3.0' def compareLayerSequence( first, second ): "Get comparison in order to sort skein panes in ascending order of layer zone index then sequence index." if first.layerZoneIndex < second.layerZoneIndex: return - 1 if first.layerZoneIndex > second.layerZoneIndex: return 1 if first.sequenceIndex < second.sequenceIndex: return - 1 return int( first.sequenceIndex > second.sequenceIndex ) def getNewRepository(): "Get the repository constructor." return SkeinisoRepository() def getWindowAnalyzeFile(fileName): "Skeiniso a gcode file." gcodeText = archive.getFileText(fileName) return getWindowAnalyzeFileGivenText(fileName, gcodeText) def getWindowAnalyzeFileGivenText( fileName, gcodeText, repository=None): "Display a skeiniso gcode file for a gcode file." if gcodeText == '': return None if repository == None: repository = settings.getReadRepository( SkeinisoRepository() ) skeinWindow = getWindowGivenTextRepository( fileName, gcodeText, repository ) skeinWindow.updateDeiconify() return skeinWindow def getWindowGivenTextRepository( fileName, gcodeText, repository ): "Display the gcode text in a skeiniso viewer." 
skein = SkeinisoSkein() skein.parseGcode( fileName, gcodeText, repository ) return SkeinWindow( repository, skein ) def writeOutput( fileName, fileNameSuffix, gcodeText = ''): "Write a skeinisoed gcode file for a skeinforge gcode file, if 'Activate Skeiniso' is selected." repository = settings.getReadRepository( SkeinisoRepository() ) if repository.activateSkeiniso.value: gcodeText = archive.getTextIfEmpty( fileNameSuffix, gcodeText ) getWindowAnalyzeFileGivenText( fileNameSuffix, gcodeText, repository ) class SkeinisoRepository( tableau.TableauRepository ): "A class to handle the skeiniso settings." def __init__(self): "Set the default settings, execute title & settings fileName." settings.addListsToRepository('skeinforge_application.skeinforge_plugins.analyze_plugins.skeiniso.html', None, self ) self.baseNameSynonym = 'behold.csv' self.fileNameInput = settings.FileNameInput().getFromFileName( [ ('Gcode text files', '*.gcode') ], 'Open File for Skeiniso', self, '') self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://www.bitsfrombytes.com/wiki/index.php?title=Skeinforge_Skeiniso') self.activateSkeiniso = settings.BooleanSetting().getFromValue('Activate Skeiniso', self, True ) self.addAnimation() self.axisRulings = settings.BooleanSetting().getFromValue('Axis Rulings', self, True ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Banding -', self ) self.bandHeight = settings.IntSpinUpdate().getFromValue( 0, 'Band Height (layers):', self, 10, 5 ) self.bottomBandBrightness = settings.FloatSpinUpdate().getFromValue( 0.0, 'Bottom Band Brightness (ratio):', self, 1.0, 0.7 ) self.bottomLayerBrightness = settings.FloatSpinUpdate().getFromValue( 0.0, 'Bottom Layer Brightness (ratio):', self, 1.0, 1.0 ) self.brightBandStart = settings.MenuButtonDisplay().getFromName('Bright Band Start:', self ) self.fromTheBottom = settings.MenuRadio().getFromMenuButtonDisplay( self.brightBandStart, 'From the Bottom', self, 
False ) self.fromTheTop = settings.MenuRadio().getFromMenuButtonDisplay( self.brightBandStart, 'From the Top', self, True ) settings.LabelSeparator().getFromRepository(self) self.drawArrows = settings.BooleanSetting().getFromValue('Draw Arrows', self, False ) self.goAroundExtruderOffTravel = settings.BooleanSetting().getFromValue('Go Around Extruder Off Travel', self, False ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Layers -', self ) self.layer = settings.IntSpinNotOnMenu().getSingleIncrementFromValue( 0, 'Layer (index):', self, 912345678, 0 ) self.layerExtraSpan = settings.IntSpinUpdate().getSingleIncrementFromValue( - 912345678, 'Layer Extra Span (integer):', self, 912345678, 912345678 ) settings.LabelSeparator().getFromRepository(self) self.line = settings.IntSpinNotOnMenu().getSingleIncrementFromValue( 0, 'Line (index):', self, 912345678, 0 ) self.mouseMode = settings.MenuButtonDisplay().getFromName('Mouse Mode:', self ) self.displayLine = settings.MenuRadio().getFromMenuButtonDisplay( self.mouseMode, 'Display Line', self, True ) self.viewMove = settings.MenuRadio().getFromMenuButtonDisplay( self.mouseMode, 'View Move', self, False ) self.viewRotate = settings.MenuRadio().getFromMenuButtonDisplay( self.mouseMode, 'View Rotate', self, False ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Number of Fill Layers -', self ) self.numberOfFillBottomLayers = settings.IntSpinUpdate().getFromValue( 0, 'Number of Fill Bottom Layers (integer):', self, 5, 1 ) self.numberOfFillTopLayers = settings.IntSpinUpdate().getFromValue( 0, 'Number of Fill Top Layers (integer):', self, 5, 1 ) settings.LabelSeparator().getFromRepository(self) self.addScaleScreenSlide() settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Viewpoint -', self ) self.viewpointLatitude = settings.FloatSpin().getFromValue( 0.0, 'Viewpoint Latitude (degrees):', self, 180.0, 15.0 ) 
self.viewpointLongitude = settings.FloatSpin().getFromValue( 0.0, 'Viewpoint Longitude (degrees):', self, 360.0, 210.0 ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Width -', self ) self.widthOfAxisNegativeSide = settings.IntSpinUpdate().getFromValue( 0, 'Width of Axis Negative Side (pixels):', self, 10, 2 ) self.widthOfAxisPositiveSide = settings.IntSpinUpdate().getFromValue( 0, 'Width of Axis Positive Side (pixels):', self, 10, 6 ) self.widthOfFillBottomThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Fill Bottom Thread (pixels):', self, 10, 2 ) self.widthOfFillTopThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Fill Top Thread (pixels):', self, 10, 2 ) self.widthOfInfillThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Infill Thread (pixels):', self, 10, 1 ) self.widthOfLoopThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Loop Thread (pixels):', self, 10, 2 ) self.widthOfPerimeterInsideThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Perimeter Inside Thread (pixels):', self, 10, 8 ) self.widthOfPerimeterOutsideThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Perimeter Outside Thread (pixels):', self, 10, 8 ) self.widthOfRaftThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Raft Thread (pixels):', self, 10, 1 ) self.widthOfSelectionThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Selection Thread (pixels):', self, 10, 6 ) self.widthOfTravelThread = settings.IntSpinUpdate().getFromValue( 0, 'Width of Travel Thread (pixels):', self, 10, 0 ) self.executeTitle = 'Skeiniso' def execute(self): "Write button has been clicked." fileNames = skeinforge_polyfile.getFileOrGcodeDirectory( self.fileNameInput.value, self.fileNameInput.wasCancelled ) for fileName in fileNames: getWindowAnalyzeFile(fileName) class SkeinisoSkein: "A class to write a get a scalable vector graphics text for a gcode skein." 
def __init__(self): self.coloredThread = [] self.feedRateMinute = 960.1 self.hasASurroundingLoopBeenReached = False self.isLoop = False self.isPerimeter = False self.isOuter = False self.isThereALayerStartWord = False self.layerCount = settings.LayerCount() self.layerTops = [] self.oldLayerZoneIndex = 0 self.oldZ = - 999999999999.0 self.skeinPane = None self.skeinPanes = [] self.thirdLayerThickness = 0.133333 def addToPath( self, line, location ): 'Add a point to travel and maybe extrusion.' if self.oldLocation == None: return begin = self.scale * self.oldLocation - self.scaleCenterBottom end = self.scale * location - self.scaleCenterBottom displayString = '%s %s' % ( self.lineIndex + 1, line ) tagString = 'colored_line_index: %s %s' % ( len( self.skeinPane.coloredLines ), len( self.skeinPanes ) - 1 ) coloredLine = tableau.ColoredLine( begin, '', displayString, end, tagString ) coloredLine.z = location.z self.skeinPane.coloredLines.append( coloredLine ) self.coloredThread.append( coloredLine ) def getLayerTop(self): "Get the layer top." if len( self.layerTops ) < 1: return - 9123456789123.9 return self.layerTops[-1] def getLayerZoneIndex( self, z ): "Get the layer zone index." if self.layerTops[ self.oldLayerZoneIndex ] > z: if self.oldLayerZoneIndex == 0: return 0 elif self.layerTops[ self.oldLayerZoneIndex - 1 ] < z: return self.oldLayerZoneIndex for layerTopIndex in xrange( len( self.layerTops ) ): layerTop = self.layerTops[ layerTopIndex ] if layerTop > z: self.oldLayerZoneIndex = layerTopIndex return layerTopIndex self.oldLayerZoneIndex = len( self.layerTops ) - 1 return self.oldLayerZoneIndex def initializeActiveLocation(self): "Set variables to default." self.extruderActive = False self.oldLocation = None def isLayerStart( self, firstWord, splitLine ): "Parse a gcode line and add it to the vector output." 
if self.isThereALayerStartWord: return firstWord == '(<layer>' if firstWord != 'G1' and firstWord != 'G2' and firstWord != 'G3': return False location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) if location.z - self.oldZ > 0.1: self.oldZ = location.z return True return False def linearCorner( self, splitLine ): "Update the bounding corners." location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) if self.extruderActive or self.goAroundExtruderOffTravel: self.cornerHigh = euclidean.getPointMaximum( self.cornerHigh, location ) self.cornerLow = euclidean.getPointMinimum( self.cornerLow, location ) self.oldLocation = location def linearMove( self, line, location ): "Get statistics for a linear move." if self.skeinPane == None: return self.addToPath( line, location ) def moveColoredThreadToSkeinPane(self): 'Move a colored thread to the skein pane.' if len( self.coloredThread ) <= 0: return layerZoneIndex = self.getLayerZoneIndex( self.coloredThread[0].z ) if not self.extruderActive: self.setColoredThread( ( 190.0, 190.0, 190.0 ), self.skeinPane.travelLines ) #grey return self.skeinPane.layerZoneIndex = layerZoneIndex if self.isPerimeter: if self.isOuter: self.setColoredThread( ( 255.0, 0.0, 0.0 ), self.skeinPane.perimeterOutsideLines ) #red else: self.setColoredThread( ( 255.0, 165.0, 0.0 ), self.skeinPane.perimeterInsideLines ) #orange return if self.isLoop: self.setColoredThread( ( 255.0, 255.0, 0.0 ), self.skeinPane.loopLines ) #yellow return if not self.hasASurroundingLoopBeenReached: self.setColoredThread( ( 165.0, 42.0, 42.0 ), self.skeinPane.raftLines ) #brown return if layerZoneIndex < self.repository.numberOfFillBottomLayers.value: self.setColoredThread( ( 128.0, 128.0, 0.0 ), self.skeinPane.fillBottomLines ) #olive return if layerZoneIndex >= self.firstTopLayer: self.setColoredThread( ( 0.0, 0.0, 255.0 ), self.skeinPane.fillTopLines ) #blue return self.setColoredThread( ( 0.0, 255.0, 0.0 ), self.skeinPane.infillLines ) #green 
def parseCorner(self, line): "Parse a gcode line and use the location to update the bounding corners." splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) if len(splitLine) < 1: return firstWord = splitLine[0] if firstWord == 'G1': self.linearCorner(splitLine) elif firstWord == 'M101': self.extruderActive = True elif firstWord == 'M103': self.extruderActive = False elif firstWord == '(<layer>': self.layerTopZ = float(splitLine[1]) + self.thirdLayerThickness elif firstWord == '(<layerThickness>': self.thirdLayerThickness = 0.33333333333 * float(splitLine[1]) elif firstWord == '(<surroundingLoop>)': if self.layerTopZ > self.getLayerTop(): self.layerTops.append( self.layerTopZ ) def parseGcode( self, fileName, gcodeText, repository ): "Parse gcode text and store the vector output." self.repository = repository self.fileName = fileName self.gcodeText = gcodeText self.initializeActiveLocation() self.cornerHigh = Vector3(-999999999.0, -999999999.0, -999999999.0) self.cornerLow = Vector3(999999999.0, 999999999.0, 999999999.0) self.goAroundExtruderOffTravel = repository.goAroundExtruderOffTravel.value self.lines = archive.getTextLines(gcodeText) self.isThereALayerStartWord = gcodec.isThereAFirstWord('(<layer>', self.lines, 1 ) self.parseInitialization() for line in self.lines[self.lineIndex :]: self.parseCorner(line) if len( self.layerTops ) > 0: self.layerTops[-1] += 912345678.9 if len( self.layerTops ) > 1: self.oneMinusBrightnessOverTopLayerIndex = ( 1.0 - repository.bottomLayerBrightness.value ) / float( len( self.layerTops ) - 1 ) self.firstTopLayer = len( self.layerTops ) - self.repository.numberOfFillTopLayers.value self.centerComplex = 0.5 * ( self.cornerHigh.dropAxis(2) + self.cornerLow.dropAxis(2) ) self.centerBottom = Vector3( self.centerComplex.real, self.centerComplex.imag, self.cornerLow.z ) self.scale = repository.scale.value self.scaleCenterBottom = self.scale * self.centerBottom self.scaleCornerHigh = self.scale * self.cornerHigh.dropAxis(2) 
self.scaleCornerLow = self.scale * self.cornerLow.dropAxis(2) print( "The lower left corner of the skeiniso window is at %s, %s" % ( self.cornerLow.x, self.cornerLow.y ) ) print( "The upper right corner of the skeiniso window is at %s, %s" % ( self.cornerHigh.x, self.cornerHigh.y ) ) self.cornerImaginaryTotal = self.cornerHigh.y + self.cornerLow.y margin = complex( 5.0, 5.0 ) self.marginCornerLow = self.scaleCornerLow - margin self.screenSize = margin + 2.0 * ( self.scaleCornerHigh - self.marginCornerLow ) self.initializeActiveLocation() for self.lineIndex in xrange( self.lineIndex, len(self.lines) ): line = self.lines[self.lineIndex] self.parseLine(line) def parseInitialization(self): 'Parse gcode initialization and store the parameters.' for self.lineIndex in xrange(len(self.lines)): line = self.lines[self.lineIndex] splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = gcodec.getFirstWord(splitLine) if firstWord == '(</extruderInitialization>)': return elif firstWord == '(<operatingFeedRatePerSecond>': self.feedRateMinute = 60.0 * float(splitLine[1]) def parseLine(self, line): "Parse a gcode line and add it to the vector output." 
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) if len(splitLine) < 1: return firstWord = splitLine[0] if self.isLayerStart(firstWord, splitLine): self.layerCount.printProgressIncrement('skeiniso') self.skeinPane = SkeinPane( len( self.skeinPanes ) ) self.skeinPanes.append( self.skeinPane ) if firstWord == 'G1': location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) self.linearMove( line, location ) self.oldLocation = location elif firstWord == 'M101': self.moveColoredThreadToSkeinPane() self.extruderActive = True elif firstWord == 'M103': self.moveColoredThreadToSkeinPane() self.extruderActive = False self.isLoop = False self.isPerimeter = False elif firstWord == '(<loop>': self.isLoop = True elif firstWord == '(</loop>)': self.moveColoredThreadToSkeinPane() self.isLoop = False elif firstWord == '(<perimeter>': self.isPerimeter = True self.isOuter = ( splitLine[1] == 'outer') elif firstWord == '(</perimeter>)': self.moveColoredThreadToSkeinPane() self.isPerimeter = False elif firstWord == '(<surroundingLoop>)': self.hasASurroundingLoopBeenReached = True if firstWord == 'G2' or firstWord == 'G3': relativeLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine) relativeLocation.z = 0.0 location = self.oldLocation + relativeLocation self.linearMove( line, location ) self.oldLocation = location def setColoredLineColor( self, coloredLine, colorTuple ): 'Set the color and stipple of the colored line.' 
layerZoneIndex = self.getLayerZoneIndex( coloredLine.z ) multiplier = self.repository.bottomLayerBrightness.value if len( self.layerTops ) > 1: multiplier += self.oneMinusBrightnessOverTopLayerIndex * float( layerZoneIndex ) bandIndex = layerZoneIndex / self.repository.bandHeight.value if self.repository.fromTheTop.value: brightZoneIndex = len( self.layerTops ) - 1 - layerZoneIndex bandIndex = brightZoneIndex / self.repository.bandHeight.value + 1 if bandIndex % 2 == 0: multiplier *= self.repository.bottomBandBrightness.value red = settings.getWidthHex( int( colorTuple[0] * multiplier ), 2 ) green = settings.getWidthHex( int( colorTuple[1] * multiplier ), 2 ) blue = settings.getWidthHex( int( colorTuple[2] * multiplier ), 2 ) coloredLine.colorName = '#%s%s%s' % ( red, green, blue ) def setColoredThread( self, colorTuple, lineList ): 'Set the colored thread, then move it to the line list and stipple of the colored line.' for coloredLine in self.coloredThread: self.setColoredLineColor( coloredLine, colorTuple ) lineList += self.coloredThread self.coloredThread = [] class SkeinPane: "A class to hold the colored lines for a layer." def __init__( self, sequenceIndex ): "Create empty line lists." self.coloredLines = [] self.fillBottomLines = [] self.fillTopLines = [] self.index = 0 self.infillLines = [] self.layerZoneIndex = 0 self.loopLines = [] self.perimeterInsideLines = [] self.perimeterOutsideLines = [] self.raftLines = [] self.sequenceIndex = sequenceIndex self.travelLines = [] class Ruling: def __init__( self, modelDistance, roundedRulingText ): "Initialize the ruling." self.modelDistance = modelDistance self.roundedRulingText = roundedRulingText class SkeinWindow( tableau.TableauWindow ): def __init__( self, repository, skein ): "Initialize the skein window." 
self.arrowshape = ( 24, 30, 9 ) self.addCanvasMenuRootScrollSkein( repository, skein, '_skeiniso', 'Skeiniso') self.center = 0.5 * self.screenSize self.motionStippleName = 'gray75' halfCenter = 0.5 * self.center.real negativeHalfCenter = - halfCenter self.halfCenterModel = halfCenter / skein.scale negativeHalfCenterModel = - self.halfCenterModel roundedHalfCenter = euclidean.getThreeSignificantFigures( self.halfCenterModel ) roundedNegativeHalfCenter = euclidean.getThreeSignificantFigures( negativeHalfCenterModel ) self.negativeAxisLineX = tableau.ColoredLine( Vector3(), 'darkorange', None, Vector3( negativeHalfCenter ), 'X Negative Axis: Origin -> %s,0,0' % roundedNegativeHalfCenter ) self.negativeAxisLineY = tableau.ColoredLine( Vector3(), 'gold', None, Vector3( 0.0, negativeHalfCenter ), 'Y Negative Axis: Origin -> 0,%s,0' % roundedNegativeHalfCenter ) self.negativeAxisLineZ = tableau.ColoredLine( Vector3(), 'skyblue', None, Vector3( 0.0, 0.0, negativeHalfCenter ), 'Z Negative Axis: Origin -> 0,0,%s' % roundedNegativeHalfCenter ) self.positiveAxisLineX = tableau.ColoredLine( Vector3(), 'darkorange', None, Vector3( halfCenter ), 'X Positive Axis: Origin -> %s,0,0' % roundedHalfCenter ) self.positiveAxisLineY = tableau.ColoredLine( Vector3(), 'gold', None, Vector3( 0.0, halfCenter ), 'Y Positive Axis: Origin -> 0,%s,0' % roundedHalfCenter ) self.positiveAxisLineZ = tableau.ColoredLine( Vector3(), 'skyblue', None, Vector3( 0.0, 0.0, halfCenter ), 'Z Positive Axis: Origin -> 0,0,%s' % roundedHalfCenter ) self.repository.axisRulings.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.bandHeight.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) self.repository.bottomBandBrightness.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) self.repository.bottomLayerBrightness.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) self.repository.fromTheBottom.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) 
self.repository.fromTheTop.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) self.setWindowNewMouseTool( display_line.getNewMouseTool, self.repository.displayLine ) self.setWindowNewMouseTool( view_move.getNewMouseTool, self.repository.viewMove ) self.setWindowNewMouseTool( view_rotate.getNewMouseTool, self.repository.viewRotate ) self.repository.numberOfFillBottomLayers.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) self.repository.numberOfFillTopLayers.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) self.repository.viewpointLatitude.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.viewpointLongitude.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfAxisNegativeSide.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfAxisPositiveSide.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfFillBottomThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfFillTopThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfInfillThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfLoopThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfPerimeterInsideThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfPerimeterOutsideThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.repository.widthOfRaftThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) self.addMouseToolsBind() self.negativeRulings = [] self.positiveRulings = [] for rulingIndex in xrange( 1, int( math.ceil( self.halfCenterModel / self.rulingSeparationWidthMillimeters ) ) ): modelDistance = rulingIndex * self.rulingSeparationWidthMillimeters self.negativeRulings.append( Ruling( modelDistance, self.getRoundedRulingText( 1, - modelDistance ) ) ) self.positiveRulings.append( Ruling( modelDistance, 
self.getRoundedRulingText( 1, modelDistance ) ) ) self.rulingExtentHalf = 0.5 * self.rulingExtent def drawRuling( self, projectiveSpace, relativeRulingEnd, ruling, tags, viewBegin, viewEnd ): "Draw ruling." alongWay = ruling.modelDistance / self.halfCenterModel oneMinusAlongWay = 1.0 - alongWay alongScreen = alongWay * viewEnd + oneMinusAlongWay * viewBegin alongScreenEnd = alongScreen + relativeRulingEnd self.canvas.create_line( alongScreen.real, alongScreen.imag, alongScreenEnd.real, alongScreenEnd.imag, fill = 'black', tags = tags, width = 2 ) self.canvas.create_text( int( alongScreenEnd.real ) + 3, alongScreenEnd.imag, anchor = settings.Tkinter.W, text = ruling.roundedRulingText ) def drawRulings( self, axisLine, projectiveSpace, rulings ): "Draw rulings for the axis line." if not self.repository.axisRulings.value: return viewBegin = self.getScreenView( axisLine.begin, projectiveSpace ) viewEnd = self.getScreenView( axisLine.end, projectiveSpace ) viewSegment = viewEnd - viewBegin viewSegmentLength = abs( viewSegment ) if viewSegmentLength < self.rulingExtent: return normalizedViewSegment = viewSegment / viewSegmentLength relativeRulingEnd = complex( - normalizedViewSegment.imag, normalizedViewSegment.real ) if normalizedViewSegment.imag > 0.0: relativeRulingEnd = complex( normalizedViewSegment.imag, - normalizedViewSegment.real ) for ruling in rulings: self.drawRuling( projectiveSpace, relativeRulingEnd * self.rulingExtentHalf, ruling, axisLine.tagString, viewBegin, viewEnd ) def drawSkeinPane( self, projectiveSpace, skeinPane ): "Draw colored lines." 
self.getDrawnColoredLines( skeinPane.raftLines, projectiveSpace, self.repository.widthOfRaftThread.value ) self.getDrawnColoredLines( skeinPane.travelLines, projectiveSpace, self.repository.widthOfTravelThread.value ) self.getDrawnColoredLines( skeinPane.fillBottomLines, projectiveSpace, self.repository.widthOfFillBottomThread.value ) self.getDrawnColoredLines( skeinPane.fillTopLines, projectiveSpace, self.repository.widthOfFillTopThread.value ) self.getDrawnColoredLines( skeinPane.infillLines, projectiveSpace, self.repository.widthOfInfillThread.value ) self.getDrawnColoredLines( skeinPane.loopLines, projectiveSpace, self.repository.widthOfLoopThread.value ) self.getDrawnColoredLines( skeinPane.perimeterInsideLines, projectiveSpace, self.repository.widthOfPerimeterInsideThread.value ) self.getDrawnColoredLines( skeinPane.perimeterOutsideLines, projectiveSpace, self.repository.widthOfPerimeterOutsideThread.value ) def drawXYAxisLines( self, projectiveSpace ): "Draw the x and y axis lines." if self.repository.widthOfAxisNegativeSide.value > 0: self.getDrawnColoredLineWithoutArrow( self.negativeAxisLineX, projectiveSpace, self.negativeAxisLineX.tagString, self.repository.widthOfAxisNegativeSide.value ) self.getDrawnColoredLineWithoutArrow( self.negativeAxisLineY, projectiveSpace, self.negativeAxisLineY.tagString, self.repository.widthOfAxisNegativeSide.value ) if self.repository.widthOfAxisPositiveSide.value > 0: self.getDrawnColoredLine('last', self.positiveAxisLineX, projectiveSpace, self.positiveAxisLineX.tagString, self.repository.widthOfAxisPositiveSide.value ) self.getDrawnColoredLine('last', self.positiveAxisLineY, projectiveSpace, self.positiveAxisLineY.tagString, self.repository.widthOfAxisPositiveSide.value ) def drawZAxisLine( self, projectiveSpace ): "Draw the z axis line." 
if self.repository.widthOfAxisNegativeSide.value > 0: self.getDrawnColoredLineWithoutArrow( self.negativeAxisLineZ, projectiveSpace, self.negativeAxisLineZ.tagString, self.repository.widthOfAxisNegativeSide.value ) if self.repository.widthOfAxisPositiveSide.value > 0: self.getDrawnColoredLine('last', self.positiveAxisLineZ, projectiveSpace, self.positiveAxisLineZ.tagString, self.repository.widthOfAxisPositiveSide.value ) def getCentered( self, coordinate ): "Get the centered coordinate." relativeToCenter = complex( coordinate.real - self.center.real, self.center.imag - coordinate.imag ) if abs( relativeToCenter ) < 1.0: relativeToCenter = complex( 0.0, 1.0 ) return relativeToCenter def getCanvasRadius(self): "Get half of the minimum of the canvas height and width." return 0.5 * min( float( self.canvasHeight ), float( self.canvasWidth ) ) def getCenteredScreened( self, coordinate ): "Get the normalized centered coordinate." return self.getCentered( coordinate ) / self.getCanvasRadius() def getColoredLines(self): "Get the colored lines from the skein pane." return self.skeinPanes[ self.repository.layer.value ].coloredLines def getCopy(self): "Get a copy of this window." return SkeinWindow( self.repository, self.skein ) def getCopyWithNewSkein(self): "Get a copy of this window with a new skein." return getWindowGivenTextRepository( self.skein.fileName, self.skein.gcodeText, self.repository ) def getDrawnColoredLine( self, arrowType, coloredLine, projectiveSpace, tags, width ): "Draw colored line." viewBegin = self.getScreenView( coloredLine.begin, projectiveSpace ) viewEnd = self.getScreenView( coloredLine.end, projectiveSpace ) return self.canvas.create_line( viewBegin.real, viewBegin.imag, viewEnd.real, viewEnd.imag, fill = coloredLine.colorName, arrow = arrowType, tags = tags, width = width ) def getDrawnColoredLineMotion( self, coloredLine, projectiveSpace, width ): "Draw colored line with motion stipple and tag." 
viewBegin = self.getScreenView( coloredLine.begin, projectiveSpace ) viewEnd = self.getScreenView( coloredLine.end, projectiveSpace ) return self.canvas.create_line( viewBegin.real, viewBegin.imag, viewEnd.real, viewEnd.imag, fill = coloredLine.colorName, arrow = 'last', arrowshape = self.arrowshape, stipple = self.motionStippleName, tags = 'mouse_item', width = width + 4 ) def getDrawnColoredLines( self, coloredLines, projectiveSpace, width ): "Draw colored lines." if width <= 0: return drawnColoredLines = [] for coloredLine in coloredLines: drawnColoredLines.append( self.getDrawnColoredLine( self.arrowType, coloredLine, projectiveSpace, coloredLine.tagString, width ) ) return drawnColoredLines def getDrawnColoredLineWithoutArrow( self, coloredLine, projectiveSpace, tags, width ): "Draw colored line without an arrow." viewBegin = self.getScreenView( coloredLine.begin, projectiveSpace ) viewEnd = self.getScreenView( coloredLine.end, projectiveSpace ) return self.canvas.create_line( viewBegin.real, viewBegin.imag, viewEnd.real, viewEnd.imag, fill = coloredLine.colorName, tags = tags, width = width ) def getDrawnSelectedColoredLine( self, coloredLine ): "Get the drawn selected colored line." projectiveSpace = euclidean.ProjectiveSpace().getByLatitudeLongitude( self.repository.viewpointLatitude.value, self.repository.viewpointLongitude.value ) return self.getDrawnColoredLine( self.arrowType, coloredLine, projectiveSpace, 'mouse_item', self.repository.widthOfSelectionThread.value ) def getScreenComplex( self, pointComplex ): "Get the point in screen perspective." return complex( pointComplex.real, - pointComplex.imag ) + self.center def getScreenView( self, point, projectiveSpace ): "Get the point in screen view perspective." return self.getScreenComplex( projectiveSpace.getDotComplex(point) ) def printHexadecimalColorName(self, name): "Print the color name in hexadecimal." 
colorTuple = self.canvas.winfo_rgb( name ) print('#%s%s%s' % ( settings.getWidthHex( colorTuple[0], 2 ), settings.getWidthHex( colorTuple[1], 2 ), settings.getWidthHex( colorTuple[2], 2 ) ) ) def update(self): "Update the screen." if len( self.skeinPanes ) < 1: return self.limitIndexSetArrowMouseDeleteCanvas() self.repository.viewpointLatitude.value = view_rotate.getBoundedLatitude( self.repository.viewpointLatitude.value ) self.repository.viewpointLongitude.value = round( self.repository.viewpointLongitude.value, 1 ) projectiveSpace = euclidean.ProjectiveSpace().getByLatitudeLongitude( self.repository.viewpointLatitude.value, self.repository.viewpointLongitude.value ) skeinPanesCopy = self.getUpdateSkeinPanes()[:] skeinPanesCopy.sort( compareLayerSequence ) if projectiveSpace.basisZ.z > 0.0: self.drawXYAxisLines( projectiveSpace ) else: skeinPanesCopy.reverse() self.drawZAxisLine( projectiveSpace ) for skeinPane in skeinPanesCopy: self.drawSkeinPane( projectiveSpace, skeinPane ) if projectiveSpace.basisZ.z > 0.0: self.drawZAxisLine( projectiveSpace ) else: self.drawXYAxisLines( projectiveSpace ) if self.repository.widthOfAxisNegativeSide.value > 0: self.drawRulings( self.negativeAxisLineX, projectiveSpace, self.negativeRulings ) self.drawRulings( self.negativeAxisLineY, projectiveSpace, self.negativeRulings ) self.drawRulings( self.negativeAxisLineZ, projectiveSpace, self.negativeRulings ) if self.repository.widthOfAxisPositiveSide.value > 0: self.drawRulings( self.positiveAxisLineX, projectiveSpace, self.positiveRulings ) self.drawRulings( self.positiveAxisLineY, projectiveSpace, self.positiveRulings ) self.drawRulings( self.positiveAxisLineZ, projectiveSpace, self.positiveRulings ) self.setDisplayLayerIndex() def main(): "Display the skeiniso dialog." if len(sys.argv) > 1: tableau.startMainLoopFromWindow( getWindowAnalyzeFile(' '.join(sys.argv[1 :])) ) else: settings.startMainLoopFromConstructor( getNewRepository() ) if __name__ == "__main__": main()
gpl-2.0
nschaetti/EchoTorch
examples/generation/narma10_esn_feedbacks.py
1
3043
# -*- coding: utf-8 -*- # # File : examples/timeserie_prediction/switch_attractor_esn # Description : NARMA 30 prediction with ESN. # Date : 26th of January, 2018 # # This file is part of EchoTorch. EchoTorch is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Nils Schaetti <nils.schaetti@unine.ch> # Imports import torch from echotorch.datasets.NARMADataset import NARMADataset import echotorch.nn as etnn import echotorch.utils from torch.autograd import Variable from torch.utils.data.dataloader import DataLoader import numpy as np import mdp # Dataset params train_sample_length = 5000 test_sample_length = 1000 n_train_samples = 1 n_test_samples = 1 batch_size = 1 spectral_radius = 0.9 leaky_rate = 1.0 input_dim = 1 n_hidden = 100 # Use CUDA? 
use_cuda = False use_cuda = torch.cuda.is_available() if use_cuda else False # Manual seed mdp.numx.random.seed(1) np.random.seed(2) torch.manual_seed(1) # NARMA30 dataset narma10_train_dataset = NARMADataset(train_sample_length, n_train_samples, system_order=10, seed=1) narma10_test_dataset = NARMADataset(test_sample_length, n_test_samples, system_order=10, seed=10) # Data loader trainloader = DataLoader(narma10_train_dataset, batch_size=batch_size, shuffle=False, num_workers=2) testloader = DataLoader(narma10_test_dataset, batch_size=batch_size, shuffle=False, num_workers=2) # ESN cell esn = etnn.ESN( input_dim=input_dim, hidden_dim=n_hidden, output_dim=1, spectral_radius=spectral_radius, learning_algo='inv', # leaky_rate=leaky_rate, feedbacks=True ) if use_cuda: esn.cuda() # end if # For each batch for data in trainloader: # Inputs and outputs inputs, targets = data # To variable inputs, targets = Variable(inputs), Variable(targets) if use_cuda: inputs, targets = inputs.cuda(), targets.cuda() # Accumulate xTx and xTy esn(inputs, targets) # end for # Finalize training esn.finalize() # Test MSE dataiter = iter(testloader) test_u, test_y = dataiter.next() test_u, test_y = Variable(test_u), Variable(test_y) gen_u = Variable(torch.zeros(batch_size, test_sample_length, input_dim)) if use_cuda: test_u, test_y, gen_u = test_u.cuda(), test_y.cuda(), gen_u.cuda() y_predicted = esn(test_u) print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data))) print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))) print(u"") y_generated = esn(gen_u) print(y_generated)
gpl-3.0
jjmiranda/edx-platform
lms/djangoapps/shoppingcart/admin.py
63
5399
"""Django admin interface for the shopping cart models. """ from ratelimitbackend import admin from shoppingcart.models import ( PaidCourseRegistrationAnnotation, Coupon, DonationConfiguration, Invoice, CourseRegistrationCodeInvoiceItem, InvoiceTransaction ) class SoftDeleteCouponAdmin(admin.ModelAdmin): """ Admin for the Coupon table. soft-delete on the coupons """ fields = ('code', 'description', 'course_id', 'percentage_discount', 'created_by', 'created_at', 'is_active') raw_id_fields = ("created_by",) readonly_fields = ('created_at',) actions = ['really_delete_selected'] def queryset(self, request): """ Returns a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ # Default: qs = self.model._default_manager.get_active_coupons_query_set() # Queryset with all the coupons including the soft-deletes: qs = self.model._default_manager.get_queryset() query_string = self.model._default_manager.get_active_coupons_queryset() # pylint: disable=protected-access return query_string def get_actions(self, request): actions = super(SoftDeleteCouponAdmin, self).get_actions(request) del actions['delete_selected'] return actions def really_delete_selected(self, request, queryset): """override the default behavior of selected delete method""" for obj in queryset: obj.is_active = False obj.save() if queryset.count() == 1: message_bit = "1 coupon entry was" else: message_bit = "%s coupon entries were" % queryset.count() self.message_user(request, "%s successfully deleted." % message_bit) def delete_model(self, request, obj): """override the default behavior of single instance of model delete method""" obj.is_active = False obj.save() really_delete_selected.short_description = "Delete s selected entries" class CourseRegistrationCodeInvoiceItemInline(admin.StackedInline): """Admin for course registration code invoice items. Displayed inline within the invoice admin UI. 
""" model = CourseRegistrationCodeInvoiceItem extra = 0 can_delete = False readonly_fields = ( 'qty', 'unit_price', 'currency', 'course_id', ) def has_add_permission(self, request): return False class InvoiceTransactionInline(admin.StackedInline): """Admin for invoice transactions. Displayed inline within the invoice admin UI. """ model = InvoiceTransaction extra = 0 readonly_fields = ( 'created', 'modified', 'created_by', 'last_modified_by' ) class InvoiceAdmin(admin.ModelAdmin): """Admin for invoices. This is intended for the internal finance team to be able to view and update invoice information, including payments and refunds. """ date_hierarchy = 'created' can_delete = False readonly_fields = ('created', 'modified') search_fields = ( 'internal_reference', 'customer_reference_number', 'company_name', ) fieldsets = ( ( None, { 'fields': ( 'internal_reference', 'customer_reference_number', 'created', 'modified', ) } ), ( 'Billing Information', { 'fields': ( 'company_name', 'company_contact_name', 'company_contact_email', 'recipient_name', 'recipient_email', 'address_line_1', 'address_line_2', 'address_line_3', 'city', 'state', 'zip', 'country' ) } ) ) readonly_fields = ( 'internal_reference', 'customer_reference_number', 'created', 'modified', 'company_name', 'company_contact_name', 'company_contact_email', 'recipient_name', 'recipient_email', 'address_line_1', 'address_line_2', 'address_line_3', 'city', 'state', 'zip', 'country' ) inlines = [ CourseRegistrationCodeInvoiceItemInline, InvoiceTransactionInline ] def save_formset(self, request, form, formset, change): """Save the user who created and modified invoice transactions. 
""" instances = formset.save(commit=False) for instance in instances: if isinstance(instance, InvoiceTransaction): if not hasattr(instance, 'created_by'): instance.created_by = request.user instance.last_modified_by = request.user instance.save() def has_add_permission(self, request): return False def has_delete_permission(self, request, obj=None): return False admin.site.register(PaidCourseRegistrationAnnotation) admin.site.register(Coupon, SoftDeleteCouponAdmin) admin.site.register(DonationConfiguration) admin.site.register(Invoice, InvoiceAdmin)
agpl-3.0
lzw120/django
tests/regressiontests/many_to_one_regress/models.py
124
1396
""" Regression tests for a few ForeignKey bugs. """ from django.db import models # If ticket #1578 ever slips back in, these models will not be able to be # created (the field names being lower-cased versions of their opposite # classes is important here). class First(models.Model): second = models.IntegerField() class Second(models.Model): first = models.ForeignKey(First, related_name = 'the_first') # Protect against repetition of #1839, #2415 and #2536. class Third(models.Model): name = models.CharField(max_length=20) third = models.ForeignKey('self', null=True, related_name='child_set') class Parent(models.Model): name = models.CharField(max_length=20) bestchild = models.ForeignKey('Child', null=True, related_name='favored_by') class Child(models.Model): name = models.CharField(max_length=20) parent = models.ForeignKey(Parent) # Multiple paths to the same model (#7110, #7125) class Category(models.Model): name = models.CharField(max_length=20) def __unicode__(self): return self.name class Record(models.Model): category = models.ForeignKey(Category) class Relation(models.Model): left = models.ForeignKey(Record, related_name='left_set') right = models.ForeignKey(Record, related_name='right_set') def __unicode__(self): return u"%s - %s" % (self.left.category.name, self.right.category.name)
bsd-3-clause
virtuald/pynsq
setup.py
1
1354
from setuptools import setup from setuptools.command.test import test as TestCommand import sys class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import pytest errno = pytest.main(self.test_args) sys.exit(errno) # also update in nsq/version.py version = '0.6.9' setup( name='pynsq', version=version, description='official Python client library for NSQ', keywords='python nsq', author='Matt Reiferson', author_email='snakes@gmail.com', url='https://github.com/nsqio/pynsq', download_url=( 'https://s3.amazonaws.com/bitly-downloads/nsq/pynsq-%s.tar.gz' % version ), packages=['nsq'], install_requires=['tornado'], include_package_data=True, zip_safe=False, tests_require=['pytest', 'mock', 'simplejson', 'python-snappy', 'tornado'], cmdclass={'test': PyTest}, classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: Implementation :: CPython', ] )
mit
Hikari-no-Tenshi/android_external_skia
tools/skpbench/_hardware_pixel2.py
12
4472
# Copyright 2018 Google Inc. # # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from _hardware import Expectation from _hardware_android import HardwareAndroid CPU_CLOCK_RATE = 2035200 MEM_CLOCK_RATE = 13763 GPU_CLOCK_RATE = 670000000 GPU_POWER_LEVEL = 1 # lower is faster, minimum is 0 class HardwarePixel2(HardwareAndroid): def __init__(self, adb): HardwareAndroid.__init__(self, adb) def __enter__(self): HardwareAndroid.__enter__(self) if not self._adb.is_root(): return self self._adb.shell('\n'.join([ ''' stop thermal-engine stop perfd''', # turn off the slow cores and one fast core ''' for N in 0 1 2 3 7; do echo 0 > /sys/devices/system/cpu/cpu$N/online done''', # lock 3 fast cores: two for Skia and one for the OS ''' for N in 4 5 6; do echo 1 > /sys/devices/system/cpu/cpu$N/online echo userspace > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_governor echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_max_freq echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_min_freq echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_setspeed done''' % tuple(CPU_CLOCK_RATE for _ in range(3)), # Set GPU bus and idle timer ''' echo 0 > /sys/class/kgsl/kgsl-3d0/bus_split''', # csmartdalton, 4-26-2018: this line hangs my device # echo 1 > /sys/class/kgsl/kgsl-3d0/force_clk_on ''' echo 10000 > /sys/class/kgsl/kgsl-3d0/idle_timer''', # Set mem frequency to max ''' echo %i > /sys/class/devfreq/soc\:qcom,gpubw/min_freq echo %i > /sys/class/devfreq/soc\:qcom,gpubw/max_freq echo %i > /sys/class/devfreq/soc\:qcom,cpubw/min_freq echo %i > /sys/class/devfreq/soc\:qcom,cpubw/max_freq echo %i > /sys/class/devfreq/soc\:qcom,mincpubw/min_freq echo %i > /sys/class/devfreq/soc\:qcom,mincpubw/max_freq echo %i > /sys/class/devfreq/soc\:qcom,memlat-cpu0/min_freq echo %i > /sys/class/devfreq/soc\:qcom,memlat-cpu0/max_freq''' % tuple(MEM_CLOCK_RATE for _ in range(8)), # Set GPU to performance mode ''' echo performance > 
/sys/class/kgsl/kgsl-3d0/devfreq/governor echo %i > /sys/class/kgsl/kgsl-3d0/devfreq/max_freq echo %i > /sys/class/kgsl/kgsl-3d0/devfreq/min_freq''' % tuple(GPU_CLOCK_RATE for _ in range(2)), # Set GPU power level ''' echo %i > /sys/class/kgsl/kgsl-3d0/max_pwrlevel echo %i > /sys/class/kgsl/kgsl-3d0/min_pwrlevel''' % tuple(GPU_POWER_LEVEL for _ in range(2))])) assert('msm_therm' == self._adb.check(\ 'cat /sys/class/thermal/thermal_zone10/type').strip()) assert('pm8998_tz' == self._adb.check(\ 'cat /sys/class/thermal/thermal_zone7/type').strip()) return self def sanity_check(self): HardwareAndroid.sanity_check(self) if not self._adb.is_root(): return result = self._adb.check(' '.join( ['cat', '/sys/class/power_supply/battery/capacity', '/sys/devices/system/cpu/online'] + \ ['/sys/devices/system/cpu/cpu%i/cpufreq/scaling_cur_freq' % i for i in range(4, 7)] + \ # Unfortunately we can't monitor the gpu clock: # # /sys/class/kgsl/kgsl-3d0/devfreq/cur_freq # # It doesn't respect the min_freq/max_freq values when not under load. ['/sys/kernel/debug/clk/bimc_clk/measure', '/sys/class/kgsl/kgsl-3d0/temp', '/sys/class/kgsl/kgsl-3d0/throttling', '/sys/class/thermal/thermal_zone10/temp', '/sys/class/thermal/thermal_zone7/temp'])) expectations = \ [Expectation(int, min_value=30, name='battery', sleeptime=30*60), Expectation(str, exact_value='4-6', name='online cpus')] + \ [Expectation(int, exact_value=CPU_CLOCK_RATE, name='cpu_%i clock rate' %i) for i in range(4, 7)] + \ [Expectation(long, min_value=902390000, max_value=902409999, name='measured ddr clock', sleeptime=10), Expectation(int, max_value=750, name='gpu temperature'), Expectation(int, exact_value=1, name='gpu throttling'), Expectation(int, max_value=75, name='msm_therm temperature'), Expectation(int, max_value=75000, name='pm8998_tz temperature')] Expectation.check_all(expectations, result.splitlines())
bsd-3-clause
swatilodha/coala
tests/output/ConfWriterTest.py
26
2819
import os import tempfile import unittest from coalib.output.ConfWriter import ConfWriter from coalib.parsing.ConfParser import ConfParser class ConfWriterTest(unittest.TestCase): example_file = ("to be ignored \n" " save=true\n" " a_default, another = val \n" " TEST = tobeignored # thats a comment \n" " test = push \n" " t = \n" " [MakeFiles] \n" " j , ANother = a \n" " multiline \n" " value \n" " ; just a omment \n" " ; just a omment \n" " key\\ space = value space\n" " key\\=equal = value=equal\n" " key\\\\backslash = value\\\\backslash\n" " key\\,comma = value,comma\n" " key\\#hash = value\\#hash\n" " key\\.dot = value.dot\n") def setUp(self): self.file = os.path.join(tempfile.gettempdir(), "ConfParserTestFile") with open(self.file, "w", encoding='utf-8') as file: file.write(self.example_file) self.conf_parser = ConfParser() self.write_file_name = os.path.join(tempfile.gettempdir(), "ConfWriterTestFile") self.uut = ConfWriter(self.write_file_name) def tearDown(self): self.uut.close() os.remove(self.file) os.remove(self.write_file_name) def test_exceptions(self): self.assertRaises(TypeError, self.uut.write_section, 5) def test_write(self): result_file = ["[Default]\n", "save = true\n", "a_default, another = val\n", "# thats a comment\n", "test = push\n", "t = \n", "\n", "[MakeFiles]\n", "j, ANother = a\n", "multiline\n", "value\n", "; just a omment\n", "; just a omment\n", "key\\ space = value space\n", "key\\=equal = value=equal\n", "key\\\\backslash = value\\\\backslash\n", "key\\,comma = value,comma\n", "key\\#hash = value\\#hash\n", "key\\.dot = value.dot\n"] self.uut.write_sections(self.conf_parser.parse(self.file)) self.uut.close() with open(self.write_file_name, "r") as f: lines = f.readlines() self.assertEqual(result_file, lines)
agpl-3.0
DataDog/integrations-core
ibm_mq/datadog_checks/ibm_mq/collectors/stats_collector.py
1
4767
# (C) Datadog, Inc. 2020-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) from pymqi.CMQCFC import MQCMD_STATISTICS_CHANNEL, MQCMD_STATISTICS_Q from datadog_checks.ibm_mq.stats.base_stats import BaseStats from datadog_checks.ibm_mq.stats.queue_stats import QueueStats from ..metrics import METRIC_PREFIX, channel_stats_metrics, queue_stats_metrics from ..stats import ChannelStats try: import pymqi from pymqi import Queue except ImportError as e: pymqiException = e pymqi = None STATISTICS_QUEUE_NAME = 'SYSTEM.ADMIN.STATISTICS.QUEUE' STATS_METRIC_CHANNEL_PREFIX = '{}.stats.channel'.format(METRIC_PREFIX) STATS_METRIC_QUEUE_PREFIX = '{}.stats.queue'.format(METRIC_PREFIX) class StatsCollector(object): def __init__(self, config, send_metrics_from_properties, log): self.config = config self.send_metrics_from_properties = send_metrics_from_properties self.log = log def collect(self, queue_manager): """ Collect Statistics Messages Docs: https://www.ibm.com/support/knowledgecenter/SSFKSJ_9.1.0/com.ibm.mq.mon.doc/q037320_.htm """ self.log.debug("Collecting stats newer than %s", self.config.instance_creation_datetime) queue = Queue(queue_manager, STATISTICS_QUEUE_NAME) try: # It's expected for the loop to stop when pymqi.MQMIError is raised with reason MQRC_NO_MSG_AVAILABLE. while True: bin_message = queue.get() self.log.trace('Stats binary message: %s', bin_message) message, header = pymqi.PCFExecute.unpack(bin_message) self.log.trace('Stats unpacked message: %s, Stats unpacked header: %s', message, header) stats = self._get_stats(message, header) # We only collect metrics generated after the check instance creation. if stats.start_datetime < self.config.instance_creation_datetime: self.log.debug( "Skipping messages created before agent startup. 
" "Message time: %s / Check instance creation time: %s", stats.start_datetime, self.config.instance_creation_datetime, ) continue if isinstance(stats, ChannelStats): self._collect_channel_stats(stats) elif isinstance(stats, QueueStats): self._collect_queue_stats(stats) else: self.log.debug('Unknown/NotImplemented command: %s', header.Command) except pymqi.MQMIError as e: # Don't warn if no messages, see: # https://github.com/dsuch/pymqi/blob/v1.12.0/docs/examples.rst#how-to-wait-for-multiple-messages if not (e.comp == pymqi.CMQC.MQCC_FAILED and e.reason == pymqi.CMQC.MQRC_NO_MSG_AVAILABLE): raise finally: queue.close() def _collect_channel_stats(self, channel_stats): self.log.debug('Collect channel stats. Number of channels: %s', len(channel_stats.channels)) for channel_info in channel_stats.channels: tags = self.config.tags_no_channel + [ 'channel:{}'.format(channel_info.name), 'channel_type:{}'.format(channel_info.type), 'remote_q_mgr_name:{}'.format(channel_info.remote_q_mgr_name), 'connection_name:{}'.format(channel_info.connection_name), ] metrics_map = channel_stats_metrics() self.send_metrics_from_properties( channel_info.properties, metrics_map=metrics_map, prefix=STATS_METRIC_CHANNEL_PREFIX, tags=tags ) def _collect_queue_stats(self, queue_stats): self.log.debug('Collect queue stats. Number of queues: %s', len(queue_stats.queues)) for queue_info in queue_stats.queues: tags = self.config.tags_no_channel + [ 'queue:{}'.format(queue_info.name), 'queue_type:{}'.format(queue_info.type), 'definition_type:{}'.format(queue_info.definition_type), ] metrics_map = queue_stats_metrics() self.send_metrics_from_properties( queue_info.properties, metrics_map=metrics_map, prefix=STATS_METRIC_QUEUE_PREFIX, tags=tags ) @staticmethod def _get_stats(message, header): if header.Command == MQCMD_STATISTICS_CHANNEL: stats = ChannelStats(message) elif header.Command == MQCMD_STATISTICS_Q: stats = QueueStats(message) else: stats = BaseStats(message) return stats
bsd-3-clause
netmanchris/PYHPEIMC
archived/pyhpimc.py
3
35486
#!/usr/bin/env python3 # author: @netmanchris """ Copyright 2015 Hewlett Packard Enterprise Development LP Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # This section imports required libraries import requests import json import sys import time import subprocess import csv import os import ipaddress import pysnmp from requests.auth import HTTPDigestAuth from pysnmp.entity.rfc3413.oneliner import cmdgen from pysnmp.proto import rfc1902 cmdGen = cmdgen.CommandGenerator() # IMC Device Class class IMCDev: """ imc_dev class takes in the ip_address which is used as the primary key to gather the following attributes for a device which as been previously discovered in the HP IMC Network Management platform. 
Each instance of this class should have the following attributes ip: The IP address used to manage the device in HP IMC description: returns the description of the device as discovered in HP IMC location: returns the location of the device as discovered in HP IMC contact: returns the contact of the device as discovered in HP IMC type: returns the type of the device as discovered in HP IMC name: returns the name of the device as discovered in HP IMC status: returns the current alarm status as discovered in HP IMC devid: returns the current devid used to internally identify the device as discovered in HP IMC interfacelist: returns the current list of interfaces for the device as discovered in HP IMC numinterface: returns a count of the number of interfaces in the interfacelist attribute vlans: returns the current vlans existing in the device as discovered in HP IMC. Device must be supported in the HP IMC Platform VLAN manager module. accessinterfaces: returns the device interfaces configured as access interfaces. Device must be supported in the HP IMC Platform VLAN manager module. trunkinterfaces: returns the device interfaces configured as trunk interfaces. Device must be supported in the HP IMC Platform VLAN manager module. alarm: returns the current unrecovered alarms as known by HP IMC. num alarms: returns a count of the number of alarms as returned by the alarm attribute serial: returns the network assets, including serial numbers for the device as discovered by HP IMC. The device must support the ENTITY MIB ( rfc 4133 ) for this value to be returned. runconfig: returns the most recent running configuration for the device as known by HP IMC. The device must be be supported in the HP IMC platform ICC module. startconfig: returns the most recent startup configuration for the device as known by HP IMC. The device must be be supported in the HP IMC platform ICC module. ipmacarp: returns the current device maciparp table as discovered by HP IMC. 
The imc_dev class supports the following methods which can be called upon an instance of this class addvlan: This method executes the addvlan function on the specific instance of the imc_dev object. Devices must supported in the HP IMC Platform VLAN Manager module. """ def __init__(self, ip_address): self.ip = get_dev_details(ip_address)['ip'] self.description = get_dev_details(ip_address)['sysDescription'] self.location = get_dev_details(ip_address)['location'] self.contact = get_dev_details(ip_address)['contact'] self.type = get_dev_details(ip_address)['typeName'] self.name = get_dev_details(ip_address)['sysName'] self.status = get_dev_details(ip_address)['statusDesc'] self.devid = get_dev_details(ip_address)['id'] self.interfacelist = get_dev_interface(self.devid) self.numinterface = len(get_dev_interface(self.devid)) self.vlans = get_dev_vlans(self.devid) self.accessinterfaces = get_device_access_interfaces(self.devid) self.trunkinterfaces = get_trunk_interfaces(self.devid) self.alarm = get_dev_alarms(self.devid) self.numalarm = len(get_dev_alarms(self.devid)) self.serials = get_serial_numbers(get_dev_asset_details(self.ip)) self.assets = get_dev_asset_details(self.ip) self.runconfig = get_dev_run_config(self.devid) self.startconfig = get_dev_start_config(self.devid) self.ipmacarp = get_ip_mac_arp_list(self.devid) def addvlan(self, vlanid, vlan_name): create_dev_vlan(self.devid, vlanid, vlan_name) def delvlan(self, vlanid): delete_dev_vlans(self.devid, vlanid) class IMCInterface: def __init__(self, ip_address, ifIndex): self.ip = get_dev_details(ip_address)['ip'] self.devid = get_dev_details(ip_address)['id'] self.ifIndex = get_interface_details(self.devid, ifIndex)['ifIndex'] self.macaddress = get_interface_details(self.devid, ifIndex)['phyAddress'] self.status = get_interface_details(self.devid, ifIndex)['statusDesc'] self.adminstatus = get_interface_details(self.devid, ifIndex)['adminStatusDesc'] self.name = get_interface_details(self.devid, 
ifIndex)['ifDescription'] self.description = get_interface_details(self.devid, ifIndex) self.mtu = get_interface_details(self.devid, ifIndex)['mtu'] self.speed = get_interface_details(self.devid, ifIndex)['ifspeed'] self.accessinterfaces = get_device_access_interfaces(self.devid) self.pvid = get_access_interface_vlan(self.ifIndex, self.accessinterfaces) class Host(IMCDev): def __init__(self, ip_address): self.hostip = get_real_time_locate(ip_address)['locateIp'] self.deviceip = get_real_time_locate(ip_address)['deviceIp'] self.ifIndex = get_real_time_locate(ip_address)['ifIndex'] self.devid = get_real_time_locate(ip_address)['deviceId'] self.accessinterfaces = get_device_access_interfaces(self.devid) self.pvid = get_access_interface_vlan(self.ifIndex, self.accessinterfaces) self.devstatus = get_dev_details(self.deviceip)['statusDesc'] self.intstatus = get_interface_details(self.devid, self.ifIndex)['statusDesc'] def down(self): set_inteface_down(self.devid, self.ifIndex) def up(self): set_inteface_up(self.devid, self.ifIndex) class Hypervisor(IMCDev): def __init__(self, ipaddress): IMCDev.__init__(self, ipaddress) self.vmguests = get_host_vm_guest(self.devid) self.nics = get_host_vm_nic(self.devid) self.hostinfo = None def get_dev_asset_details(ipaddress): """Takes in ipaddress as input to fetch device assett details from HP IMC RESTFUL API :param ipaddress: IP address of the device you wish to gather the asset details :return: object of type list containing the device asset details """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_dev_asset_url = "/imcrs/netasset/asset?assetDevice.ip=" + str(ipaddress) f_url = url + get_dev_asset_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: dev_asset_info = (json.loads(r.text)) if len(dev_asset_info) > 0: dev_asset_info = 
dev_asset_info['netAsset'] if type(dev_asset_info) == dict: dev_asset_info = [dev_asset_info] if type(dev_asset_info) == list: dev_asset_info[:] = [dev for dev in dev_asset_info if dev.get('deviceIp') == ipaddress] return dev_asset_info else: print("get_dev_asset_details: An Error has occured") def get_serial_numbers(assetList): """ Helper function: Uses return of get_dev_asset_details function to evaluate to evaluate for multipe serial objects. :param assetList: output of get_dev_asset_details function :return: the serial_list object of list type which contains one or more dictionaries of the asset details """ serial_list = [] if type(assetList) == list: for i in assetList: if len(i['serialNum']) > 0: serial_list.append(i) return serial_list def get_trunk_interfaces(devId): """Function takes devId as input to RESTFULL call to HP IMC platform :param devId: output of get_dev_details :return: list of dictionaries containing of interfaces configured as an 802.1q trunk """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_trunk_interfaces_url = "/imcrs/vlan/trunk?devId=" + str(devId) + "&start=1&size=5000&total=false" f_url = url + get_trunk_interfaces_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: dev_trunk_interfaces = (json.loads(r.text)) if len(dev_trunk_interfaces) == 2: return dev_trunk_interfaces['trunkIf'] else: dev_trunk_interfaces['trunkIf'] = ["No trunk inteface"] return dev_trunk_interfaces['trunkIf'] def get_device_access_interfaces(devId): """Function takes devId as input to RESTFUL call to HP IMC platform :param devId: requires deviceID as the only input parameter :return: list of dictionaries containing interfaces configured as access ports """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global 
r get_access_interface_vlan_url = "/imcrs/vlan/access?devId=" + str(devId) + "&start=1&size=500&total=false" f_url = url + get_access_interface_vlan_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: dev_access_interfaces = (json.loads(r.text)) if len(dev_access_interfaces) == 2: return dev_access_interfaces['accessIf'] else: dev_access_interfaces['accessIf'] = ["No access inteface"] return dev_access_interfaces['accessIf'] else: print("get_device_access_interfaces: An Error has occured") def get_access_interface_vlan(ifIndex, accessinterfacelist): for i in accessinterfacelist: if i['ifIndex'] == ifIndex: return i['pvid'] else: return "Not an Access Port" def get_interface_details(devId, ifIndex): # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_interface_details_url = "/imcrs/plat/res/device/" + str(devId) + "/interface/" + str(ifIndex) f_url = url + get_interface_details_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: dev_details = (json.loads(r.text)) return dev_details else: print("get_interface_details: An Error has occured") def get_dev_details(ip_address): """Takes string input of IP address to issue RESTUL call to HP IMC :param ip_address: string object of dotted decimal notation of IPv4 address :return: dictionary of device details >>> get_dev_details('10.101.0.1') {'symbolLevel': '2', 'typeName': 'Cisco 2811', 'location': 'changed this too', 'status': '1', 'sysName': 'Cisco2811.haw.int', 'id': '30', 'symbolType': '3', 'symbolId': '1032', 'sysDescription': '', 'symbolName': 'Cisco2811.haw.int', 'mask': '255.255.255.0', 'label': 'Cisco2811.haw.int', 'symbolDesc': '', 'sysOid': '1.3.6.1.4.1.9.1.576', 'contact': 'changed this too', 'statusDesc': 
'Normal', 'parentId': '1', 'categoryId': '0', 'topoIconName': 'iconroute', 'mac': '00:1b:d4:47:1e:68', 'devCategoryImgSrc': 'router', 'link': {'@rel': 'self', '@href': 'http://10.101.0.202:8080/imcrs/plat/res/device/30', '@op': 'GET'}, 'ip': '10.101.0.1'} >>> get_dev_details('8.8.8.8') Device not found 'Device not found' """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_dev_details_url = "/imcrs/plat/res/device?resPrivilegeFilter=false&ip=" + \ str(ip_address) + "&start=0&size=1000&orderBy=id&desc=false&total=false" f_url = url + get_dev_details_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: dev_details = (json.loads(r.text)) if len(dev_details) == 0: print("Device not found") return "Device not found" elif type(dev_details['device']) == list: for i in dev_details['device']: if i['ip'] == ip_address: dev_details = i return dev_details elif type(dev_details['device']) == dict: return dev_details['device'] else: print("dev_details: An Error has occured") def get_dev_vlans(devId): """Function takes input of devID to issue RESTUL call to HP IMC :param devId: requires devId as the only input parameter :return: list dictionaries of existing vlans on the devices. 
Device must be supported in HP IMC platform VLAN manager module """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_dev_vlans_url = "/imcrs/vlan?devId=" + str(devId) + "&start=0&size=5000&total=false" f_url = url + get_dev_vlans_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: dev_details = (json.loads(r.text))['vlan'] return dev_details elif r.status_code == 409: return [{'vlan': 'None'}] else: print("get_dev_vlans: An Error has occured") def get_dev_interface(devid): """ Function takes devid as input to RESTFUL call to HP IMC platform :param devid: requires devid as the only input :return: list object which contains a dictionary per interface """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_dev_interface_url = "/imcrs/plat/res/device/" + str(devid) + \ "/interface?start=0&size=1000&desc=false&total=false" f_url = url + get_dev_interface_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: int_list = (json.loads(r.text))['interface'] return int_list else: print("An Error has occured") def get_dev_run_config(devId): """ function takes the devId of a specific device and issues a RESTFUL call to get the most current running config file as known by the HP IMC Base Platform ICC module for the target device. :param devId: int or str value of the target device :return: str which contains the entire content of the target device running configuration. 
If the device is not currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not supported on this device" """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_dev_run_url = "/imcrs/icc/deviceCfg/" + str(devId) + "/currentRun" f_url = url + get_dev_run_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # print (r.status_code) if r.status_code == 200: run_conf = (json.loads(r.text))['content'] type(run_conf) if run_conf is None: return "This features is no supported on this device" else: return run_conf else: return "This features is not supported on this device" def get_dev_start_config(devId): """ function takes the devId of a specific device and issues a RESTFUL call to get the most current startup config file as known by the HP IMC Base Platform ICC module for the target device. :param devId: int or str value of the target device :return: str which contains the entire content of the target device startup configuration. If the device is not currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not supported on this device" """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_dev_run_url = "/imcrs/icc/deviceCfg/" + str(devId) + "/currentStart" f_url = url + get_dev_run_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) if r.status_code == 200: start_conf = (json.loads(r.text))['content'] return start_conf else: # print (r.status_code) return "This feature is not supported on this device" def get_dev_alarms(devId): """ function takes the devId of a specific device and issues a RESTFUL call to get the current alarms for the target device. 
:param devId: int or str value of the target device :return:list of dictionaries containing the alarms for this device """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_dev_alarm_url = "/imcrs/fault/alarm?operatorName=admin&deviceId=" + \ str(devId) + "&desc=false" f_url = url + get_dev_alarm_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) if r.status_code == 200: dev_alarm = (json.loads(r.text)) if 'alarm' in dev_alarm: return dev_alarm['alarm'] else: return "Device has no alarms" """ This section deals with functions to access the HP IMC Base Platform Terminal Access Specific API calls """ def get_real_time_locate(ipAddress): """ function takes the ipAddress of a specific host and issues a RESTFUL call to get the device and interface that the target host is currently connected to. :param ipAddress: str value valid IPv4 IP address :return: dictionary containing hostIp, devId, deviceIP, ifDesc, ifIndex """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() real_time_locate_url = "/imcrs/res/access/realtimeLocate?type=2&value=" + str(ipAddress) + "&total=false" f_url = url + real_time_locate_url r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents if r.status_code == 200: return json.loads(r.text)['realtimeLocation'] else: print(r.status_code) print("An Error has occured") def get_ip_mac_arp_list(devId): """ function takes devid of specific device and issues a RESTFUL call to get the IP/MAC/ARP list from the target device. :param devId: int or str value of the target device. :return: list of dictionaries containing the IP/MAC/ARP list of the target device. 
""" if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() ip_mac_arp_list_url = "/imcrs/res/access/ipMacArp/" + str(devId) f_url = url + ip_mac_arp_list_url r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents if r.status_code == 200: macarplist = (json.loads(r.text)) if len(macarplist) > 1: return macarplist['ipMacArp'] else: return ['this function is unsupported'] else: print(r.status_code) print("An Error has occured") """ This section contains functions to work with the various custom views available within HPE IMC Base Platform """ def get_custom_views(name=None): """ function takes no input and issues a RESTFUL call to get a list of custom views from HPE IMC. Optioanl Name input will return only the specified view. :param name: string containg the name of the desired custom view :return: list of dictionaries containing attributes of the custom views. """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() if name is None: get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false' elif name is not None: get_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&name='+ name + '&desc=false&total=false' f_url = url + get_custom_views_url r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents if r.status_code == 200: customviewlist = (json.loads(r.text))['customView'] if type(customviewlist) is dict: customviewlist = [customviewlist] return customviewlist else: return customviewlist else: print(r.status_code) print("An Error has occured") def create_custom_views(name=None, upperview=None): """ function takes no input and issues a RESTFUL call to get a list of custom views from HPE IMC. Optioanl Name input will return only the specified view. 
:param name: string containg the name of the desired custom view :return: list of dictionaries containing attributes of the custom views. """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() create_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=falsee' f_url = url + create_custom_views_url if upperview is None: payload = '''{ "name": "''' + name + '''", "upLevelSymbolId" : ""}''' else: parentviewid = get_custom_views(upperview)[0]['symbolId'] payload = '''{ "name": "'''+name+ '''"upperview" : "'''+str(parentviewid)+'''"}''' print (payload) r = requests.post(f_url, data = payload, auth=auth, headers=headers) # creates the URL using the payload variable as the contents if r.status_code == 201: return 'View ' + name +' created successfully' else: print(r.status_code) print("An Error has occured") """ This section contains functions which access the HP IMC Base Platform VLAN Manager specific API calls """ def create_dev_vlan(devid, vlanid, vlan_name): """ function takes devid and vlanid vlan_name of specific device and 802.1q VLAN tag and issues a RESTFUL call to add the specified VLAN from the target device. VLAN Name MUST be valid on target device. :param devid: int or str value of the target device :param vlanid:int or str value of target 802.1q VLAN :param vlan_name: str value of the target 802.1q VLAN name. MUST be valid name on target device. :return:HTTP Status code of 201 with no values. 
""" if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() create_dev_vlan_url = "/imcrs/vlan?devId=" + str(devid) f_url = url + create_dev_vlan_url payload = '''{ "vlanId": "''' + str(vlanid) + '''", "vlanName" : "''' + str(vlan_name) + '''"}''' r = requests.post(f_url, data=payload, auth=auth, headers=headers) # creates the URL using the payload variable as the contents print (r.status_code) if r.status_code == 201: print ('Vlan Created') return r.status_code elif r.status_code == 409: return '''Unable to create VLAN.\nVLAN Already Exists\nDevice does not support VLAN function''' else: print("An Error has occured") def delete_dev_vlans(devid, vlanid): """ function takes devid and vlanid of specific device and 802.1q VLAN tag and issues a RESTFUL call to remove the specified VLAN from the target device. :param devid: int or str value of the target device :param vlanid: :return:HTTP Status code of 204 with no values. """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() remove_dev_vlan_url = "/imcrs/vlan/delvlan?devId=" + str(devid) + "&vlanId=" + str(vlanid) f_url = url + remove_dev_vlan_url payload = None r = requests.delete(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents print (r.status_code) if r.status_code == 204: print ('Vlan deleted') return r.status_code elif r.status_code == 409: print ('Unable to delete VLAN.\nVLAN does not Exist\nDevice does not support VLAN function') return r.status_code else: print("An Error has occured") """ This section deals with functions which access the HP IMC Base Platform Device Resource specific API calls. """ def set_inteface_down(devid, ifindex): """ function takest devid and ifindex of specific device and interface and issues a RESTFUL call to " shut" the specifie d interface on the target device. 
:param devid: int or str value of the target device :param ifindex: int or str value of the target interface :return: HTTP status code 204 with no values. """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() set_int_down_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/down" f_url = url + set_int_down_url payload = None r = requests.put(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents print(r.status_code) if r.status_code == 204: return r.status_code else: print("An Error has occured") def set_inteface_up(devid, ifindex): """ function takest devid and ifindex of specific device and interface and issues a RESTFUL call to "undo shut" the spec ified interface on the target device. :param devid: int or str value of the target device :param ifindex: int or str value of the target interface :return: HTTP status code 204 with no values. """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() set_int_up_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/up" f_url = url + set_int_up_url payload = None r = requests.put(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents print(r.status_code) if r.status_code == 204: return r.status_code else: print("An Error has occured") def get_vm_host_info(hostId): """ function takes hostId as input to RESTFUL call to HP IMC :param hostId: int or string of HostId of Hypervisor host :return:list of dictionatires contraining the VM Host information for the target hypervisor """ global r if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() get_vm_host_info_url = "/imcrs/vrm/host?hostId=" + str(hostId) f_url = url + get_vm_host_info_url payload = None r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using 
the payload variable as the contents # print(r.status_code) if r.status_code == 200: if len(r.text) > 0: return json.loads(r.text) elif r.status_code == 204: print("Device is not a supported Hypervisor") return "Device is not a supported Hypervisor" else: print("An Error has occured") def get_host_info(hostId): """ function takes hostId as input to RESTFUL call to HP IMC :param hostId: int or string of HostId of Hypervisor host :return: list of dictionaries containing the host information for the target hypervisor """ global r if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() get_host_info_url = "/imcrs/vrm/host/vm?hostId=" + str(hostId) f_url = url + get_host_info_url payload = None r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents print(r.status_code) if r.status_code == 200: if len(json.loads(r.text)) > 1: return json.loads(r.text)['vmDevice'] else: return "Device is not a supported Hypervisor" else: print(r.text) print("An Error has occured") def get_host_vm_guest(hostId): """ function takes hostId as input to RESTFUL call to HP IMC :param hostId: HostId of Hypervisor host. :return: list of dictionaries containing the VM Guest information for the target hypervisor """ global r if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() get_host_vm_guest_url = "/imcrs/vrm/host/vm?hostId=" + str(hostId) f_url = url + get_host_vm_guest_url payload = None r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents print(r.status_code) if r.status_code == 200: if len(r.text) > 0: return json.loads(r.text)['vmDevice'] else: print(r.text) print("An Error has occured") def get_host_vm_nic(hostId): """ function takes hostId as input to RESTFUL call to HP IMC :param hostId: hostID of Hypervisor host. 
:return: list of dictionaries containing the NIC information for the target hypervisor """ global r if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() get_host_vm_nic_url = "/imcrs/vrm/host/vnic?hostDevId=" + str(hostId) f_url = url + get_host_vm_nic_url payload = None r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents print(r.status_code) if r.status_code == 200: if len(r.text) > 0: return json.loads(r.text)['Nic'] else: print(r.text) print("An Error has occured") """ System Level Functions """ def get_trap_definitions(): """Takes in no param as input to fetch SNMP TRAP definitions from HP IMC RESTFUL API :param None :return: object of type list containing the device asset details """ # checks to see if the imc credentials are already available if auth is None or url is None: set_imc_creds() global r get_trap_def_url = "/imcrs/fault/trapDefine/sync/query?enterpriseId=1.3.6.1.4.1.11&size=10000" f_url = url + get_trap_def_url payload = None # creates the URL using the payload variable as the contents r = requests.get(f_url, auth=auth, headers=headers) # r.status_code if r.status_code == 200: trap_def_list = (json.loads(r.text)) return trap_def_list['trapDefine'] else: print("get_dev_asset_details: An Error has occured") """ next section specifies the HP IMC authentication handler """ # url header to preprend on all IMC eAPI calls url = None # auth handler for eAPI calls auth = None # headers forcing IMC to respond with JSON content. 
# XML content return is the default
headers = {'Accept': 'application/json', 'Content-Type': 'application/json',
           'Accept-encoding': 'application/json'}

"""======================== Helper Functions ==========================="""


def print_to_file(object):
    """
    Function takes in object of type str, list, or dict and prints out to current
    working directory as pyoutput.txt

    :param object: object of type str, list, or dict
    :return: No return.  Just prints out to file handler and save to current
        working directory as pyoutput.txt
    """
    # BUG FIX: the original docstring opened with triple-double quotes but
    # "closed" with triple-single quotes, leaving the string unterminated and
    # swallowing the code that followed it.
    with open('pyoutput.txt', 'w') as fh:
        x = None
        if type(object) is list:
            x = json.dumps(object, indent=4)
        if type(object) is dict:
            x = json.dumps(object, indent=4)
        if type(object) is str:
            x = object
        fh.write(x)


def print_to_csv(list_of_dicts):
    """Write a list of flat dictionaries to file.csv in the current working directory."""
    with open('file.csv', 'w') as output:
        w = csv.DictWriter(output, list_of_dicts[0].keys())
        # BUG FIX: writeheader()/writerows() return None; the original rebound
        # `w` to that None after each call, so the writerows() call crashed.
        w.writeheader()
        w.writerows(list_of_dicts)


def set_imc_creds():
    """ This function prompts user for IMC server information and credentials and
    stores values in url and auth global variables"""
    global url, auth, r
    imc_protocol = input(
        "What protocol would you like to use to connect to the IMC server: \n Press 1 for HTTP: \n Press 2 for HTTPS:")
    if imc_protocol == "1":
        h_url = 'http://'
    else:
        h_url = 'https://'
    imc_server = input("What is the ip address of the IMC server?")
    imc_port = input("What is the port number of the IMC server?")
    imc_user = input("What is the username of the IMC eAPI user?")
    imc_pw = input('''What is the password of the IMC eAPI user?''')
    url = h_url + imc_server + ":" + imc_port
    auth = requests.auth.HTTPDigestAuth(imc_user, imc_pw)
    test_url = '/imcrs'
    f_url = url + test_url
    try:
        # NOTE(review): verify=False disables TLS certificate checking —
        # confirm this is intentional (lab use) before shipping.
        r = requests.get(f_url, auth=auth, headers=headers, verify=False)
    # checks for requests exceptions
    except requests.exceptions.RequestException as e:
        print("Error:\n" + str(e))
        print("\n\nThe IMC server address is invalid. Please try again\n\n")
        set_imc_creds()
    if r.status_code != 200:
        # checks for valid IMC credentials
        print("Error: \n You're credentials are invalid. Please try again\n\n")
        set_imc_creds()
    else:
        print("You've successfully access the IMC eAPI")
apache-2.0
ramcn/demo3
venv/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py
2931
2318
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .constants import eStart
from .compat import wrap_ord


class CodingStateMachine:
    """Table-driven state machine fed one byte at a time.

    The supplied model dict provides 'classTable', 'charLenTable',
    'classFactor', 'stateTable' and 'name' entries.
    """

    def __init__(self, sm):
        self._mModel = sm
        self._mCurrentBytePos = 0
        self._mCurrentCharLen = 0
        self.reset()

    def reset(self):
        # return to the machine's start state
        self._mCurrentState = eStart

    def next_state(self, c):
        # for each byte we get its class
        # if it is first byte, we also get byte length
        # PY3K: aBuf is a byte stream, so c is an int, not a byte
        byte_class = self._mModel['classTable'][wrap_ord(c)]
        if self._mCurrentState == eStart:
            self._mCurrentBytePos = 0
            self._mCurrentCharLen = self._mModel['charLenTable'][byte_class]
        # from byte's class and stateTable, we get its next state
        next_index = (self._mCurrentState * self._mModel['classFactor']
                      + byte_class)
        self._mCurrentState = self._mModel['stateTable'][next_index]
        self._mCurrentBytePos += 1
        return self._mCurrentState

    def get_current_charlen(self):
        return self._mCurrentCharLen

    def get_coding_state_machine(self):
        return self._mModel['name']
mit
gonboy/sl4a
python/src/Demo/pdist/rrcs.py
47
3993
#! /usr/bin/env python "Remote RCS -- command line interface" import sys import os import getopt import string import md5 import tempfile from rcsclient import openrcsclient def main(): sys.stdout = sys.stderr try: opts, rest = getopt.getopt(sys.argv[1:], 'h:p:d:qvL') if not rest: cmd = 'head' else: cmd, rest = rest[0], rest[1:] if not commands.has_key(cmd): raise getopt.error, "unknown command" coptset, func = commands[cmd] copts, files = getopt.getopt(rest, coptset) except getopt.error, msg: print msg print "usage: rrcs [options] command [options] [file] ..." print "where command can be:" print " ci|put # checkin the given files" print " co|get # checkout" print " info # print header info" print " head # print revision of head branch" print " list # list filename if valid" print " log # print full log" print " diff # diff rcs file and work file" print "if no files are given, all remote rcs files are assumed" sys.exit(2) x = openrcsclient(opts) if not files: files = x.listfiles() for fn in files: try: func(x, copts, fn) except (IOError, os.error), msg: print "%s: %s" % (fn, msg) def checkin(x, copts, fn): f = open(fn) data = f.read() f.close() new = not x.isvalid(fn) if not new and same(x, copts, fn, data): print "%s: unchanged since last checkin" % fn return print "Checking in", fn, "..." 
message = asklogmessage(new) messages = x.put(fn, data, message) if messages: print messages def checkout(x, copts, fn): data = x.get(fn) f = open(fn, 'w') f.write(data) f.close() def lock(x, copts, fn): x.lock(fn) def unlock(x, copts, fn): x.unlock(fn) def info(x, copts, fn): dict = x.info(fn) keys = dict.keys() keys.sort() for key in keys: print key + ':', dict[key] print '='*70 def head(x, copts, fn): head = x.head(fn) print fn, head def list(x, copts, fn): if x.isvalid(fn): print fn def log(x, copts, fn): flags = '' for o, a in copts: flags = flags + ' ' + o + a flags = flags[1:] messages = x.log(fn, flags) print messages def diff(x, copts, fn): if same(x, copts, fn): return flags = '' for o, a in copts: flags = flags + ' ' + o + a flags = flags[1:] data = x.get(fn) tf = tempfile.NamedTemporaryFile() tf.write(data) tf.flush() print 'diff %s -r%s %s' % (flags, x.head(fn), fn) sts = os.system('diff %s %s %s' % (flags, tf.name, fn)) if sts: print '='*70 def same(x, copts, fn, data = None): if data is None: f = open(fn) data = f.read() f.close() lsum = md5.new(data).digest() rsum = x.sum(fn) return lsum == rsum def asklogmessage(new): if new: print "enter description,", else: print "enter log message,", print "terminate with single '.' or end of file:" if new: print "NOTE: This is NOT the log message!" message = "" while 1: sys.stderr.write(">> ") sys.stderr.flush() line = sys.stdin.readline() if not line or line == '.\n': break message = message + line return message def remove(fn): try: os.unlink(fn) except os.error: pass commands = { 'ci': ('', checkin), 'put': ('', checkin), 'co': ('', checkout), 'get': ('', checkout), 'info': ('', info), 'head': ('', head), 'list': ('', list), 'lock': ('', lock), 'unlock': ('', unlock), 'log': ('bhLRtd:l:r:s:w:V:', log), 'diff': ('c', diff), } if __name__ == '__main__': main()
apache-2.0
kevenli/FeedIn
tests/util/etreetodicttests.py
2
1761
# -*- coding: utf-8 -*-
"""Unit tests for feedin.util.etree_to_dict2.

Created on 2014-11-21.  (The original author/date header comments were
mojibake-encoded; restated here in English.)
"""
import unittest

from lxml.etree import Element

from feedin import util


class Test(unittest.TestCase):

    def test_etree_to_dict(self):
        # a single child element becomes a plain nested dict
        root = Element('div')
        root.append(Element('a', {'href': 'http://aaa.bbb/'}))
        result = util.etree_to_dict2(root)
        self.assertEqual(result['a']['href'], "http://aaa.bbb/")

    def test_etree_to_dict2(self):
        # repeated tags are indexed by stringified position
        root = Element('div')
        root.append(Element('a', {'href': 'http://aaa.bbb/'}))
        root.append(Element('a', {'href': 'http://ccc.ddd/'}))
        result = util.etree_to_dict2(root)
        self.assertEqual(result['a']['0']['href'], "http://aaa.bbb/")
        self.assertEqual(result['a']['1']['href'], "http://ccc.ddd/")

    def test_etree_to_dict3(self):
        # a text-only child collapses to its text value
        root = Element('div')
        cite1 = Element('cite')
        cite1.text = "123"
        root.append(cite1)
        result = util.etree_to_dict2(root)
        self.assertEqual(result['cite'], "123")

    def test_etree_to_dict4(self):
        # mixed text-only and attributed repeats of the same tag
        root = Element('div')
        cite1 = Element('cite')
        cite1.text = "123"
        root.append(cite1)
        cite2 = Element('cite')
        cite2.text = "456"
        root.append(cite2)
        cite3 = Element('cite', {'class': 'author'})
        cite3.text = 'cite3'
        root.append(cite3)
        result = util.etree_to_dict2(root)
        self.assertEqual(result['cite']['0'], "123")
        self.assertEqual(result['cite']['1'], '456')
        self.assertEqual(result['cite']['2']['content'], 'cite3')


if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.test_etree_to_dict4']
    unittest.main()
apache-2.0
bgxavier/nova
nova/tests/unit/api/openstack/test_common.py
38
23913
# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test suites for 'common' code used throughout the OpenStack HTTP API. """ import mock import six from testtools import matchers import webob import webob.exc import webob.multidict from nova.api.openstack import common from nova.compute import task_states from nova.compute import vm_states from nova import exception from nova import test from nova.tests.unit import utils NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" class LimiterTest(test.NoDBTestCase): """Unit tests for the `nova.api.openstack.common.limited` method which takes in a list of items and, depending on the 'offset' and 'limit' GET params, returns a subset or complete set of the given items. """ def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() self.tiny = range(1) self.small = range(10) self.medium = range(1000) self.large = range(10000) def test_limiter_offset_zero(self): # Test offset key works with 0. req = webob.Request.blank('/?offset=0') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_offset_medium(self): # Test offset key works with a medium sized number. 
req = webob.Request.blank('/?offset=10') self.assertEqual(common.limited(self.tiny, req), []) self.assertEqual(common.limited(self.small, req), self.small[10:]) self.assertEqual(common.limited(self.medium, req), self.medium[10:]) self.assertEqual(common.limited(self.large, req), self.large[10:1010]) def test_limiter_offset_over_max(self): # Test offset key works with a number over 1000 (max_limit). req = webob.Request.blank('/?offset=1001') self.assertEqual(common.limited(self.tiny, req), []) self.assertEqual(common.limited(self.small, req), []) self.assertEqual(common.limited(self.medium, req), []) self.assertEqual( common.limited(self.large, req), self.large[1001:2001]) def test_limiter_offset_blank(self): # Test offset key works with a blank offset. req = webob.Request.blank('/?offset=') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_offset_bad(self): # Test offset key works with a BAD offset. req = webob.Request.blank(u'/?offset=\u0020aa') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_nothing(self): # Test request with no offset or limit. req = webob.Request.blank('/') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_zero(self): # Test limit of zero. req = webob.Request.blank('/?limit=0') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_medium(self): # Test limit of 10. 
req = webob.Request.blank('/?limit=10') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium[:10]) self.assertEqual(common.limited(self.large, req), self.large[:10]) def test_limiter_limit_over_max(self): # Test limit of 3000. req = webob.Request.blank('/?limit=3000') self.assertEqual(common.limited(self.tiny, req), self.tiny) self.assertEqual(common.limited(self.small, req), self.small) self.assertEqual(common.limited(self.medium, req), self.medium) self.assertEqual(common.limited(self.large, req), self.large[:1000]) def test_limiter_limit_and_offset(self): # Test request with both limit and offset. items = range(2000) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual(common.limited(items, req), items[1:4]) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual(common.limited(items, req), items[3:1003]) req = webob.Request.blank('/?offset=3&limit=1500') self.assertEqual(common.limited(items, req), items[3:1003]) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual(common.limited(items, req), []) def test_limiter_custom_max_limit(self): # Test a max_limit other than 1000. items = range(2000) req = webob.Request.blank('/?offset=1&limit=3') self.assertEqual( common.limited(items, req, max_limit=2000), items[1:4]) req = webob.Request.blank('/?offset=3&limit=0') self.assertEqual( common.limited(items, req, max_limit=2000), items[3:]) req = webob.Request.blank('/?offset=3&limit=2500') self.assertEqual( common.limited(items, req, max_limit=2000), items[3:]) req = webob.Request.blank('/?offset=3000&limit=10') self.assertEqual(common.limited(items, req, max_limit=2000), []) def test_limiter_negative_limit(self): # Test a negative limit. 
req = webob.Request.blank('/?limit=-3000') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) def test_limiter_negative_offset(self): # Test a negative offset. req = webob.Request.blank('/?offset=-30') self.assertRaises( webob.exc.HTTPBadRequest, common.limited, self.tiny, req) class SortParamUtilsTest(test.NoDBTestCase): def test_get_sort_params_defaults(self): '''Verifies the default sort key and direction.''' sort_keys, sort_dirs = common.get_sort_params({}) self.assertEqual(['created_at'], sort_keys) self.assertEqual(['desc'], sort_dirs) def test_get_sort_params_override_defaults(self): '''Verifies that the defaults can be overriden.''' sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1', default_dir='dir1') self.assertEqual(['key1'], sort_keys) self.assertEqual(['dir1'], sort_dirs) sort_keys, sort_dirs = common.get_sort_params({}, default_key=None, default_dir=None) self.assertEqual([], sort_keys) self.assertEqual([], sort_dirs) def test_get_sort_params_single_value(self): '''Verifies a single sort key and direction.''' params = webob.multidict.MultiDict() params.add('sort_key', 'key1') params.add('sort_dir', 'dir1') sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1'], sort_keys) self.assertEqual(['dir1'], sort_dirs) def test_get_sort_params_single_with_default(self): '''Verifies a single sort value with a default.''' params = webob.multidict.MultiDict() params.add('sort_key', 'key1') sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1'], sort_keys) # sort_key was supplied, sort_dir should be defaulted self.assertEqual(['desc'], sort_dirs) params = webob.multidict.MultiDict() params.add('sort_dir', 'dir1') sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['created_at'], sort_keys) # sort_dir was supplied, sort_key should be defaulted self.assertEqual(['dir1'], sort_dirs) def test_get_sort_params_multiple_values(self): '''Verifies multiple sort 
parameter values.''' params = webob.multidict.MultiDict() params.add('sort_key', 'key1') params.add('sort_key', 'key2') params.add('sort_key', 'key3') params.add('sort_dir', 'dir1') params.add('sort_dir', 'dir2') params.add('sort_dir', 'dir3') sort_keys, sort_dirs = common.get_sort_params(params) self.assertEqual(['key1', 'key2', 'key3'], sort_keys) self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs) # Also ensure that the input parameters are not modified sort_key_vals = [] sort_dir_vals = [] while 'sort_key' in params: sort_key_vals.append(params.pop('sort_key')) while 'sort_dir' in params: sort_dir_vals.append(params.pop('sort_dir')) self.assertEqual(['key1', 'key2', 'key3'], sort_key_vals) self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dir_vals) self.assertEqual(0, len(params)) class PaginationParamsTest(test.NoDBTestCase): """Unit tests for the `nova.api.openstack.common.get_pagination_params` method which takes in a request object and returns 'marker' and 'limit' GET params. """ def test_no_params(self): # Test no params. req = webob.Request.blank('/') self.assertEqual(common.get_pagination_params(req), {}) def test_valid_marker(self): # Test valid marker param. req = webob.Request.blank( '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2') self.assertEqual(common.get_pagination_params(req), {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'}) def test_valid_limit(self): # Test valid limit param. req = webob.Request.blank('/?limit=10') self.assertEqual(common.get_pagination_params(req), {'limit': 10}) def test_invalid_limit(self): # Test invalid limit param. req = webob.Request.blank('/?limit=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req) def test_valid_limit_and_marker(self): # Test valid limit and marker parameters. 
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?limit=20&marker=%s' % marker) self.assertEqual(common.get_pagination_params(req), {'marker': marker, 'limit': 20}) def test_valid_page_size(self): # Test valid page_size param. req = webob.Request.blank('/?page_size=10') self.assertEqual(common.get_pagination_params(req), {'page_size': 10}) def test_invalid_page_size(self): # Test invalid page_size param. req = webob.Request.blank('/?page_size=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req) def test_valid_limit_and_page_size(self): # Test valid limit and page_size parameters. req = webob.Request.blank('/?limit=20&page_size=5') self.assertEqual(common.get_pagination_params(req), {'page_size': 5, 'limit': 20}) class MiscFunctionsTest(test.TestCase): def test_remove_major_version_from_href(self): fixture = 'http://www.testsite.com/v1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href(self): fixture = 'http://www.testsite.com/v1.1/images' expected = 'http://www.testsite.com/images' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_2(self): fixture = 'http://www.testsite.com/v1.1/' expected = 'http://www.testsite.com/' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_3(self): fixture = 'http://www.testsite.com/v10.10' expected = 'http://www.testsite.com' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_4(self): fixture = 'http://www.testsite.com/v1.1/images/v10.5' expected = 'http://www.testsite.com/images/v10.5' actual = common.remove_version_from_href(fixture) self.assertEqual(actual, expected) def test_remove_version_from_href_bad_request(self): fixture = 
'http://www.testsite.com/1.1/images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) def test_remove_version_from_href_bad_request_2(self): fixture = 'http://www.testsite.com/v/images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) def test_remove_version_from_href_bad_request_3(self): fixture = 'http://www.testsite.com/v1.1images' self.assertRaises(ValueError, common.remove_version_from_href, fixture) def test_get_id_from_href_with_int_url(self): fixture = 'http://www.testsite.com/dir/45' actual = common.get_id_from_href(fixture) expected = '45' self.assertEqual(actual, expected) def test_get_id_from_href_with_int(self): fixture = '45' actual = common.get_id_from_href(fixture) expected = '45' self.assertEqual(actual, expected) def test_get_id_from_href_with_int_url_query(self): fixture = 'http://www.testsite.com/dir/45?asdf=jkl' actual = common.get_id_from_href(fixture) expected = '45' self.assertEqual(actual, expected) def test_get_id_from_href_with_uuid_url(self): fixture = 'http://www.testsite.com/dir/abc123' actual = common.get_id_from_href(fixture) expected = "abc123" self.assertEqual(actual, expected) def test_get_id_from_href_with_uuid_url_query(self): fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl' actual = common.get_id_from_href(fixture) expected = "abc123" self.assertEqual(actual, expected) def test_get_id_from_href_with_uuid(self): fixture = 'abc123' actual = common.get_id_from_href(fixture) expected = 'abc123' self.assertEqual(actual, expected) def test_raise_http_conflict_for_instance_invalid_state(self): exc = exception.InstanceInvalidState(attr='fake_attr', state='fake_state', method='fake_method', instance_uuid='fake') try: common.raise_http_conflict_for_instance_invalid_state(exc, 'meow', 'fake_server_id') except webob.exc.HTTPConflict as e: self.assertEqual(six.text_type(e), "Cannot 'meow' instance fake_server_id while it is in " "fake_attr fake_state") else: 
self.fail("webob.exc.HTTPConflict was not raised") def test_check_img_metadata_properties_quota_valid_metadata(self): ctxt = utils.get_test_admin_context() metadata1 = {"key": "value"} actual = common.check_img_metadata_properties_quota(ctxt, metadata1) self.assertIsNone(actual) metadata2 = {"key": "v" * 260} actual = common.check_img_metadata_properties_quota(ctxt, metadata2) self.assertIsNone(actual) metadata3 = {"key": ""} actual = common.check_img_metadata_properties_quota(ctxt, metadata3) self.assertIsNone(actual) def test_check_img_metadata_properties_quota_inv_metadata(self): ctxt = utils.get_test_admin_context() metadata1 = {"a" * 260: "value"} self.assertRaises(webob.exc.HTTPBadRequest, common.check_img_metadata_properties_quota, ctxt, metadata1) metadata2 = {"": "value"} self.assertRaises(webob.exc.HTTPBadRequest, common.check_img_metadata_properties_quota, ctxt, metadata2) metadata3 = "invalid metadata" self.assertRaises(webob.exc.HTTPBadRequest, common.check_img_metadata_properties_quota, ctxt, metadata3) metadata4 = None self.assertIsNone(common.check_img_metadata_properties_quota(ctxt, metadata4)) metadata5 = {} self.assertIsNone(common.check_img_metadata_properties_quota(ctxt, metadata5)) def test_status_from_state(self): for vm_state in (vm_states.ACTIVE, vm_states.STOPPED): for task_state in (task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH): actual = common.status_from_state(vm_state, task_state) expected = 'RESIZE' self.assertEqual(expected, actual) def test_status_rebuild_from_state(self): for vm_state in (vm_states.ACTIVE, vm_states.STOPPED, vm_states.ERROR): for task_state in (task_states.REBUILDING, task_states.REBUILD_BLOCK_DEVICE_MAPPING, task_states.REBUILD_SPAWNING): actual = common.status_from_state(vm_state, task_state) expected = 'REBUILD' self.assertEqual(expected, actual) def test_status_migrating_from_state(self): for vm_state in (vm_states.ACTIVE, vm_states.PAUSED): 
task_state = task_states.MIGRATING actual = common.status_from_state(vm_state, task_state) expected = 'MIGRATING' self.assertEqual(expected, actual) def test_task_and_vm_state_from_status(self): fixture1 = ['reboot'] actual = common.task_and_vm_state_from_status(fixture1) expected = [vm_states.ACTIVE], [task_states.REBOOT_PENDING, task_states.REBOOT_STARTED, task_states.REBOOTING] self.assertEqual(expected, actual) fixture2 = ['resize'] actual = common.task_and_vm_state_from_status(fixture2) expected = ([vm_states.ACTIVE, vm_states.STOPPED], [task_states.RESIZE_FINISH, task_states.RESIZE_MIGRATED, task_states.RESIZE_MIGRATING, task_states.RESIZE_PREP]) self.assertEqual(expected, actual) fixture3 = ['resize', 'reboot'] actual = common.task_and_vm_state_from_status(fixture3) expected = ([vm_states.ACTIVE, vm_states.STOPPED], [task_states.REBOOT_PENDING, task_states.REBOOT_STARTED, task_states.REBOOTING, task_states.RESIZE_FINISH, task_states.RESIZE_MIGRATED, task_states.RESIZE_MIGRATING, task_states.RESIZE_PREP]) self.assertEqual(expected, actual) class TestCollectionLinks(test.NoDBTestCase): """Tests the _get_collection_links method.""" @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link') def test_items_less_than_limit(self, href_link_mock): items = [ {"uuid": "123"} ] req = mock.MagicMock() params = mock.PropertyMock(return_value=dict(limit=10)) type(req).params = params builder = common.ViewBuilder() results = builder._get_collection_links(req, items, "ignored", "uuid") self.assertFalse(href_link_mock.called) self.assertThat(results, matchers.HasLength(0)) @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link') def test_items_equals_given_limit(self, href_link_mock): items = [ {"uuid": "123"} ] req = mock.MagicMock() params = mock.PropertyMock(return_value=dict(limit=1)) type(req).params = params builder = common.ViewBuilder() results = builder._get_collection_links(req, items, mock.sentinel.coll_key, "uuid") 
href_link_mock.assert_called_once_with(req, "123", mock.sentinel.coll_key) self.assertThat(results, matchers.HasLength(1)) @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link') def test_items_equals_default_limit(self, href_link_mock): items = [ {"uuid": "123"} ] req = mock.MagicMock() params = mock.PropertyMock(return_value=dict()) type(req).params = params self.flags(osapi_max_limit=1) builder = common.ViewBuilder() results = builder._get_collection_links(req, items, mock.sentinel.coll_key, "uuid") href_link_mock.assert_called_once_with(req, "123", mock.sentinel.coll_key) self.assertThat(results, matchers.HasLength(1)) @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link') def test_items_equals_default_limit_with_given(self, href_link_mock): items = [ {"uuid": "123"} ] req = mock.MagicMock() # Given limit is greater than default max, only return default max params = mock.PropertyMock(return_value=dict(limit=2)) type(req).params = params self.flags(osapi_max_limit=1) builder = common.ViewBuilder() results = builder._get_collection_links(req, items, mock.sentinel.coll_key, "uuid") href_link_mock.assert_called_once_with(req, "123", mock.sentinel.coll_key) self.assertThat(results, matchers.HasLength(1)) class LinkPrefixTest(test.NoDBTestCase): def test_update_link_prefix(self): vb = common.ViewBuilder() result = vb._update_link_prefix("http://192.168.0.243:24/", "http://127.0.0.1/compute") self.assertEqual("http://127.0.0.1/compute", result) result = vb._update_link_prefix("http://foo.x.com/v1", "http://new.prefix.com") self.assertEqual("http://new.prefix.com/v1", result) result = vb._update_link_prefix( "http://foo.x.com/v1", "http://new.prefix.com:20455/new_extra_prefix") self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1", result)
apache-2.0
ljnutal6/media-recommend
app/virtualenvs/recommedia/lib/python2.7/site-packages/pip/vendor/html5lib/trie/py.py
80
1774
from __future__ import absolute_import, division, unicode_literals from pip.vendor.six import text_type from bisect import bisect_left from ._base import Trie as ABCTrie class Trie(ABCTrie): def __init__(self, data): if not all(isinstance(x, text_type) for x in data.keys()): raise TypeError("All keys must be strings") self._data = data self._keys = sorted(data.keys()) self._cachestr = "" self._cachepoints = (0, len(data)) def __contains__(self, key): return key in self._data def __len__(self): return len(self._data) def __iter__(self): return iter(self._data) def __getitem__(self, key): return self._data[key] def keys(self, prefix=None): if prefix is None or prefix == "" or not self._keys: return set(self._keys) if prefix.startswith(self._cachestr): lo, hi = self._cachepoints start = i = bisect_left(self._keys, prefix, lo, hi) else: start = i = bisect_left(self._keys, prefix) keys = set() if start == len(self._keys): return keys while self._keys[i].startswith(prefix): keys.add(self._keys[i]) i += 1 self._cachestr = prefix self._cachepoints = (start, i) return keys def has_keys_with_prefix(self, prefix): if prefix in self._data: return True if prefix.startswith(self._cachestr): lo, hi = self._cachepoints i = bisect_left(self._keys, prefix, lo, hi) else: i = bisect_left(self._keys, prefix) if i == len(self._keys): return False return self._keys[i].startswith(prefix)
gpl-2.0
mozilla/ichnaea
ichnaea/webapp/app.py
1
1488
""" Holds global web application state and the WSGI handler. You can run this script for a one-process webapp. Further, you can pass in ``--check`` which will create the app and then exit making it easier to suss out startup and configuration issues. """ import sys from waitress import serve from ichnaea.conf import settings from ichnaea.webapp.config import main, shutdown_worker # Internal module global holding the runtime web app. _APP = None def wsgi_app(environ, start_response): """ Actual WSGI application endpoint, used on the command line via: .. code-block:: bash bin/gunicorn -c python:ichnaea.webapp.gunicorn_settings \ ichnaea.webapp.app:wsgi_app At startup reads the app config and calls :func:`ichnaea.webapp.config.main` once to setup the web app stored in the :data:`ichnaea.webapp.app._APP` global. """ global _APP if _APP is None: _APP = main(ping_connections=True) if environ is None and start_response is None: # Called as part of gunicorn's post_worker_init return _APP return _APP(environ, start_response) def worker_exit(server, worker): shutdown_worker(_APP) if __name__ == "__main__": if "--check" in sys.argv: main(ping_connections=False) else: serve( main(ping_connections=True), host="0.0.0.0", port=8000, expose_tracebacks=settings("local_dev_env"), )
apache-2.0
hhg2288/mkdocs
mkdocs/tests/config/config_tests.py
27
7563
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import os import shutil import tempfile import unittest from mkdocs import config from mkdocs import utils from mkdocs.config import config_options from mkdocs.exceptions import ConfigurationError from mkdocs.tests.base import dedent def ensure_utf(string): return string.encode('utf-8') if not utils.PY3 else string class ConfigTests(unittest.TestCase): def test_missing_config_file(self): def load_missing_config(): config.load_config(config_file='bad_filename.yaml') self.assertRaises(ConfigurationError, load_missing_config) def test_missing_site_name(self): c = config.Config(schema=config.DEFAULT_SCHEMA) c.load_dict({}) errors, warnings = c.validate() self.assertEqual(len(errors), 1) self.assertEqual(errors[0][0], 'site_name') self.assertEqual(str(errors[0][1]), 'Required configuration not provided.') self.assertEqual(len(warnings), 0) def test_empty_config(self): def load_empty_config(): config.load_config(config_file='/dev/null') self.assertRaises(ConfigurationError, load_empty_config) def test_nonexistant_config(self): def load_empty_config(): config.load_config(config_file='/path/that/is/not/real') self.assertRaises(ConfigurationError, load_empty_config) def test_invalid_config(self): file_contents = dedent(""" - ['index.md', 'Introduction'] - ['index.md', 'Introduction'] - ['index.md', 'Introduction'] """) config_file = tempfile.NamedTemporaryFile('w', delete=False) try: config_file.write(ensure_utf(file_contents)) config_file.flush() config_file.close() self.assertRaises( ConfigurationError, config.load_config, config_file=open(config_file.name, 'rb') ) finally: os.remove(config_file.name) def test_config_option(self): """ Users can explicitly set the config file using the '--config' option. Allows users to specify a config other than the default `mkdocs.yml`. 
""" expected_result = { 'site_name': 'Example', 'pages': [ {'Introduction': 'index.md'} ], } file_contents = dedent(""" site_name: Example pages: - ['index.md', 'Introduction'] """) config_file = tempfile.NamedTemporaryFile('w', delete=False) try: config_file.write(ensure_utf(file_contents)) config_file.flush() config_file.close() result = config.load_config(config_file=config_file.name) self.assertEqual(result['site_name'], expected_result['site_name']) self.assertEqual(result['pages'], expected_result['pages']) finally: os.remove(config_file.name) def test_theme(self): mytheme = tempfile.mkdtemp() custom = tempfile.mkdtemp() configs = [ dict(), # default theme {"theme": "readthedocs"}, # builtin theme {"theme_dir": mytheme}, # custom only {"theme": "readthedocs", "theme_dir": custom}, # builtin and custom ] abs_path = os.path.abspath(os.path.dirname(__file__)) mkdocs_dir = os.path.abspath(os.path.join(abs_path, '..', '..')) theme_dir = os.path.abspath(os.path.join(mkdocs_dir, 'themes')) search_asset_dir = os.path.abspath(os.path.join( mkdocs_dir, 'assets', 'search')) results = ( [os.path.join(theme_dir, 'mkdocs'), search_asset_dir], [os.path.join(theme_dir, 'readthedocs'), search_asset_dir], [mytheme, search_asset_dir], [custom, os.path.join(theme_dir, 'readthedocs'), search_asset_dir], ) for config_contents, result in zip(configs, results): c = config.Config(schema=( ('theme', config_options.Theme(default='mkdocs')), ('theme_dir', config_options.ThemeDir(exists=True)), )) c.load_dict(config_contents) c.validate() self.assertEqual(c['theme_dir'], result) def test_default_pages(self): tmp_dir = tempfile.mkdtemp() try: open(os.path.join(tmp_dir, 'index.md'), 'w').close() open(os.path.join(tmp_dir, 'about.md'), 'w').close() conf = config.Config(schema=config.DEFAULT_SCHEMA) conf.load_dict({ 'site_name': 'Example', 'docs_dir': tmp_dir }) conf.validate() self.assertEqual(['index.md', 'about.md'], conf['pages']) finally: shutil.rmtree(tmp_dir) def 
test_default_pages_nested(self): tmp_dir = tempfile.mkdtemp() try: open(os.path.join(tmp_dir, 'index.md'), 'w').close() open(os.path.join(tmp_dir, 'getting-started.md'), 'w').close() open(os.path.join(tmp_dir, 'about.md'), 'w').close() os.makedirs(os.path.join(tmp_dir, 'subA')) open(os.path.join(tmp_dir, 'subA', 'index.md'), 'w').close() os.makedirs(os.path.join(tmp_dir, 'subA', 'subA1')) open(os.path.join(tmp_dir, 'subA', 'subA1', 'index.md'), 'w').close() os.makedirs(os.path.join(tmp_dir, 'subC')) open(os.path.join(tmp_dir, 'subC', 'index.md'), 'w').close() os.makedirs(os.path.join(tmp_dir, 'subB')) open(os.path.join(tmp_dir, 'subB', 'index.md'), 'w').close() conf = config.Config(schema=config.DEFAULT_SCHEMA) conf.load_dict({ 'site_name': 'Example', 'docs_dir': tmp_dir }) conf.validate() self.assertEqual([ 'index.md', 'about.md', 'getting-started.md', {'subA': [ os.path.join('subA', 'index.md'), {'subA1': [ os.path.join('subA', 'subA1', 'index.md') ]} ]}, {'subB': [ os.path.join('subB', 'index.md') ]}, {'subC': [ os.path.join('subC', 'index.md') ]} ], conf['pages']) finally: shutil.rmtree(tmp_dir) def test_doc_dir_in_site_dir(self): j = os.path.join test_configs = ( {'docs_dir': j('site', 'docs'), 'site_dir': 'site'}, {'docs_dir': 'docs', 'site_dir': '.'}, {'docs_dir': '.', 'site_dir': '.'}, {'docs_dir': 'docs', 'site_dir': ''}, {'docs_dir': '', 'site_dir': ''}, {'docs_dir': j('..', 'mkdocs', 'docs'), 'site_dir': 'docs'}, ) conf = { 'site_name': 'Example', } for test_config in test_configs: patch = conf.copy() patch.update(test_config) # Same as the default schema, but don't verify the docs_dir exists. c = config.Config(schema=( ('docs_dir', config_options.Dir(default='docs')), ('site_dir', config_options.SiteDir(default='site')), )) c.load_dict(patch) self.assertRaises(config_options.ValidationError, c.validate)
bsd-2-clause
mgamer/gyp
test/defines/gyptest-defines-env-regyp.py
268
1350
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies build of an executable with C++ define specified by a gyp define, and the use of the environment during regeneration when the gyp file changes. """ import os import TestGyp # Regenerating build files when a gyp file changes is currently only supported # by the make generator. test = TestGyp.TestGyp(formats=['make']) try: os.environ['GYP_DEFINES'] = 'value=50' test.run_gyp('defines.gyp') finally: # We clear the environ after calling gyp. When the auto-regeneration happens, # the same define should be reused anyway. Reset to empty string first in # case the platform doesn't support unsetenv. os.environ['GYP_DEFINES'] = '' del os.environ['GYP_DEFINES'] test.build('defines.gyp') expect = """\ FOO is defined VALUE is 1 2*PAREN_VALUE is 12 HASH_VALUE is a#1 """ test.run_built_executable('defines', stdout=expect) # Sleep so that the changed gyp file will have a newer timestamp than the # previously generated build files. test.sleep() test.write('defines.gyp', test.read('defines-env.gyp')) test.build('defines.gyp', test.ALL) expect = """\ VALUE is 50 """ test.run_built_executable('defines', stdout=expect) test.pass_test()
bsd-3-clause
rysson/filmkodi
plugin.video.mrknow/mylib/_pydev_bundle/_pydev_filesystem_encoding.py
50
1095
import sys def __getfilesystemencoding(): ''' Note: there's a copy of this method in interpreterInfo.py ''' try: ret = sys.getfilesystemencoding() if not ret: raise RuntimeError('Unable to get encoding.') return ret except: try: #Handle Jython from java.lang import System # @UnresolvedImport env = System.getProperty("os.name").lower() if env.find('win') != -1: return 'ISO-8859-1' #mbcs does not work on Jython, so, use a (hopefully) suitable replacement return 'utf-8' except: pass #Only available from 2.3 onwards. if sys.platform == 'win32': return 'mbcs' return 'utf-8' def getfilesystemencoding(): try: ret = __getfilesystemencoding() #Check if the encoding is actually there to be used! if hasattr('', 'encode'): ''.encode(ret) if hasattr('', 'decode'): ''.decode(ret) return ret except: return 'utf-8'
apache-2.0
josephwilk/qscintilla
Python/configure-old.py
3
14886
# This script configures QScintilla for PyQt v3 and/or v4. # # Copyright (c) 2015 Riverbank Computing Limited <info@riverbankcomputing.com> # # This file is part of QScintilla. # # This file may be used under the terms of the GNU General Public License # version 3.0 as published by the Free Software Foundation and appearing in # the file LICENSE included in the packaging of this file. Please review the # following information to ensure the GNU General Public License version 3.0 # requirements will be met: http://www.gnu.org/copyleft/gpl.html. # # If you do not wish to use this file under the terms of the GPL version 3.0 # then you may purchase a commercial license. For more information contact # info@riverbankcomputing.com. # # This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE # WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. import sys import os import glob import optparse # Import SIP's configuration module so that we have access to the error # reporting. Then try and import the configuration modules for both PyQt3 and # PyQt4. try: import sipconfig except ImportError: sys.stderr.write("Unable to import sipconfig. Please make sure SIP is installed.\n") sys.exit(1) try: import PyQt4.pyqtconfig as pyqt4 except: pyqt4 = None try: import pyqtconfig as pyqt3 except: pyqt3 = None if pyqt4 is not None: pyqt = pyqt4.Configuration() qt_data_dir = pyqt.qt_data_dir elif pyqt3 is not None: pyqt = pyqt3.Configuration() qt_data_dir = pyqt.qt_dir else: sipconfig.error("Unable to find either PyQt v3 or v4.") # This must be kept in sync with Python/configure.py, qscintilla.pro, # example-Qt4Qt5/application.pro and designer-Qt4Qt5/designer.pro. QSCI_API_MAJOR = 12 # Initialise the globals. sip_min_version = 0x040c00 if sys.platform == "win32": qsci_define = "QSCINTILLA_DLL" else: qsci_define = "" def create_optparser(): """Create the parser for the command line. 
""" def store_abspath(option, opt_str, value, parser): setattr(parser.values, option.dest, os.path.abspath(value)) def store_abspath_dir(option, opt_str, value, parser): if not os.path.isdir(value): raise optparse.OptionValueError("'%s' is not a directory" % value) setattr(parser.values, option.dest, os.path.abspath(value)) p = optparse.OptionParser(usage="python %prog [options]", version="2.9") p.add_option("-a", "--apidir", action="callback", default=None, type="string", metavar="DIR", dest="qscidir", callback=store_abspath, help="where QScintilla's API file will be " "installed [default: QTDIR/qsci]") p.add_option("-c", "--concatenate", action="store_true", default=False, dest="concat", help="concatenate the C++ source files") p.add_option("-d", "--destdir", action="callback", default=pyqt.pyqt_mod_dir, type="string", metavar="DIR", dest="qscimoddir", callback=store_abspath, help="where the " "QScintilla module will be installed [default: %s]" % pyqt.pyqt_mod_dir) p.add_option("-j", "--concatenate-split", type="int", default=1, metavar="N", dest="split", help="split the concatenated C++ " "source files into N pieces [default: 1]") p.add_option("-k", "--static", action="store_true", default=False, dest="static", help="build the QScintilla module as a static " "library") p.add_option("-n", action="callback", default=None, type="string", metavar="DIR", dest="qsciincdir", callback=store_abspath_dir, help="the directory containing the QScintilla Qsci header file " "directory [default: %s]" % pyqt.qt_inc_dir) p.add_option("--no-docstrings", action="store_true", default=False, dest="no_docstrings", help="disable the generation of docstrings") p.add_option("-o", action="callback", default=None, type="string", metavar="DIR", dest="qscilibdir", callback=store_abspath_dir, help="the directory containing the QScintilla library [default: " "%s]" % pyqt.qt_lib_dir) p.add_option("-p", type="int", default=-1, metavar="3|4", dest="pyqt_major", help="specifically configure for 
PyQt v3 or v4 [default v4, if " "found]") p.add_option("-r", "--trace", action="store_true", default=False, dest="tracing", help="build the QScintilla module with tracing " "enabled") p.add_option("-s", action="store_true", default=False, dest="not_dll", help="QScintilla is a static library and not a DLL (Windows only)") p.add_option("-u", "--debug", action="store_true", default=False, help="build the QScintilla module with debugging symbols") p.add_option("-v", "--sipdir", action="callback", default=None, metavar="DIR", dest="qscisipdir", callback=store_abspath, type="string", help="where the QScintilla .sip files will be " "installed [default: %s]" % pyqt.pyqt_sip_dir) p.add_option("-T", "--no-timestamp", action="store_true", default=False, dest="no_timestamp", help="suppress timestamps in the header " "comments of generated code [default: include timestamps]") if sys.platform != 'win32': if sys.platform.startswith('linux') or sys.platform == 'darwin': pip_default = True pip_default_str = "enabled" else: pip_default = False pip_default_str = "disabled" p.add_option("--protected-is-public", action="store_true", default=pip_default, dest="prot_is_public", help="enable building with 'protected' redefined as 'public' " "[default: %s]" % pip_default_str) p.add_option("--protected-not-public", action="store_false", dest="prot_is_public", help="disable building with 'protected' redefined as 'public'") return p def inform_user(): """Tell the user the option values that are going to be used. """ sipconfig.inform("PyQt %s is being used." % pyqt.pyqt_version_str) sipconfig.inform("Qt v%s %s edition is being used." % (sipconfig.version_to_string(pyqt.qt_version), pyqt.qt_edition)) sipconfig.inform("SIP %s is being used." % pyqt.sip_version_str) sipconfig.inform("The QScintilla module will be installed in %s." % opts.qscimoddir) sipconfig.inform("The QScintilla API file will be installed in %s." 
% os.path.join(opts.qscidir, "api", "python")) sipconfig.inform("The QScintilla .sip files will be installed in %s." % opts.qscisipdir) if opts.no_docstrings: sipconfig.inform("The QScintilla module is being built without generated docstrings.") else: sipconfig.inform("The QScintilla module is being built with generated docstrings.") if opts.prot_is_public: sipconfig.inform("The QScintilla module is being built with 'protected' redefined as 'public'.") def check_qscintilla(): """See if QScintilla can be found and what its version is. """ # Find the QScintilla header files. sciglobal = os.path.join(opts.qsciincdir, "Qsci", "qsciglobal.h") if os.access(sciglobal, os.F_OK): # Get the QScintilla version string. _, sciversstr = sipconfig.read_version(sciglobal, "QScintilla", "QSCINTILLA_VERSION", "QSCINTILLA_VERSION_STR") if glob.glob(os.path.join(opts.qscilibdir, "*qscintilla2*")): # Because we include the Python bindings with the C++ code we can # reasonably force the same version to be used and not bother about # versioning. if sciversstr != "2.9": sipconfig.error("QScintilla %s is being used but the Python bindings 2.9 are being built. Please use matching versions." % sciversstr) sipconfig.inform("QScintilla %s is being used." % sciversstr) else: sipconfig.error("The QScintilla library could not be found in %s. If QScintilla is installed then use the -o argument to explicitly specify the correct directory." % opts.qscilibdir) else: sipconfig.error("Qsci/qsciglobal.h could not be found in %s. If QScintilla is installed then use the -n argument to explicitly specify the correct directory." % opts.qsciincdir) def sip_flags(): """Return the SIP flags. """ # Get the flags used for the main PyQt module. if pyqt.pyqt_version >= 0x040000: flags = pyqt.pyqt_sip_flags.split() else: flags = pyqt.pyqt_qt_sip_flags.split() flags.append("-x") flags.append("Qsci_Qt4") # Generate the API file. 
flags.append("-a") flags.append("QScintilla2.api") # Add PyQt's .sip files to the search path. flags.append("-I") flags.append(pyqt.pyqt_sip_dir) return flags def generate_code(): """Generate the code for the QScintilla module. """ if pyqt.pyqt_version >= 0x040000: mname = "Qsci" else: mname = "qsci" sipconfig.inform("Generating the C++ source for the %s module..." % mname) # Build the SIP command line. argv = ['"' + pyqt.sip_bin + '"'] argv.extend(sip_flags()) if opts.no_timestamp: argv.append("-T") if not opts.no_docstrings: argv.append("-o"); if opts.prot_is_public: argv.append("-P"); if opts.concat: argv.append("-j") argv.append(str(opts.split)) if opts.tracing: argv.append("-r") argv.append("-c") argv.append(".") buildfile = os.path.join("qsci.sbf") argv.append("-b") argv.append(buildfile) if pyqt.pyqt_version >= 0x040000: argv.append("sip/qscimod4.sip") else: argv.append("sip/qscimod3.sip") os.system(" ".join(argv)) # Check the result. if not os.access(buildfile, os.F_OK): sipconfig.error("Unable to create the C++ code.") # Generate the Makefile. sipconfig.inform("Creating the Makefile for the %s module..." 
% mname) def fix_install(mfile): if sys.platform != "darwin" or opts.static: return mfile.write("\tinstall_name_tool -change libqscintilla2.%u.dylib %s/libqscintilla2.%u.dylib $(DESTDIR)%s/$(TARGET)\n" % (QSCI_API_MAJOR, opts.qscilibdir, QSCI_API_MAJOR, opts.qscimoddir)) if pyqt.pyqt_version >= 0x040000: class Makefile(pyqt4.QtGuiModuleMakefile): def generate_target_install(self, mfile): pyqt4.QtGuiModuleMakefile.generate_target_install(self, mfile) fix_install(mfile) else: class Makefile(pyqt3.QtModuleMakefile): def generate_target_install(self, mfile): pyqt3.QtModuleMakefile.generate_target_install(self, mfile) fix_install(mfile) installs = [] sipfiles = [] for s in glob.glob("sip/*.sip"): sipfiles.append(os.path.join("sip", os.path.basename(s))) installs.append([sipfiles, os.path.join(opts.qscisipdir, mname)]) installs.append(("QScintilla2.api", os.path.join(opts.qscidir, "api", "python"))) # PyQt v4.2 and later can handle MacOS/X universal binaries. if pyqt.pyqt_version >= 0x040200: makefile = Makefile( configuration=pyqt, build_file="qsci.sbf", install_dir=opts.qscimoddir, installs=installs, static=opts.static, debug=opts.debug, universal=pyqt.universal, arch=pyqt.arch, prot_is_public=opts.prot_is_public, deployment_target=pyqt.deployment_target ) else: makefile = Makefile( configuration=pyqt, build_file="qsci.sbf", install_dir=opts.qscimoddir, installs=installs, static=opts.static, debug=opts.debug ) if qsci_define: makefile.extra_defines.append(qsci_define) makefile.extra_include_dirs.append(opts.qsciincdir) makefile.extra_lib_dirs.append(opts.qscilibdir) makefile.extra_libs.append("qscintilla2") makefile.generate() def main(argv): """Create the configuration module module. argv is the list of command line arguments. """ global pyqt # Check SIP is new enough. 
if "preview" not in pyqt.sip_version_str and "snapshot" not in pyqt.sip_version_str: if pyqt.sip_version < sip_min_version: sipconfig.error("This version of QScintilla requires SIP v%s or later" % sipconfig.version_to_string(sip_min_version)) # Parse the command line. global opts p = create_optparser() opts, args = p.parse_args() if args: p.print_help() sys.exit(2) # Provide defaults for platform-specific options. if sys.platform == 'win32': opts.prot_is_public = False if opts.not_dll: global qsci_define qsci_define = "" # Set the version of PyQt explicitly. global qt_data_dir if opts.pyqt_major == 4: if pyqt4 is None: sipconfig.error("PyQt v4 was specified with the -p argument but doesn't seem to be installed.") else: pyqt = pyqt4.Configuration() qt_data_dir = pyqt.qt_data_dir elif opts.pyqt_major == 3: if pyqt3 is None: sipconfig.error("PyQt v3 was specified with the -p argument but doesn't seem to be installed.") else: pyqt = pyqt3.Configuration() qt_data_dir = pyqt.qt_dir elif opts.pyqt_major >= 0: sipconfig.error("Specify either 3 or 4 with the -p argument.") # Now we know which version of PyQt to use we can set defaults for those # arguments that weren't specified. if opts.qscimoddir is None: opts.qscimoddir = pyqt.pyqt_mod_dir if opts.qsciincdir is None: opts.qsciincdir = pyqt.qt_inc_dir if opts.qscilibdir is None: opts.qscilibdir = pyqt.qt_lib_dir if opts.qscisipdir is None: opts.qscisipdir = pyqt.pyqt_sip_dir if opts.qscidir is None: opts.qscidir = os.path.join(qt_data_dir, "qsci") # Check for QScintilla. check_qscintilla() # Tell the user what's been found. inform_user() # Generate the code. generate_code() ############################################################################### # The script starts here. ############################################################################### if __name__ == "__main__": try: main(sys.argv) except SystemExit: raise except: sys.stderr.write( """An internal error occured. 
Please report all the output from the program, including the following traceback, to support@riverbankcomputing.com. """) raise
gpl-3.0
ReactiveX/RxPY
tests/test_observable/test_map.py
1
12515
import unittest from rx import Observable, return_value, throw, empty, create from rx.testing import TestScheduler, ReactiveTest from rx.disposable import SerialDisposable from rx.operators import map, map_indexed on_next = ReactiveTest.on_next on_completed = ReactiveTest.on_completed on_error = ReactiveTest.on_error subscribe = ReactiveTest.subscribe subscribed = ReactiveTest.subscribed disposed = ReactiveTest.disposed created = ReactiveTest.created class RxException(Exception): pass # Helper function for raising exceptions within lambdas def _raise(ex): raise RxException(ex) class TestSelect(unittest.TestCase): def test_map_throws(self): mapper = map(lambda x: x) with self.assertRaises(RxException): return_value(1).pipe( mapper ).subscribe(lambda x: _raise("ex")) with self.assertRaises(RxException): throw('ex').pipe( mapper ).subscribe(on_error=lambda ex: _raise(ex)) with self.assertRaises(RxException): empty().pipe( mapper ).subscribe(lambda x: x, lambda ex: ex, lambda: _raise('ex')) def subscribe(observer, scheduler=None): _raise('ex') with self.assertRaises(RxException): create(subscribe).pipe( map(lambda x: x) ).subscribe() def test_map_disposeinsidemapper(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(100, 1), on_next(200, 2), on_next(500, 3), on_next(600, 4)) results = scheduler.create_observer() d = SerialDisposable() invoked = [0] def projection(x, *args, **kw): invoked[0] += 1 if scheduler.clock > 400: d.dispose() return x d.disposable = xs.pipe( map(projection) ).subscribe(results, scheduler) def action(scheduler, state): return d.dispose() scheduler.schedule_absolute(ReactiveTest.disposed, action) scheduler.start() assert results.messages == [on_next(100, 1), on_next(200, 2)] assert xs.subscriptions == [ReactiveTest.subscribe(0, 500)] assert invoked[0] == 3 def test_map_completed(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable( on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next(290, 4), 
on_next(350, 5), on_completed(400), on_next(410, -1), on_completed(420), on_error(430, 'ex')) invoked = [0] def factory(): def projection(x): invoked[0] += 1 return x + 1 return xs.pipe(map(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 3), on_next( 240, 4), on_next(290, 5), on_next(350, 6), on_completed(400)] assert xs.subscriptions == [ReactiveTest.subscribe(200, 400)] assert invoked[0] == 4 def test_map_default_mapper(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable( on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next(290, 4), on_next(350, 5), on_completed(400), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): return xs.pipe(map()) results = scheduler.start(factory) assert results.messages == [ on_next(210, 2), on_next(240, 3), on_next(290, 4), on_next(350, 5), on_completed(400)] assert xs.subscriptions == [ReactiveTest.subscribe(200, 400)] def test_map_completed_two(self): for i in range(100): scheduler = TestScheduler() invoked = [0] xs = scheduler.create_hot_observable(on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next( 290, 4), on_next(350, 5), on_completed(400), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): def projection(x): invoked[0] += 1 return x + 1 return xs.pipe(map(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 3), on_next( 240, 4), on_next(290, 5), on_next(350, 6), on_completed(400)] assert xs.subscriptions == [subscribe(200, 400)] assert invoked[0] == 4 def test_map_not_completed(self): scheduler = TestScheduler() invoked = [0] xs = scheduler.create_hot_observable(on_next(180, 1), on_next( 210, 2), on_next(240, 3), on_next(290, 4), on_next(350, 5)) def factory(): def projection(x): invoked[0] += 1 return x + 1 return xs.pipe(map(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 3), on_next(240, 4), on_next(290, 5), on_next(350, 6)] assert 
xs.subscriptions == [subscribe(200, 1000)] assert invoked[0] == 4 def test_map_error(self): scheduler = TestScheduler() ex = 'ex' invoked = [0] xs = scheduler.create_hot_observable(on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next( 290, 4), on_next(350, 5), on_error(400, ex), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): def projection(x): invoked[0] += 1 return x + 1 return xs.pipe(map(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 3), on_next( 240, 4), on_next(290, 5), on_next(350, 6), on_error(400, ex)] assert xs.subscriptions == [subscribe(200, 400)] assert invoked[0] == 4 def test_map_mapper_throws(self): scheduler = TestScheduler() invoked = [0] ex = 'ex' xs = scheduler.create_hot_observable( on_next(180, 1), on_next(210, 2), on_next(240, 3), on_next(290, 4), on_next(350, 5), on_completed(400), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): def projection(x): invoked[0] += 1 if invoked[0] == 3: raise Exception(ex) return x + 1 return xs.pipe(map(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 3), on_next(240, 4), on_error(290, ex)] assert xs.subscriptions == [subscribe(200, 290)] assert invoked[0] == 3 def test_map_with_index_throws(self): with self.assertRaises(RxException): mapper = map_indexed(lambda x, index: x) return return_value(1).pipe( mapper ).subscribe(lambda x: _raise('ex')) with self.assertRaises(RxException): return throw('ex').pipe( mapper ).subscribe(lambda x: x, lambda ex: _raise(ex)) with self.assertRaises(RxException): return empty().pipe( mapper ).subscribe(lambda x: x, lambda ex: None, lambda: _raise('ex')) with self.assertRaises(RxException): return create(lambda o, s: _raise('ex')).pipe( mapper ).subscribe() def test_map_with_index_dispose_inside_mapper(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable(on_next(100, 4), on_next(200, 3), on_next(500, 2), on_next(600, 1)) 
invoked = [0] results = scheduler.create_observer() d = SerialDisposable() def projection(x, index): invoked[0] += 1 if scheduler.clock > 400: d.dispose() return x + index * 10 d.disposable = xs.pipe(map_indexed(projection)).subscribe(results) def action(scheduler, state): return d.dispose() scheduler.schedule_absolute(disposed, action) scheduler.start() assert results.messages == [on_next(100, 4), on_next(200, 13)] assert xs.subscriptions == [subscribe(0, 500)] assert invoked[0] == 3 def test_map_with_index_completed(self): scheduler = TestScheduler() invoked = [0] xs = scheduler.create_hot_observable(on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next( 290, 2), on_next(350, 1), on_completed(400), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): def projection(x, index): invoked[0] += 1 return (x + 1) + (index * 10) return xs.pipe(map_indexed(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 5), on_next( 240, 14), on_next(290, 23), on_next(350, 32), on_completed(400)] assert xs.subscriptions == [subscribe(200, 400)] assert invoked[0] == 4 def test_map_with_index_default_mapper(self): scheduler = TestScheduler() xs = scheduler.create_hot_observable( on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next(290, 2), on_next(350, 1), on_completed(400), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): return xs.pipe(map_indexed()) results = scheduler.start(factory) assert results.messages == [ on_next(210, 4), on_next(240, 3), on_next(290, 2), on_next(350, 1), on_completed(400)] assert xs.subscriptions == [subscribe(200, 400)] def test_map_with_index_not_completed(self): scheduler = TestScheduler() invoked = [0] xs = scheduler.create_hot_observable(on_next(180, 5), on_next( 210, 4), on_next(240, 3), on_next(290, 2), on_next(350, 1)) def factory(): def projection(x, index): invoked[0] += 1 return (x + 1) + (index * 10) return xs.pipe(map_indexed(projection)) results = 
scheduler.start(factory) assert results.messages == [on_next(210, 5), on_next(240, 14), on_next(290, 23), on_next(350, 32)] assert xs.subscriptions == [subscribe(200, 1000)] assert invoked[0] == 4 def test_map_with_index_error(self): scheduler = TestScheduler() ex = 'ex' invoked = [0] xs = scheduler.create_hot_observable(on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next( 290, 2), on_next(350, 1), on_error(400, ex), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): def projection(x, index): invoked[0] += 1 return (x + 1) + (index * 10) return xs.pipe(map_indexed(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 5), on_next( 240, 14), on_next(290, 23), on_next(350, 32), on_error(400, ex)] assert xs.subscriptions == [subscribe(200, 400)] assert invoked[0] == 4 def test_map_with_index_mapper_throws(self): scheduler = TestScheduler() invoked = [0] ex = 'ex' xs = scheduler.create_hot_observable(on_next(180, 5), on_next(210, 4), on_next(240, 3), on_next( 290, 2), on_next(350, 1), on_completed(400), on_next(410, -1), on_completed(420), on_error(430, 'ex')) def factory(): def projection(x, index): invoked[0] += 1 if invoked[0] == 3: raise Exception(ex) return (x + 1) + (index * 10) return xs.pipe(map_indexed(projection)) results = scheduler.start(factory) assert results.messages == [on_next(210, 5), on_next(240, 14), on_error(290, ex)] assert xs.subscriptions == [subscribe(200, 290)] assert invoked[0] == 3 if __name__ == '__main__': unittest.main()
mit
liveblog/superdesk
server/apps/archived/archived.py
5
2238
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013, 2014 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license from eve.utils import config from apps.publish.published_item import PublishedItemResource, PublishedItemService from superdesk.metadata.utils import aggregations from superdesk.notification import push_notification from apps.archive.common import get_user import superdesk from superdesk.utc import utcnow query_filters = [{'allow_post_publish_actions': False}, {'can_be_removed': False}] class ArchivedResource(PublishedItemResource): datasource = { 'source': 'published', 'search_backend': 'elastic', 'aggregations': aggregations, 'elastic_filter': {'and': [{'term': query_filters[0]}, {'term': query_filters[1]}]}, 'default_sort': [('_updated', -1)], 'projection': { 'old_version': 0, 'last_version': 0 } } resource_methods = ['GET'] item_methods = ['GET', 'DELETE'] privileges = {'DELETE': 'archived'} class ArchivedService(PublishedItemService): def find_by_item_ids(self, item_ids): """ Fetches items whose item_id is passed in item_ids :param item_ids: list of item_id :return: items from archived collection """ query = {'$and': [{'item_id': {'$in': item_ids}}, query_filters[0], query_filters[1]]} return super().get_from_mongo(req=None, lookup=query) def on_delete(self, doc): """ This method throws exception when invoked on PublishedItemService. Overriding to avoid that. """ pass def delete(self, lookup): super().patch(lookup[config.ID_FIELD], {'can_be_removed': True, '_updated': utcnow()}) def on_deleted(self, doc): user = get_user() push_notification('item:deleted:archived', item=str(doc[config.ID_FIELD]), user=str(user.get(config.ID_FIELD))) superdesk.privilege(name='archived', label='Archived Management', description='User can remove items from the archived')
agpl-3.0
AnnalisaS/migration_geonode
geonode/catalogue/backends/pycsw_local.py
8
5656
######################################################################### # # Copyright (C) 2012 OpenPlans # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### import os from lxml import etree from django.conf import settings from ConfigParser import SafeConfigParser from owslib.iso import MD_Metadata from pycsw import server from geonode.catalogue.backends.generic import CatalogueBackend as GenericCatalogueBackend from geonode.catalogue.backends.generic import METADATA_FORMATS # pycsw settings that the user shouldn't have to worry about CONFIGURATION = { 'server': { 'home': '.', 'url': settings.CATALOGUE['default']['URL'], 'encoding': 'UTF-8', 'language': settings.LANGUAGE_CODE, 'maxrecords': '10', #'loglevel': 'DEBUG', #'logfile': '/tmp/pycsw.log', #'federatedcatalogues': 'http://geo.data.gov/geoportal/csw/discovery', #'pretty_print': 'true', #'domainquerytype': 'range', #'domaincounts': 'true', 'profiles': 'apiso,ebrim', }, 'repository': { 'source': 'geonode', 'mappings': os.path.join(os.path.dirname(__file__), 'pycsw_local_mappings.py') } } class CatalogueBackend(GenericCatalogueBackend): def __init__(self, *args, **kwargs): super(CatalogueBackend, self).__init__(*args, **kwargs) self.catalogue.formats = ['Atom', 'DIF', 'Dublin Core', 'ebRIM', 'FGDC', 'TC211'] self.catalogue.local = True def remove_record(self, 
uuid): pass def create_record(self, item): pass def get_record(self, uuid): results = self._csw_local_dispatch(identifier=uuid) if len(results) < 1: return None result = etree.fromstring(results).find('{http://www.isotc211.org/2005/gmd}MD_Metadata') if result is None: return None record = MD_Metadata(result) record.keywords = [] if hasattr(record, 'identification') and hasattr(record.identification, 'keywords'): for kw in record.identification.keywords: record.keywords.extend(kw['keywords']) record.links = {} record.links['metadata'] = self.catalogue.urls_for_uuid(uuid) record.links['download'] = self.catalogue.extract_links(record) return record def search_records(self, keywords, start, limit, bbox): with self.catalogue: lresults = self._csw_local_dispatch(keywords, keywords, start+1, limit, bbox) # serialize XML e = etree.fromstring(lresults) self.catalogue.records = [MD_Metadata(x) for x in e.findall('//{http://www.isotc211.org/2005/gmd}MD_Metadata')] # build results into JSON for API results = [self.catalogue.metadatarecord2dict(doc) for v, doc in self.catalogue.records.iteritems()] result = { 'rows': results, 'total': e.find('{http://www.opengis.net/cat/csw/2.0.2}SearchResults').attrib.get('numberOfRecordsMatched'), 'next_page': e.find('{http://www.opengis.net/cat/csw/2.0.2}SearchResults').attrib.get('nextRecord') } return result def _csw_local_dispatch(self, keywords=None, start=0, limit=10, bbox=None, identifier=None): """ HTTP-less CSW """ # serialize pycsw settings into SafeConfigParser # object for interaction with pycsw mdict = dict(settings.PYCSW['CONFIGURATION'], **CONFIGURATION) config = SafeConfigParser() for section, options in mdict.iteritems(): config.add_section(section) for option, value in options.iteritems(): config.set(section, option, value) # fake HTTP environment variable os.environ['QUERY_STRING'] = '' # init pycsw csw = server.Csw(config) # fake HTTP method csw.requesttype = 'POST' # fake HTTP request parameters if identifier is None: # 
it's a GetRecords request formats = [] for f in self.catalogue.formats: formats.append(METADATA_FORMATS[f][0]) csw.kvp = { 'elementsetname': 'full', 'typenames': formats, 'resulttype': 'results', 'constraintlanguage': 'CQL_TEXT', 'constraint': 'csw:AnyText like "%%%s%%"' % keywords, 'outputschema': 'http://www.isotc211.org/2005/gmd', 'constraint': None, 'startposition': start, 'maxrecords': limit } response = csw.getrecords() else: # it's a GetRecordById request csw.kvp = { 'id': [identifier], 'outputschema': 'http://www.isotc211.org/2005/gmd', } response = csw.getrecordbyid() return etree.tostring(response)
gpl-3.0
splaice/Virtbox
tests/functional.py
1
1841
# -*- coding: utf-8 -*- """ This module contains the functional tests for using virbox. :copyright: (c) 2012 by Sean Plaice :license: ISC, see LICENSE for more details. """ #import logging #import testify # #from virtbox.manage import ( # modifyvm, # storageattach, # startvm, # ) #from virtbox.utils import ( # generate_vm, # generate_hd, # generate_ctl, # #delete_vm, # #delete_hd, # #delete_ctl #) # # ## setup module level logger #logger = logging.getLogger(__name__) # # #class VirtboxFunctionalTestCase(testify.TestCase): # @testify.setup # def setup(self): # self.vm = generate_vm() # self.hdd = generate_hd() # self.ctl = generate_ctl(vmname=self.vm['name']) # self.memory = "1024" # self.rtcuseutc = "on" # self.iso_path = '/tmp/fedora.iso' # modifyvm(vm_uuid=self.vm['uuid'], memory=self.memory, # rtcuseutc=self.rtcuseutc) # storageattach(vmname=self.vm['name'], # name=self.ctl['name'], port='0', device='0', # storage_type='hdd', medium=self.hdd['filename']) # storageattach(vmname=self.vm['name'], # name=self.ctl['name'], port='1', device='0', # storage_type='dvddrive', medium=self.iso_path) # # #@testify.teardown # #def teardown(self): # # delete_ctl(**self.ctl) # # delete_hd(**self.hdd) # # delete_vm(**self.vm) # # def test_start_and_stop_vm(self): # result = startvm(vm_name=self.vm['uuid'], start_type='gui') # testify.assert_equal(self.vm['uuid'], result['uuid']) # # this looks hacky but waiting on these operations finishing will # # be handled in the biz logic at a higher level # #controlvm(vm_uuid=self.vm['uuid'], action='poweroff')
isc
blueboxgroup/nova
nova/tests/unit/virt/test_imagecache.py
63
7011
# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova import block_device from nova.compute import vm_states from nova import context from nova import objects from nova import test from nova.tests.unit import fake_instance from nova.virt import imagecache CONF = cfg.CONF swap_bdm_128 = [block_device.BlockDeviceDict( {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'disk_bus': 'scsi', 'volume_size': 128, 'boot_index': -1})] swap_bdm_256 = [block_device.BlockDeviceDict( {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'disk_bus': 'scsi', 'volume_size': 256, 'boot_index': -1})] class ImageCacheManagerTests(test.NoDBTestCase): def test_configurationi_defaults(self): self.assertEqual(2400, CONF.image_cache_manager_interval) self.assertEqual('_base', CONF.image_cache_subdirectory_name) self.assertTrue(CONF.remove_unused_base_images) self.assertEqual(24 * 3600, CONF.remove_unused_original_minimum_age_seconds) def test_cache_manager(self): cache_manager = imagecache.ImageCacheManager() self.assertTrue(cache_manager.remove_unused_base_images) self.assertRaises(NotImplementedError, cache_manager.update, None, []) self.assertRaises(NotImplementedError, 
cache_manager._get_base) base_images = cache_manager._list_base_images(None) self.assertEqual([], base_images['unexplained_images']) self.assertEqual([], base_images['originals']) self.assertRaises(NotImplementedError, cache_manager._age_and_verify_cached_images, None, [], None) def test_list_running_instances(self): instances = [{'image_ref': '1', 'host': CONF.host, 'id': '1', 'uuid': '123', 'vm_state': '', 'task_state': ''}, {'image_ref': '2', 'host': CONF.host, 'id': '2', 'uuid': '456', 'vm_state': '', 'task_state': ''}, {'image_ref': '2', 'kernel_id': '21', 'ramdisk_id': '22', 'host': 'remotehost', 'id': '3', 'uuid': '789', 'vm_state': '', 'task_state': ''}] all_instances = [fake_instance.fake_instance_obj(None, **instance) for instance in instances] image_cache_manager = imagecache.ImageCacheManager() self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') ctxt = context.get_admin_context() objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '123').AndReturn(swap_bdm_256) objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '456').AndReturn(swap_bdm_128) objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '789').AndReturn(swap_bdm_128) self.mox.ReplayAll() # The argument here should be a context, but it's mocked out running = image_cache_manager._list_running_instances(ctxt, all_instances) self.assertEqual(4, len(running['used_images'])) self.assertEqual((1, 0, ['instance-00000001']), running['used_images']['1']) self.assertEqual((1, 1, ['instance-00000002', 'instance-00000003']), running['used_images']['2']) self.assertEqual((0, 1, ['instance-00000003']), running['used_images']['21']) self.assertEqual((0, 1, ['instance-00000003']), running['used_images']['22']) self.assertIn('instance-00000001', running['instance_names']) self.assertIn('123', running['instance_names']) self.assertEqual(4, len(running['image_popularity'])) self.assertEqual(1, 
running['image_popularity']['1']) self.assertEqual(2, running['image_popularity']['2']) self.assertEqual(1, running['image_popularity']['21']) self.assertEqual(1, running['image_popularity']['22']) self.assertEqual(len(running['used_swap_images']), 2) self.assertIn('swap_128', running['used_swap_images']) self.assertIn('swap_256', running['used_swap_images']) def test_list_resizing_instances(self): instances = [{'image_ref': '1', 'host': CONF.host, 'id': '1', 'uuid': '123', 'vm_state': vm_states.RESIZED, 'task_state': None}] all_instances = [fake_instance.fake_instance_obj(None, **instance) for instance in instances] image_cache_manager = imagecache.ImageCacheManager() self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList, 'get_by_instance_uuid') ctxt = context.get_admin_context() objects.block_device.BlockDeviceMappingList.get_by_instance_uuid( ctxt, '123').AndReturn(swap_bdm_256) self.mox.ReplayAll() running = image_cache_manager._list_running_instances(ctxt, all_instances) self.assertEqual(1, len(running['used_images'])) self.assertEqual((1, 0, ['instance-00000001']), running['used_images']['1']) self.assertEqual(set(['instance-00000001', '123', 'instance-00000001_resize', '123_resize']), running['instance_names']) self.assertEqual(1, len(running['image_popularity'])) self.assertEqual(1, running['image_popularity']['1'])
apache-2.0
proversity-org/edx-platform
lms/djangoapps/grades/tests/base.py
14
4306
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory from lms.djangoapps.course_blocks.api import get_course_blocks from openedx.core.djangolib.testing.utils import get_mock_request from student.models import CourseEnrollment from student.tests.factories import UserFactory from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from ..course_data import CourseData from ..subsection_grade_factory import SubsectionGradeFactory class GradeTestBase(SharedModuleStoreTestCase): """ Base class for some Grades tests. """ @classmethod def setUpClass(cls): super(GradeTestBase, cls).setUpClass() cls.course = CourseFactory.create() with cls.store.bulk_operations(cls.course.id): cls.chapter = ItemFactory.create( parent=cls.course, category="chapter", display_name="Test Chapter" ) cls.sequence = ItemFactory.create( parent=cls.chapter, category='sequential', display_name="Test Sequential X", graded=True, format="Homework" ) cls.vertical = ItemFactory.create( parent=cls.sequence, category='vertical', display_name='Test Vertical 1' ) problem_xml = MultipleChoiceResponseXMLFactory().build_xml( question_text='The correct answer is Choice 3', choices=[False, False, True, False], choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3'] ) cls.problem = ItemFactory.create( parent=cls.vertical, category="problem", display_name="Test Problem", data=problem_xml ) cls.sequence2 = ItemFactory.create( parent=cls.chapter, category='sequential', display_name="Test Sequential A", graded=True, format="Homework" ) cls.problem2 = ItemFactory.create( parent=cls.sequence2, category="problem", display_name="Test Problem", data=problem_xml ) # AED 2017-06-19: make cls.sequence belong to multiple parents, # so we can test that DAGs with this shape are handled correctly. 
cls.chapter_2 = ItemFactory.create( parent=cls.course, category='chapter', display_name='Test Chapter 2' ) cls.chapter_2.children.append(cls.sequence.location) cls.store.update_item(cls.chapter_2, UserFactory().id) def setUp(self): super(GradeTestBase, self).setUp() self.request = get_mock_request(UserFactory()) self.client.login(username=self.request.user.username, password="test") self._set_grading_policy() self.course_structure = get_course_blocks(self.request.user, self.course.location) self.course_data = CourseData(self.request.user, structure=self.course_structure) self.subsection_grade_factory = SubsectionGradeFactory(self.request.user, self.course, self.course_structure) CourseEnrollment.enroll(self.request.user, self.course.id) def _set_grading_policy(self, passing=0.5): """ Updates the course's grading policy. """ self.grading_policy = { "GRADER": [ { "type": "Homework", "min_count": 1, "drop_count": 0, "short_label": "HW", "weight": 1.0, }, { "type": "NoCredit", "min_count": 0, "drop_count": 0, "short_label": "NC", "weight": 0.0, }, ], "GRADE_CUTOFFS": { "Pass": passing, }, } self.course.set_grading_policy(self.grading_policy) self.store.update_item(self.course, 0)
agpl-3.0
aeron15/luigi
test/wrap_test.py
72
2886
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import datetime from helpers import unittest import luigi import luigi.notifications from luigi.mock import MockTarget from luigi.util import inherits luigi.notifications.DEBUG = True class A(luigi.Task): def output(self): return MockTarget('/tmp/a.txt') def run(self): f = self.output().open('w') print('hello, world', file=f) f.close() class B(luigi.Task): date = luigi.DateParameter() def output(self): return MockTarget(self.date.strftime('/tmp/b-%Y-%m-%d.txt')) def run(self): f = self.output().open('w') print('goodbye, space', file=f) f.close() def XMLWrapper(cls): @inherits(cls) class XMLWrapperCls(luigi.Task): def requires(self): return self.clone_parent() def run(self): f = self.input().open('r') g = self.output().open('w') print('<?xml version="1.0" ?>', file=g) for line in f: print('<dummy-xml>' + line.strip() + '</dummy-xml>', file=g) g.close() return XMLWrapperCls class AXML(XMLWrapper(A)): def output(self): return MockTarget('/tmp/a.xml') class BXML(XMLWrapper(B)): def output(self): return MockTarget(self.date.strftime('/tmp/b-%Y-%m-%d.xml')) class WrapperTest(unittest.TestCase): ''' This test illustrates how a task class can wrap another task class by modifying its behavior. See instance_wrap_test.py for an example of how instances can wrap each other. 
''' workers = 1 def setUp(self): MockTarget.fs.clear() def test_a(self): luigi.build([AXML()], local_scheduler=True, no_lock=True, workers=self.workers) self.assertEqual(MockTarget.fs.get_data('/tmp/a.xml'), b'<?xml version="1.0" ?>\n<dummy-xml>hello, world</dummy-xml>\n') def test_b(self): luigi.build([BXML(datetime.date(2012, 1, 1))], local_scheduler=True, no_lock=True, workers=self.workers) self.assertEqual(MockTarget.fs.get_data('/tmp/b-2012-01-01.xml'), b'<?xml version="1.0" ?>\n<dummy-xml>goodbye, space</dummy-xml>\n') class WrapperWithMultipleWorkersTest(WrapperTest): workers = 7 if __name__ == '__main__': luigi.run()
apache-2.0
lbernau/smarthome
tests/mock/core.py
2
6808
import os import datetime import dateutil.tz import logging import lib.config import lib.connection import lib.item import lib.plugin from lib.shtime import Shtime from lib.module import Modules import lib.utils from lib.model.smartplugin import SmartPlugin from lib.constants import (YAML_FILE, CONF_FILE, DEFAULT_FILE) from tests.common import BASE logger = logging.getLogger('Mockup') class MockScheduler(): def __init__(self): # set scheduler_instance to MockScheduler instance import lib.scheduler lib.scheduler._scheduler_instance = self def add(self, name, obj, prio=3, cron=None, cycle=None, value=None, offset=None, next=None): logger.warning('MockScheduler (add): {}, cron={}, cycle={}, value={}, offset={}'.format( name, str(cron), str(cycle), str(value), str(offset) )) try: if isinstance(obj.__self__, SmartPlugin): name = name +'_'+ obj.__self__.get_instance_name() except: pass def remove(self, name): logger.warning('MockScheduler (remove): {}'.format( name )) class MockSmartHome(): _base_dir = BASE base_dir = _base_dir # for external modules using that var (backend, ...?) 
_default_language = 'de' _etc_dir = os.path.join(_base_dir, 'tests', 'resources', 'etc') # _var_dir = os.path.join(_base_dir, 'var') _lib_dir = os.path.join(_base_dir, 'lib') _env_dir = os.path.join(_lib_dir, 'env' + os.path.sep) _module_conf_basename = os.path.join(_etc_dir,'module') _module_conf = '' # is filled by module.py while reading the configuration file, needed by Backend plugin _plugin_conf_basename = os.path.join(_etc_dir,'plugin') _plugin_conf = '' # is filled by plugin.py while reading the configuration file, needed by Backend plugin _env_logic_conf_basename = os.path.join( _env_dir ,'logic') # _items_dir = os.path.join(_base_dir, 'items'+os.path.sep) _logic_conf_basename = os.path.join(_etc_dir, 'logic') _logic_dir = os.path.join(_base_dir, 'tests', 'resources', 'logics'+os.path.sep) # _cache_dir = os.path.join(_var_dir,'cache'+os.path.sep) # _log_config = os.path.join(_etc_dir,'logging'+YAML_FILE) # _smarthome_conf_basename = None # the APIs available though the smarthome object instance: shtime = None plugins = None items = None logics = None scheduler = None _SmartHome__items = [] def __init__(self): VERSION = '1.4c.' 
VERSION += '0.man' self.version = VERSION self.__logs = {} # self.__item_dict = {} # self.__items = [] self.children = [] self._use_modules = 'True' self._modules = [] self._moduledict = {} self._plugins = [] self.shtime = Shtime(self) # self._tzinfo = dateutil.tz.tzutc() # self.shtime.set_tzinfo(dateutil.tz.tzutc()) # TZ = dateutil.tz.gettz('UTC') TZ = dateutil.tz.gettz('Europe/Berlin') self.shtime.set_tzinfo(TZ) self.scheduler = MockScheduler() self.connections = lib.connection.Connections() self.shtime = Shtime(self) # Start() # self.scheduler = lib.scheduler.Scheduler(self) self.modules = lib.module.Modules(self, configfile=self._module_conf_basename) self.items = lib.item.Items(self) self.plugins = lib.plugin.Plugins(self, configfile=self._plugin_conf_basename) def get_defaultlanguage(self): return self._default_language def set_defaultlanguage(self, language): self._default_language = language def get_basedir(self): return self._base_dir def getBaseDir(self): """ Deprecated """ return self._base_dir def trigger(self, name, obj=None, by='Logic', source=None, value=None, dest=None, prio=3, dt=None): logger.warning('MockSmartHome (trigger): {}'.format(str(obj))) def with_plugins_from(self, conf): lib.plugin.Plugins._plugins = [] lib.plugin.Plugins._threads = [] self._plugins = lib.plugin.Plugins(self, conf) return self._plugins def with_modules_from(self, conf): lib.module.Modules._modules = [] self._modules = lib.module.Modules(self, conf) return self._plugins def with_items_from(self, conf): item_conf = lib.config.parse(conf, None) for attr, value in item_conf.items(): if isinstance(value, dict): child_path = attr try: child = lib.item.Item(self, self, child_path, value) except Exception as e: print("Item {}: problem creating: {}".format(child_path, e)) else: vars(self)[attr] = child self.add_item(child_path, child) self.children.append(child) return item_conf def add_log(self, name, log): self.__logs[name] = log # 
------------------------------------------------------------ # Deprecated methods # ------------------------------------------------------------ def now(self): # return datetime.datetime.now() return self.shtime.now() def tzinfo(self): # return self._tzinfo return self.shtime.tzinfo() def add_item(self, path, item): # if path not in self.__items: # self.__items.append(path) # self.__item_dict[path] = item return self.items.add_item(path, item) def return_item(self, string): # if string in self.__items: # return self.__item_dict[string] return self.items.return_item(string) def return_items(self): # for item in self.__items: # yield self.__item_dict[item] return self.items.return_items() def return_plugins(self): # for plugin in self._plugins: # yield plugin return self.plugins.get_module(name) def return_modules(self): # l = [] # for module_key in self._moduledict.keys(): # l.append(module_key) # return l return self.modules.return_modules() def get_module(self, name): # return self._moduledict.get(name) return self.modules.get_module(name) def string2bool(self, string): # if isinstance(string, bool): # return string # if string.lower() in ['0', 'false', 'n', 'no', 'off']: # return False # if string.lower() in ['1', 'true', 'y', 'yes', 'on']: # return True # else: # return None try: return lib.utils.Utils.to_bool(string) except Exception as e: return None def return_none(self): return None
gpl-3.0
od-eon/django-fts-odeon
fts/backends/unported/simple.py
2
3124
"Simple Fts backend" import os from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes import generic from django.db.models import Q from django.db import transaction from fts.backends.base import BaseClass, BaseModel, BaseManager from fts.models import IndexWord, Index from fts.words.stop import FTS_STOPWORDS try: from fts.words.snowball import Stemmer except ImportError: from fts.words.porter import Stemmer WEIGHTS = { 'A' : 10, 'B' : 4, 'C' : 2, 'D' : 1 } class SearchClass(BaseClass): def __init__(self, server, params): self.backend = 'simple' class SearchManager(BaseManager): @transaction.commit_on_success def update_index(self, pk=None): if pk is not None: if isinstance(pk, (list,tuple)): items = self.filter(pk__in=pk) else: items = self.filter(pk=pk) items[0]._index.all().delete() else: items = self.all() model_type = ContentType.objects.get_for_model(self.model) Index.objects.filter(content_type__pk=model_type.id).delete() IW = {} for item in items: for field, weight in self._fields.items(): for w in set(getattr(item, field).lower().split(' ')): if w and w not in FTS_STOPWORDS[self.language_code]: p = Stemmer(self.language_code) w = p(w) try: iw = IW[w]; except KeyError: iw = IndexWord.objects.get_or_create(word=w)[0] IW[w] = iw i = Index(content_object=item, word=iw, weight=WEIGHTS[weight]) i.save() def search(self, query, **kwargs): params = Q() #SELECT core_blog.*, count(DISTINCT word_id), sum(weight) #FROM core_blog INNER JOIN fts_index ON (core_blog.id = fts_index.object_id) INNER JOIN fts_indexword ON (fts_index.word_id = fts_indexword.id) #WHERE fts_index.content_type_id = 18 AND (fts_indexword.word='titl' OR fts_indexword.word='simpl') #GROUP BY core_blog.id, core_blog.title, core_blog.body #HAVING count(DISTINCT word_id) = 2; words = 0 for w in set(query.lower().split(' ')): if w and w not in FTS_STOPWORDS[self.language_code]: words += 1 p = Stemmer(self.language_code) w = p(w) params |= Q(_index__word__word=w) qs 
= self.filter(params) #if words > 1: # qs.query.group_by = ['core_blog.id, core_blog.title, core_blog.body'] # qs.query.having = ['(COUNT(DISTINCT fts_index.word_id)) = %d' % words] return qs.distinct() class SearchableModel(BaseModel): class Meta: abstract = True _index = generic.GenericRelation(Index) search_objects = SearchManager()
bsd-3-clause
haxoza/django
django/contrib/gis/geos/polygon.py
450
6843
from ctypes import byref, c_uint from django.contrib.gis.geos import prototypes as capi from django.contrib.gis.geos.geometry import GEOSGeometry from django.contrib.gis.geos.libgeos import GEOM_PTR, get_pointer_arr from django.contrib.gis.geos.linestring import LinearRing from django.utils import six from django.utils.six.moves import range class Polygon(GEOSGeometry): _minlength = 1 def __init__(self, *args, **kwargs): """ Initializes on an exterior ring and a sequence of holes (both instances may be either LinearRing instances, or a tuple/list that may be constructed into a LinearRing). Examples of initialization, where shell, hole1, and hole2 are valid LinearRing geometries: >>> from django.contrib.gis.geos import LinearRing, Polygon >>> shell = hole1 = hole2 = LinearRing() >>> poly = Polygon(shell, hole1, hole2) >>> poly = Polygon(shell, (hole1, hole2)) >>> # Example where a tuple parameters are used: >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (0, 10), (0, 0)), ... ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4))) """ if not args: raise TypeError('Must provide at least one LinearRing, or a tuple, to initialize a Polygon.') # Getting the ext_ring and init_holes parameters from the argument list ext_ring = args[0] init_holes = args[1:] n_holes = len(init_holes) # If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility] if n_holes == 1 and isinstance(init_holes[0], (tuple, list)): if len(init_holes[0]) == 0: init_holes = () n_holes = 0 elif isinstance(init_holes[0][0], LinearRing): init_holes = init_holes[0] n_holes = len(init_holes) polygon = self._create_polygon(n_holes + 1, (ext_ring,) + init_holes) super(Polygon, self).__init__(polygon, **kwargs) def __iter__(self): "Iterates over each ring in the polygon." for i in range(len(self)): yield self[i] def __len__(self): "Returns the number of rings in this Polygon." 
return self.num_interior_rings + 1 @classmethod def from_bbox(cls, bbox): "Constructs a Polygon from a bounding box (4-tuple)." x0, y0, x1, y1 = bbox for z in bbox: if not isinstance(z, six.integer_types + (float,)): return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (x0, y0, x0, y1, x1, y1, x1, y0, x0, y0)) return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0))) # ### These routines are needed for list-like operation w/ListMixin ### def _create_polygon(self, length, items): # Instantiate LinearRing objects if necessary, but don't clone them yet # _construct_ring will throw a TypeError if a parameter isn't a valid ring # If we cloned the pointers here, we wouldn't be able to clean up # in case of error. rings = [] for r in items: if isinstance(r, GEOM_PTR): rings.append(r) else: rings.append(self._construct_ring(r)) shell = self._clone(rings.pop(0)) n_holes = length - 1 if n_holes: holes = get_pointer_arr(n_holes) for i, r in enumerate(rings): holes[i] = self._clone(r) holes_param = byref(holes) else: holes_param = None return capi.create_polygon(shell, holes_param, c_uint(n_holes)) def _clone(self, g): if isinstance(g, GEOM_PTR): return capi.geom_clone(g) else: return capi.geom_clone(g.ptr) def _construct_ring(self, param, msg=( 'Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings')): "Helper routine for trying to construct a ring from the given parameter." if isinstance(param, LinearRing): return param try: ring = LinearRing(param) return ring except TypeError: raise TypeError(msg) def _set_list(self, length, items): # Getting the current pointer, replacing with the newly constructed # geometry, and destroying the old geometry. prev_ptr = self.ptr srid = self.srid self.ptr = self._create_polygon(length, items) if srid: self.srid = srid capi.destroy_geom(prev_ptr) def _get_single_internal(self, index): """ Returns the ring at the specified index. 
The first index, 0, will always return the exterior ring. Indices > 0 will return the interior ring at the given index (e.g., poly[1] and poly[2] would return the first and second interior ring, respectively). CAREFUL: Internal/External are not the same as Interior/Exterior! _get_single_internal returns a pointer from the existing geometries for use internally by the object's methods. _get_single_external returns a clone of the same geometry for use by external code. """ if index == 0: return capi.get_extring(self.ptr) else: # Getting the interior ring, have to subtract 1 from the index. return capi.get_intring(self.ptr, index - 1) def _get_single_external(self, index): return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid) _set_single = GEOSGeometry._set_single_rebuild _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild # #### Polygon Properties #### @property def num_interior_rings(self): "Returns the number of interior rings." # Getting the number of rings return capi.get_nrings(self.ptr) def _get_ext_ring(self): "Gets the exterior ring of the Polygon." return self[0] def _set_ext_ring(self, ring): "Sets the exterior ring of the Polygon." self[0] = ring # Properties for the exterior ring/shell. exterior_ring = property(_get_ext_ring, _set_ext_ring) shell = exterior_ring @property def tuple(self): "Gets the tuple for each ring in this Polygon." return tuple(self[i].tuple for i in range(len(self))) coords = tuple @property def kml(self): "Returns the KML representation of this Polygon." inner_kml = ''.join("<innerBoundaryIs>%s</innerBoundaryIs>" % self[i + 1].kml for i in range(self.num_interior_rings)) return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml)
bsd-3-clause
giltis/scikit-xray
skxray/core/tests/test_calibration.py
3
4493
# ###################################################################### # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # # # # @author: Li Li (lili@bnl.gov) # # created on 08/19/2014 # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of the Brookhaven Science Associates, Brookhaven # # National Laboratory nor the names of its contributors may be used # # to endorse or promote products derived from this software without # # specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. 
# ######################################################################## from __future__ import absolute_import, division, print_function import numpy as np import skxray.core.calibration as calibration import skxray.core.calibration as core def _draw_gaussian_rings(shape, calibrated_center, r_list, r_width): R = core.radial_grid(calibrated_center, shape) I = np.zeros_like(R) for r in r_list: tmp = 100 * np.exp(-((R - r)/r_width)**2) I += tmp return I def test_refine_center(): center = np.array((500, 550)) I = _draw_gaussian_rings((1000, 1001), center, [50, 75, 100, 250, 500], 5) nx_opts = [None, 300] for nx in nx_opts: out = calibration.refine_center(I, center+1, (1, 1), phi_steps=20, nx=nx, min_x=10, max_x=300, window_size=5, thresh=0, max_peaks=4) assert np.all(np.abs(center - out) < .1) def test_blind_d(): gaus = lambda x, center, height, width: ( height * np.exp(-((x-center) / width)**2)) name = 'Si' wavelength = .18 window_size = 5 threshold = .1 cal = calibration.calibration_standards[name] tan2theta = np.tan(cal.convert_2theta(wavelength)) D = 200 expected_r = D * tan2theta bin_centers = np.linspace(0, 50, 2000) I = np.zeros_like(bin_centers) for r in expected_r: I += gaus(bin_centers, r, 100, .2) d, dstd = calibration.estimate_d_blind(name, wavelength, bin_centers, I, window_size, len(expected_r), threshold) assert np.abs(d - D) < 1e-6 if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
bsd-3-clause
Titan-C/scikit-learn
sklearn/linear_model/omp.py
8
31640
"""Orthogonal matching pursuit algorithms """ # Author: Vlad Niculae # # License: BSD 3 clause import warnings import numpy as np from scipy import linalg from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel, _pre_fit from ..base import RegressorMixin from ..utils import as_float_array, check_array, check_X_y from ..model_selection import check_cv from ..externals.joblib import Parallel, delayed solve_triangular_args = {'check_finite': False} premature = """ Orthogonal matching pursuit ended prematurely due to linear dependence in the dictionary. The requested precision might not have been met. """ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): """Orthogonal Matching Pursuit step using the Cholesky decomposition. Parameters ---------- X : array, shape (n_samples, n_features) Input dictionary. Columns are assumed to have unit norm. y : array, shape (n_samples,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coef : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. 
""" if copy_X: X = X.copy('F') else: # even if we are allowed to overwrite, still copy it if bad order X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,)) potrs, = get_lapack_funcs(('potrs',), (X,)) alpha = np.dot(X.T, y) residual = y gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) # keeping track of swapping max_features = X.shape[1] if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=X.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=X.dtype) L[0, 0] = 1. if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: # atom already selected or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: # Updates the Cholesky decomposition of X' X L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = np.sqrt(1 - v) X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] indices[n_active], indices[lam] = indices[lam], indices[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: 
break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None, copy_Gram=True, copy_Xy=True, return_path=False): """Orthogonal Matching Pursuit step on a precomputed Gram matrix. This function uses the Cholesky decomposition method. Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data matrix Xy : array, shape (n_features,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol_0 : float Squared norm of y, required if tol is not None. tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coefs : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. 
""" Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram) if copy_Xy: Xy = Xy.copy() min_float = np.finfo(Gram.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,)) potrs, = get_lapack_funcs(('potrs',), (Gram,)) indices = np.arange(len(Gram)) # keeping track of swapping alpha = Xy tol_curr = tol_0 delta = 0 gamma = np.empty(0) n_active = 0 max_features = len(Gram) if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=Gram.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=Gram.dtype) L[0, 0] = 1. if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(alpha)) if lam < n_active or alpha[lam] ** 2 < min_float: # selected same atom twice, or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=3) break if n_active > 0: L[n_active, :n_active] = Gram[lam, :n_active] linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=3) break L[n_active, n_active] = np.sqrt(1 - v) Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) indices[n_active], indices[lam] = indices[lam], indices[n_active] Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma beta = np.dot(Gram[:, :n_active], gamma) alpha = Xy - beta if tol is not None: tol_curr += delta delta = np.inner(gamma, beta[:n_active]) tol_curr -= delta if abs(tol_curr) 
<= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False): """Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array, shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : array, shape (n_samples,) or (n_samples, n_targets) Input targets n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. precompute : {True, False, 'auto'}, Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. 
In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp_gram lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order='F', copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: # subsequent targets will be affected copy_X = True if n_nonzero_coefs is None and tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. 
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError("The number of atoms cannot be more than the number " "of features") if precompute == 'auto': precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum((y ** 2), axis=0) else: norms_squared = None return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): out = _cholesky_omp( X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False): """Gram Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems using only the Gram matrix X.T * X and the product X.T * y. Read more in the :ref:`User Guide <omp>`. Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data: X.T * X Xy : array, shape (n_features,) or (n_features, n_targets) Input targets multiplied by X: X.T * y n_nonzero_coefs : int Desired number of non-zero entries in the solution. 
If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. norms_squared : array-like, shape (n_targets,) Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order='F', copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError('Gram OMP needs the precomputed norms in order ' 'to evaluate the error sum of squares.') if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError("The number of atoms cannot be more than the number " "of features") if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram))) else: coef = np.zeros((len(Gram), Xy.shape[1])) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=copy_Xy, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) class OrthogonalMatchingPursuit(LinearModel, RegressorMixin): """Orthogonal Matching Pursuit model (OMP) Parameters ---------- n_nonzero_coefs : int, optional Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, optional Maximum norm of the residual. If not None, overrides n_nonzero_coefs. 
fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. precompute : {True, False, 'auto'}, default 'auto' Whether to use a precomputed Gram and Xy matrix to speed up calculations. Improves performance when `n_targets` or `n_samples` is very large. Note that if you already have such matrices, you can pass them directly to the fit method. Read more in the :ref:`User Guide <omp>`. Attributes ---------- coef_ : array, shape (n_features,) or (n_targets, n_features) parameter vector (w in the formula) intercept_ : float or array, shape (n_targets,) independent term in decision function. n_iter_ : int or array-like Number of active features across every target. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars decomposition.sparse_encode """ def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize=True, precompute='auto'): self.n_nonzero_coefs = n_nonzero_coefs self.tol = tol self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, multi_output=True, y_numeric=True) n_features = X.shape[1] X, y, X_offset, y_offset, X_scale, Gram, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=True) if y.ndim == 1: y = y[:, np.newaxis] if self.n_nonzero_coefs is None and self.tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. 
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) else: self.n_nonzero_coefs_ = self.n_nonzero_coefs if Gram is False: coef_, self.n_iter_ = orthogonal_mp( X, y, self.n_nonzero_coefs_, self.tol, precompute=False, copy_X=True, return_n_iter=True) else: norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None coef_, self.n_iter_ = orthogonal_mp_gram( Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_sq, copy_Gram=True, copy_Xy=True, return_n_iter=True) self.coef_ = coef_.T self._set_intercept(X_offset, y_offset, X_scale) return self def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True, fit_intercept=True, normalize=True, max_iter=100): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied. If False, they may be overwritten. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 100 by default. 
Returns ------- residues : array, shape (n_samples, max_features) Residues of the prediction on the test data """ if copy: X_train = X_train.copy() y_train = y_train.copy() X_test = X_test.copy() y_test = y_test.copy() if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None, precompute=False, copy_X=False, return_path=True) if coefs.ndim == 1: coefs = coefs[:, np.newaxis] if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] return np.dot(coefs.T, X_test.T) - y_test class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin): """Cross-validated Orthogonal Matching Pursuit model (OMP) Parameters ---------- copy : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default True This parameter is ignored when ``fit_intercept`` is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use :class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on an estimator with ``normalize=False``. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 10% of ``n_features`` but at least 5 if available. 
cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Read more in the :ref:`User Guide <omp>`. Attributes ---------- intercept_ : float or array, shape (n_targets,) Independent term in decision function. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the problem formulation). n_nonzero_coefs_ : int Estimated number of non-zero coefficients giving the best mean squared error over the cross-validation folds. n_iter_ : int or array-like Number of active features across every target for the model refit with the best hyperparameters got by cross-validating across all folds. See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars OrthogonalMatchingPursuit LarsCV LassoLarsCV decomposition.sparse_encode """ def __init__(self, copy=True, fit_intercept=True, normalize=True, max_iter=None, cv=None, n_jobs=1, verbose=False): self.copy = copy self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.cv = cv self.n_jobs = n_jobs self.verbose = verbose def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape [n_samples, n_features] Training data. y : array-like, shape [n_samples] Target values. Returns ------- self : object returns an instance of self. 
""" X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2, estimator=self) X = as_float_array(X, copy=False, force_all_finite=False) cv = check_cv(self.cv, classifier=False) max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) if not self.max_iter else self.max_iter) cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_omp_path_residues)( X[train], y[train], X[test], y[test], self.copy, self.fit_intercept, self.normalize, max_iter) for train, test in cv.split(X)) min_early_stop = min(fold.shape[0] for fold in cv_paths) mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]) best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 self.n_nonzero_coefs_ = best_n_nonzero_coefs omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs, fit_intercept=self.fit_intercept, normalize=self.normalize) omp.fit(X, y) self.coef_ = omp.coef_ self.intercept_ = omp.intercept_ self.n_iter_ = omp.n_iter_ return self
bsd-3-clause
rentongzhang/servo
tests/wpt/web-platform-tests/tools/html5lib/parse.py
420
8783
#!/usr/bin/env python
"""usage: %prog [options] filename

Parse a document to a tree, with optional profiling
"""

import sys
import os
import traceback
from optparse import OptionParser

from html5lib import html5parser, sanitizer
from html5lib.tokenizer import HTMLTokenizer
from html5lib import treebuilders, serializer, treewalkers
from html5lib import constants


def parse():
    """Command-line entry point: parse the given file/URL/stdin and print
    the result according to the selected output options."""
    optParser = getOptParser()
    opts, args = optParser.parse_args()
    encoding = "utf8"
    try:
        f = args[-1]
        # Try opening from the internet
        if f.startswith('http://'):
            try:
                import urllib.request, urllib.parse, urllib.error, cgi
                f = urllib.request.urlopen(f)
                contentType = f.headers.get('content-type')
                if contentType:
                    (mediaType, params) = cgi.parse_header(contentType)
                    encoding = params.get('charset')
            except Exception:
                # Best-effort fetch: fall through and let the parser try the
                # value as-is.  (Was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.)
                pass
        elif f == '-':
            f = sys.stdin
            if sys.version_info[0] >= 3:
                encoding = None
        else:
            try:
                # Try opening from file system
                f = open(f, "rb")
            except IOError as e:
                sys.stderr.write("Unable to open file: %s\n" % e)
                sys.exit(1)
    except IndexError:
        sys.stderr.write("No filename provided. Use -h for help\n")
        sys.exit(1)

    treebuilder = treebuilders.getTreeBuilder(opts.treebuilder)

    if opts.sanitize:
        tokenizer = sanitizer.HTMLSanitizer
    else:
        tokenizer = HTMLTokenizer

    p = html5parser.HTMLParser(tree=treebuilder, tokenizer=tokenizer,
                               debug=opts.log)

    if opts.fragment:
        parseMethod = p.parseFragment
    else:
        parseMethod = p.parse

    if opts.profile:
        import cProfile
        import pstats
        cProfile.runctx("run(parseMethod, f, encoding)", None,
                        {"run": run, "parseMethod": parseMethod, "f": f,
                         "encoding": encoding}, "stats.prof")
        # XXX - We should use a temp file here
        stats = pstats.Stats('stats.prof')
        stats.strip_dirs()
        stats.sort_stats('time')
        stats.print_stats()
    elif opts.time:
        import time
        t0 = time.time()
        document = run(parseMethod, f, encoding)
        t1 = time.time()
        if document:
            printOutput(p, document, opts)
            t2 = time.time()
            sys.stderr.write("\n\nRun took: %fs (plus %fs to print the "
                             "output)" % (t1 - t0, t2 - t1))
        else:
            sys.stderr.write("\n\nRun took: %fs" % (t1 - t0))
    else:
        document = run(parseMethod, f, encoding)
        if document:
            printOutput(p, document, opts)


def run(parseMethod, f, encoding):
    """Invoke *parseMethod* on *f*; on parser failure print the traceback
    and return None instead of propagating."""
    try:
        document = parseMethod(f, encoding=encoding)
    except Exception:
        # Was a bare `except:`; narrowed so Ctrl-C still interrupts the run.
        document = None
        traceback.print_exc()
    return document


def printOutput(parser, document, opts):
    """Print *document* in the format selected by *opts* (xml / tree /
    hilite / serialized html), plus encoding info and parse errors."""
    if opts.encoding:
        print("Encoding:", parser.tokenizer.stream.charEncoding)

    for item in parser.log:
        print(item)

    if document is not None:
        if opts.xml:
            sys.stdout.write(document.toxml("utf-8"))
        elif opts.tree:
            if not hasattr(document, '__getitem__'):
                document = [document]
            for fragment in document:
                print(parser.tree.testSerializer(fragment))
        elif opts.hilite:
            sys.stdout.write(document.hilite("utf-8"))
        elif opts.html:
            kwargs = {}
            # Copy only the serializer options that were actually defined on
            # the option object.  (Was try/bare-except around getattr.)
            for opt in serializer.HTMLSerializer.options:
                try:
                    kwargs[opt] = getattr(opts, opt)
                except AttributeError:
                    pass
            if not kwargs['quote_char']:
                del kwargs['quote_char']

            tokens = treewalkers.getTreeWalker(opts.treebuilder)(document)
            if sys.version_info[0] >= 3:
                encoding = None
            else:
                encoding = "utf-8"
            for text in serializer.HTMLSerializer(**kwargs).serialize(
                    tokens, encoding=encoding):
                sys.stdout.write(text)
            if not text.endswith('\n'):
                sys.stdout.write('\n')
    if opts.error:
        errList = []
        for pos, errorcode, datavars in parser.errors:
            errList.append("Line %i Col %i" % pos + " " +
                           constants.E.get(errorcode, 'Unknown error "%s"' %
                                           errorcode) % datavars)
        sys.stdout.write("\nParse errors:\n" + "\n".join(errList) + "\n")


def getOptParser():
    """Build the OptionParser describing every command-line flag."""
    parser = OptionParser(usage=__doc__)

    # BUGFIX: the help text claimed "hotshot profiler" but parse() uses
    # cProfile.
    parser.add_option("-p", "--profile", action="store_true", default=False,
                      dest="profile", help="Use the cProfile profiler to "
                      "produce a detailed log of the run")

    parser.add_option("-t", "--time",
                      action="store_true", default=False, dest="time",
                      help="Time the run using time.time (may not be accurate "
                      "on all platforms, especially for short runs)")

    parser.add_option("-b", "--treebuilder", action="store", type="string",
                      dest="treebuilder", default="simpleTree")

    parser.add_option("-e", "--error", action="store_true", default=False,
                      dest="error", help="Print a list of parse errors")

    parser.add_option("-f", "--fragment", action="store_true", default=False,
                      dest="fragment", help="Parse as a fragment")

    parser.add_option("", "--tree", action="store_true", default=False,
                      dest="tree", help="Output as debug tree")

    parser.add_option("-x", "--xml", action="store_true", default=False,
                      dest="xml", help="Output as xml")

    parser.add_option("", "--no-html", action="store_false", default=True,
                      dest="html", help="Don't output html")

    parser.add_option("", "--hilite", action="store_true", default=False,
                      dest="hilite", help="Output as formatted highlighted "
                      "code.")

    parser.add_option("-c", "--encoding", action="store_true", default=False,
                      dest="encoding", help="Print character encoding used")

    parser.add_option("", "--inject-meta-charset", action="store_true",
                      default=False, dest="inject_meta_charset",
                      help="inject <meta charset>")

    parser.add_option("", "--strip-whitespace", action="store_true",
                      default=False, dest="strip_whitespace",
                      help="strip whitespace")

    parser.add_option("", "--omit-optional-tags", action="store_true",
                      default=False, dest="omit_optional_tags",
                      help="omit optional tags")

    parser.add_option("", "--quote-attr-values", action="store_true",
                      default=False, dest="quote_attr_values",
                      help="quote attribute values")

    parser.add_option("", "--use-best-quote-char", action="store_true",
                      default=False, dest="use_best_quote_char",
                      help="use best quote character")

    parser.add_option("", "--quote-char", action="store",
                      default=None, dest="quote_char",
                      help="quote character")

    parser.add_option("", "--no-minimize-boolean-attributes",
                      action="store_false", default=True,
                      dest="minimize_boolean_attributes",
                      help="minimize boolean attributes")

    parser.add_option("", "--use-trailing-solidus", action="store_true",
                      default=False, dest="use_trailing_solidus",
                      help="use trailing solidus")

    parser.add_option("", "--space-before-trailing-solidus",
                      action="store_true", default=False,
                      dest="space_before_trailing_solidus",
                      help="add space before trailing solidus")

    parser.add_option("", "--escape-lt-in-attrs", action="store_true",
                      default=False, dest="escape_lt_in_attrs",
                      help="escape less than signs in attribute values")

    parser.add_option("", "--escape-rcdata", action="store_true",
                      default=False, dest="escape_rcdata",
                      help="escape rcdata element values")

    parser.add_option("", "--sanitize", action="store_true", default=False,
                      dest="sanitize", help="sanitize")

    parser.add_option("-l", "--log", action="store_true", default=False,
                      dest="log", help="log state transitions")

    return parser


if __name__ == "__main__":
    parse()
mpl-2.0
unifycore/ryu
ryu/ofproto/ofproto_v1_0.py
11
28910
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from struct import calcsize MAX_XID = 0xffffffff # define constants OFP_VERSION = 0x01 OFP_MAX_TABLE_NAME_LEN = 32 OFP_MAX_TABLE_NAME_LEN_STR = str(OFP_MAX_TABLE_NAME_LEN) OFP_MAX_PORT_NAME_LEN = 16 OFP_TCP_PORT = 6633 OFP_SSL_PORT = 6633 OFP_ETH_ALEN = 6 OFP_ETH_ALEN_STR = str(OFP_ETH_ALEN) OFP_NO_BUFFER = 0xffffffff # enum ofp_port OFPP_MAX = 0xff00 OFPP_IN_PORT = 0xfff8 # Send the packet out the input port. This # virtual port must be explicitly used # in order to send back out of the input # port. OFPP_TABLE = 0xfff9 # Perform actions in flow table. # NB: This can only be the destination # port for packet-out messages. OFPP_NORMAL = 0xfffa # Process with normal L2/L3 switching. OFPP_FLOOD = 0xfffb # All physical ports except input port and # those disabled by STP. OFPP_ALL = 0xfffc # All physical ports except input port. OFPP_CONTROLLER = 0xfffd # Send to controller. OFPP_LOCAL = 0xfffe # Local openflow "port". OFPP_NONE = 0xffff # Not associated with a physical port. 
# enum ofp_type OFPT_HELLO = 0 # Symmetric message OFPT_ERROR = 1 # Symmetric message OFPT_ECHO_REQUEST = 2 # Symmetric message OFPT_ECHO_REPLY = 3 # Symmetric message OFPT_VENDOR = 4 # Symmetric message OFPT_FEATURES_REQUEST = 5 # Controller/switch message OFPT_FEATURES_REPLY = 6 # Controller/switch message OFPT_GET_CONFIG_REQUEST = 7 # Controller/switch message OFPT_GET_CONFIG_REPLY = 8 # Controller/switch message OFPT_SET_CONFIG = 9 # Controller/switch message OFPT_PACKET_IN = 10 # Async message OFPT_FLOW_REMOVED = 11 # Async message OFPT_PORT_STATUS = 12 # Async message OFPT_PACKET_OUT = 13 # Controller/switch message OFPT_FLOW_MOD = 14 # Controller/switch message OFPT_PORT_MOD = 15 # Controller/switch message OFPT_STATS_REQUEST = 16 # Controller/switch message OFPT_STATS_REPLY = 17 # Controller/switch message OFPT_BARRIER_REQUEST = 18 # Controller/switch message OFPT_BARRIER_REPLY = 19 # Controller/switch message OFPT_QUEUE_GET_CONFIG_REQUEST = 20 # Controller/switch message OFPT_QUEUE_GET_CONFIG_REPLY = 21 # Controller/switch message OFP_HEADER_PACK_STR = '!BBHI' OFP_HEADER_SIZE = 8 OFP_MSG_SIZE_MAX = 65535 assert calcsize(OFP_HEADER_PACK_STR) == OFP_HEADER_SIZE # define constants OFP_DEFAULT_MISS_SEND_LEN = 128 # enum ofp_config_flags OFPC_FRAG_NORMAL = 0 # No special handling for fragments. OFPC_FRAG_DROP = 1 # Drop fragments. OFPC_FRAG_REASM = 2 # Reassemble (only if OFPC_IP_REASM set). OFPC_FRAG_NX_MATCH = 3 # Make first fragments available for matching. OFPC_FRAG_MASK = 3 OFP_SWITCH_CONFIG_PACK_STR = '!HH' OFP_SWITCH_CONFIG_SIZE = 12 assert (calcsize(OFP_SWITCH_CONFIG_PACK_STR) + OFP_HEADER_SIZE == OFP_SWITCH_CONFIG_SIZE) # enum ofp_capabilities OFPC_FLOW_STATS = 1 << 0 # Flow statistics. OFPC_TABLE_STATS = 1 << 1 # Table statistics. OFPC_PORT_STATS = 1 << 2 # Port statistics. OFPC_STP = 1 << 3 # 802.1d spanning tree. OFPC_RESERVED = 1 << 4 # Reserved, must not be set. OFPC_IP_REASM = 1 << 5 # Can reassemble IP fragments. 
OFPC_QUEUE_STATS = 1 << 6 # Queue statistics. OFPC_ARP_MATCH_IP = 1 << 7 # Match IP addresses in ARP pkts. # enum ofp_port_config OFPPC_PORT_DOWN = 1 << 0 # Port is administratively down. OFPPC_NO_STP = 1 << 1 # Disable 802.1D spanning tree on port. OFPPC_NO_RECV = 1 << 2 # Drop all packets except 802.1D # spanning tree packets OFPPC_NO_RECV_STP = 1 << 3 # Drop received 802.1D STP packets. OFPPC_NO_FLOOD = 1 << 4 # Do not include this port when flooding. OFPPC_NO_FWD = 1 << 5 # Drop packets forwarded to port. OFPPC_NO_PACKET_IN = 1 << 6 # Do not send packet-in msgs for port. # enum ofp_port_state OFPPS_LINK_DOWN = 1 << 0 # No physical link present. OFPPS_STP_LISTEN = 0 << 8 # Not learning or relaying frames. OFPPS_STP_LEARN = 1 << 8 # Learning but not relaying frames. OFPPS_STP_FORWARD = 2 << 8 # Learning and relaying frames. OFPPS_STP_BLOCK = 3 << 8 # Not part of spanning tree. OFPPS_STP_MASK = 3 << 8 # Bit mask for OFPPS_STP_* values. # enum ofp_port_features OFPPF_10MB_HD = 1 << 0 # 10 Mb half-duplex rate support. OFPPF_10MB_FD = 1 << 1 # 10 Mb full-duplex rate support. OFPPF_100MB_HD = 1 << 2 # 100 Mb half-duplex rate support. OFPPF_100MB_FD = 1 << 3 # 100 Mb full-duplex rate support. OFPPF_1GB_HD = 1 << 4 # 1 Gb half-duplex rate support. OFPPF_1GB_FD = 1 << 5 # 1 Gb full-duplex rate support. OFPPF_10GB_FD = 1 << 6 # 10 Gb full-duplex rate support. OFPPF_COPPER = 1 << 7 # Copper medium. OFPPF_FIBER = 1 << 8 # Fiber medium. OFPPF_AUTONEG = 1 << 9 # Auto-negotiation. OFPPF_PAUSE = 1 << 10 # Pause. OFPPF_PAUSE_ASYM = 1 << 11 # Asymmetric pause. _OFP_PHY_PORT_PACK_STR = 'H' + OFP_ETH_ALEN_STR + 's' + \ str(OFP_MAX_PORT_NAME_LEN) + 'sIIIIII' OFP_PHY_PORT_PACK_STR = '!' 
+ _OFP_PHY_PORT_PACK_STR OFP_PHY_PORT_SIZE = 48 assert calcsize(OFP_PHY_PORT_PACK_STR) == OFP_PHY_PORT_SIZE OFP_SWITCH_FEATURES_PACK_STR = '!QIB3xII' OFP_SWITCH_FEATURES_SIZE = 32 assert (calcsize(OFP_SWITCH_FEATURES_PACK_STR) + OFP_HEADER_SIZE == OFP_SWITCH_FEATURES_SIZE) # enum ofp_port_reason OFPPR_ADD = 0 # The port was added. OFPPR_DELETE = 1 # The port was removed. OFPPR_MODIFY = 2 # Some attribute of the port has changed. OFP_PORT_STATUS_PACK_STR = '!B7x' + _OFP_PHY_PORT_PACK_STR OFP_PORT_STATUS_DESC_OFFSET = OFP_HEADER_SIZE + 8 OFP_PORT_STATUS_SIZE = 64 assert (calcsize(OFP_PORT_STATUS_PACK_STR) + OFP_HEADER_SIZE == OFP_PORT_STATUS_SIZE) OFP_PORT_MOD_PACK_STR = '!H' + OFP_ETH_ALEN_STR + 'sIII4x' OFP_PORT_MOD_SIZE = 32 assert calcsize(OFP_PORT_MOD_PACK_STR) + OFP_HEADER_SIZE == OFP_PORT_MOD_SIZE # enum ofp_packet_in_reason OFPR_NO_MATCH = 0 # No matching flow. OFPR_ACTION = 1 # Action explicitly output to controller. # OF1.0 spec says OFP_ASSERT(sizeof(struct ofp_packet_in) == 20). # It's quite bogus as it assumes a specific class of C implementations. # (well, if it was C. it's unclear from the spec itself.) # We just use the real size of the structure as this is not C. This # agrees with on-wire messages OpenFlow Reference Release and Open vSwitch # produce. OFP_PACKET_IN_PACK_STR = '!IHHBx' OFP_PACKET_IN_SIZE = 18 assert calcsize(OFP_PACKET_IN_PACK_STR) + OFP_HEADER_SIZE == OFP_PACKET_IN_SIZE # enum ofp_action_type OFPAT_OUTPUT = 0 # Output to switch port. OFPAT_SET_VLAN_VID = 1 # Set the 802.1q VLAN id. OFPAT_SET_VLAN_PCP = 2 # Set the 802.1q priority. OFPAT_STRIP_VLAN = 3 # Strip the 802.1q header. OFPAT_SET_DL_SRC = 4 # Ethernet source address. OFPAT_SET_DL_DST = 5 # Ethernet destination address. OFPAT_SET_NW_SRC = 6 # IP source address. OFPAT_SET_NW_DST = 7 # IP destination address. OFPAT_SET_NW_TOS = 8 # IP ToS (DSCP field, 6 bits). OFPAT_SET_TP_SRC = 9 # TCP/UDP source port. OFPAT_SET_TP_DST = 10 # TCP/UDP destination port. 
OFPAT_ENQUEUE = 11 # Output to queue. OFPAT_VENDOR = 0xffff OFP_ACTION_OUTPUT_PACK_STR = '!HHHH' OFP_ACTION_OUTPUT_SIZE = 8 assert calcsize(OFP_ACTION_OUTPUT_PACK_STR) == OFP_ACTION_OUTPUT_SIZE # define constants OFP_VLAN_NONE = 0xffff OFP_ACTION_VLAN_VID_PACK_STR = '!HHH2x' OFP_ACTION_VLAN_VID_SIZE = 8 assert calcsize(OFP_ACTION_VLAN_VID_PACK_STR) == OFP_ACTION_VLAN_VID_SIZE OFP_ACTION_VLAN_PCP_PACK_STR = '!HHB3x' OFP_ACTION_VLAN_PCP_SIZE = 8 assert calcsize(OFP_ACTION_VLAN_PCP_PACK_STR) == OFP_ACTION_VLAN_PCP_SIZE OFP_ACTION_DL_ADDR_PACK_STR = '!HH' + OFP_ETH_ALEN_STR + 's6x' OFP_ACTION_DL_ADDR_SIZE = 16 assert calcsize(OFP_ACTION_DL_ADDR_PACK_STR) == OFP_ACTION_DL_ADDR_SIZE OFP_ACTION_NW_ADDR_PACK_STR = '!HHI' OFP_ACTION_NW_ADDR_SIZE = 8 assert calcsize(OFP_ACTION_NW_ADDR_PACK_STR) == OFP_ACTION_NW_ADDR_SIZE OFP_ACTION_NW_TOS_PACK_STR = '!HHB3x' OFP_ACTION_NW_TOS_SIZE = 8 assert calcsize(OFP_ACTION_NW_TOS_PACK_STR) == OFP_ACTION_NW_TOS_SIZE OFP_ACTION_TP_PORT_PACK_STR = '!HHH2x' OFP_ACTION_TP_PORT_SIZE = 8 assert calcsize(OFP_ACTION_TP_PORT_PACK_STR) == OFP_ACTION_TP_PORT_SIZE OFP_ACTION_VENDOR_HEADER_PACK_STR = '!HHI' OFP_ACTION_VENDOR_HEADER_SIZE = 8 assert (calcsize(OFP_ACTION_VENDOR_HEADER_PACK_STR) == OFP_ACTION_VENDOR_HEADER_SIZE) OFP_ACTION_HEADER_PACK_STR = '!HH4x' OFP_ACTION_HEADER_SIZE = 8 assert calcsize(OFP_ACTION_HEADER_PACK_STR) == OFP_ACTION_HEADER_SIZE OFP_ACTION_ENQUEUE_PACK_STR = '!HHH6xI' OFP_ACTION_ENQUEUE_SIZE = 16 assert calcsize(OFP_ACTION_ENQUEUE_PACK_STR) == OFP_ACTION_ENQUEUE_SIZE OFP_ACTION_PACK_STR = '!H' # because of union ofp_action # OFP_ACTION_SIZE = 8 # assert calcsize(OFP_ACTION_PACK_STR) == OFP_ACTION_SIZE # enum nx_action_subtype NXAST_RESUBMIT = 1 NXAST_SET_TUNNEL = 2 NXAST_DROP_SPOOFED_ARP__OBSOLETE = 3 NXAST_SET_QUEUE = 4 NXAST_POP_QUEUE = 5 NXAST_REG_MOVE = 6 NXAST_REG_LOAD = 7 NXAST_NOTE = 8 NXAST_SET_TUNNEL64 = 9 NXAST_MULTIPATH = 10 NXAST_AUTOPATH = 11 NXAST_BUNDLE = 12 NXAST_BUNDLE_LOAD = 13 NXAST_RESUBMIT_TABLE = 14 
NXAST_OUTPUT_REG = 15 NXAST_LEARN = 16 NXAST_EXIT = 17 NXAST_DEC_TTL = 18 NXAST_FIN_TIMEOUT = 19 NXAST_CONTROLLER = 20 NX_ACTION_RESUBMIT_PACK_STR = '!HHIHHB3x' NX_ACTION_RESUBMIT_SIZE = 16 assert calcsize(NX_ACTION_RESUBMIT_PACK_STR) == NX_ACTION_RESUBMIT_SIZE NX_ACTION_SET_TUNNEL_PACK_STR = '!HHIH2xI' NX_ACTION_SET_TUNNEL_SIZE = 16 assert calcsize(NX_ACTION_SET_TUNNEL_PACK_STR) == NX_ACTION_SET_TUNNEL_SIZE NX_ACTION_SET_QUEUE_PACK_STR = '!HHIH2xI' NX_ACTION_SET_QUEUE_SIZE = 16 assert calcsize(NX_ACTION_SET_QUEUE_PACK_STR) == NX_ACTION_SET_QUEUE_SIZE NX_ACTION_POP_QUEUE_PACK_STR = '!HHIH6x' NX_ACTION_POP_QUEUE_SIZE = 16 assert calcsize(NX_ACTION_POP_QUEUE_PACK_STR) == NX_ACTION_POP_QUEUE_SIZE NX_ACTION_REG_MOVE_PACK_STR = '!HHIHHHHII' NX_ACTION_REG_MOVE_SIZE = 24 assert calcsize(NX_ACTION_REG_MOVE_PACK_STR) == NX_ACTION_REG_MOVE_SIZE NX_ACTION_REG_LOAD_PACK_STR = '!HHIHHIQ' NX_ACTION_REG_LOAD_SIZE = 24 assert calcsize(NX_ACTION_REG_LOAD_PACK_STR) == NX_ACTION_REG_LOAD_SIZE NX_ACTION_SET_TUNNEL64_PACK_STR = '!HHIH6xQ' NX_ACTION_SET_TUNNEL64_SIZE = 24 assert calcsize(NX_ACTION_SET_TUNNEL64_PACK_STR) == NX_ACTION_SET_TUNNEL64_SIZE NX_ACTION_MULTIPATH_PACK_STR = '!HHIHHH2xHHI2xHI' NX_ACTION_MULTIPATH_SIZE = 32 assert calcsize(NX_ACTION_MULTIPATH_PACK_STR) == NX_ACTION_MULTIPATH_SIZE NX_ACTION_NOTE_PACK_STR = '!HHIH6B' NX_ACTION_NOTE_SIZE = 16 assert calcsize(NX_ACTION_NOTE_PACK_STR) == NX_ACTION_NOTE_SIZE NX_ACTION_BUNDLE_PACK_STR = '!HHIHHHHIHHI4x' NX_ACTION_BUNDLE_SIZE = 32 assert calcsize(NX_ACTION_BUNDLE_PACK_STR) == NX_ACTION_BUNDLE_SIZE NX_ACTION_AUTOPATH_PACK_STR = '!HHIHHII4x' NX_ACTION_AUTOPATH_SIZE = 24 assert calcsize(NX_ACTION_AUTOPATH_PACK_STR) == NX_ACTION_AUTOPATH_SIZE NX_ACTION_OUTPUT_REG_PACK_STR = '!HHIHHIH6x' NX_ACTION_OUTPUT_REG_SIZE = 24 assert calcsize(NX_ACTION_OUTPUT_REG_PACK_STR) == NX_ACTION_OUTPUT_REG_SIZE NX_ACTION_LEARN_PACK_STR = '!HHIHHHHQHBxHH' NX_ACTION_LEARN_SIZE = 32 assert calcsize(NX_ACTION_LEARN_PACK_STR) == NX_ACTION_LEARN_SIZE 
NX_ACTION_CONTROLLER_PACK_STR = '!HHIHHHBB' NX_ACTION_CONTROLLER_SIZE = 16 assert calcsize(NX_ACTION_CONTROLLER_PACK_STR) == NX_ACTION_CONTROLLER_SIZE NX_ACTION_FIN_TIMEOUT_PACK_STR = '!HHIHHH2x' NX_ACTION_FIN_TIMEOUT_SIZE = 16 assert calcsize(NX_ACTION_FIN_TIMEOUT_PACK_STR) == NX_ACTION_FIN_TIMEOUT_SIZE NX_ACTION_HEADER_PACK_STR = '!HHIH6x' NX_ACTION_HEADER_SIZE = 16 assert calcsize(NX_ACTION_HEADER_PACK_STR) == NX_ACTION_HEADER_SIZE OFP_PACKET_OUT_PACK_STR = '!IHH' OFP_PACKET_OUT_SIZE = 16 assert (calcsize(OFP_PACKET_OUT_PACK_STR) + OFP_HEADER_SIZE == OFP_PACKET_OUT_SIZE) # enum ofp_flow_mod_command OFPFC_ADD = 0 # New flow. OFPFC_MODIFY = 1 # Modify all matching flows. OFPFC_MODIFY_STRICT = 2 # Modify entry strictly matching wildcards OFPFC_DELETE = 3 # Delete all matching flows. OFPFC_DELETE_STRICT = 4 # Strictly match wildcards and priority. # enum ofp_flow_wildcards OFPFW_IN_PORT = 1 << 0 # Switch input port. OFPFW_DL_VLAN = 1 << 1 # VLAN vid. OFPFW_DL_SRC = 1 << 2 # Ethernet source address. OFPFW_DL_DST = 1 << 3 # Ethernet destination address. OFPFW_DL_TYPE = 1 << 4 # Ethernet frame type. OFPFW_NW_PROTO = 1 << 5 # IP protocol. OFPFW_TP_SRC = 1 << 6 # TCP/UDP source port. OFPFW_TP_DST = 1 << 7 # TCP/UDP destination port. OFPFW_NW_SRC_SHIFT = 8 OFPFW_NW_SRC_BITS = 6 OFPFW_NW_SRC_MASK = ((1 << OFPFW_NW_SRC_BITS) - 1) << OFPFW_NW_SRC_SHIFT OFPFW_NW_SRC_ALL = 32 << OFPFW_NW_SRC_SHIFT OFPFW_NW_DST_SHIFT = 14 OFPFW_NW_DST_BITS = 6 OFPFW_NW_DST_MASK = ((1 << OFPFW_NW_DST_BITS) - 1) << OFPFW_NW_DST_SHIFT OFPFW_NW_DST_ALL = 32 << OFPFW_NW_DST_SHIFT OFPFW_DL_VLAN_PCP = 1 << 20 # VLAN priority. OFPFW_NW_TOS = 1 << 21 # IP ToS (DSCP field, 6 bits). OFPFW_ALL = ((1 << 22) - 1) # define constants OFPFW_ICMP_TYPE = OFPFW_TP_SRC OFPFW_ICMP_CODE = OFPFW_TP_DST OFP_DL_TYPE_ETH2_CUTOFF = 0x0600 OFP_DL_TYPE_NOT_ETH_TYPE = 0x05ff OFP_VLAN_NONE = 0xffff _OFP_MATCH_PACK_STR = 'IH' + OFP_ETH_ALEN_STR + 's' + OFP_ETH_ALEN_STR + \ 'sHBxHBB2xIIHH' OFP_MATCH_PACK_STR = '!' 
+ _OFP_MATCH_PACK_STR OFP_MATCH_SIZE = 40 assert calcsize(OFP_MATCH_PACK_STR) == OFP_MATCH_SIZE OFP_FLOW_PERMANENT = 0 OFP_DEFAULT_PRIORITY = 0x8000 # enum ofp_flow_mod_flags OFPFF_SEND_FLOW_REM = 1 << 0 # Send flow removed message when flow # expires or is deleted. OFPFF_CHECK_OVERLAP = 1 << 1 # Check for overlapping entries first. OFPFF_EMERG = 1 << 2 # Ramark this is for emergency. _OFP_FLOW_MOD_PACK_STR0 = 'QHHHHIHH' OFP_FLOW_MOD_PACK_STR = '!' + _OFP_MATCH_PACK_STR + _OFP_FLOW_MOD_PACK_STR0 OFP_FLOW_MOD_PACK_STR0 = '!' + _OFP_FLOW_MOD_PACK_STR0 OFP_FLOW_MOD_SIZE = 72 assert calcsize(OFP_FLOW_MOD_PACK_STR) + OFP_HEADER_SIZE == OFP_FLOW_MOD_SIZE # enum ofp_flow_removed_reason OFPRR_IDLE_TIMEOUT = 0 # Flow idle time exceeded idle_timeout. OFPRR_HARD_TIMEOUT = 1 # Time exceeded hard_timeout. OFPRR_DELETE = 2 # Evicted by a DELETE flow mod. _OFP_FLOW_REMOVED_PACK_STR0 = 'QHBxIIH2xQQ' OFP_FLOW_REMOVED_PACK_STR = '!' + _OFP_MATCH_PACK_STR + \ _OFP_FLOW_REMOVED_PACK_STR0 OFP_FLOW_REMOVED_PACK_STR0 = '!' + _OFP_FLOW_REMOVED_PACK_STR0 OFP_FLOW_REMOVED_SIZE = 88 assert (calcsize(OFP_FLOW_REMOVED_PACK_STR) + OFP_HEADER_SIZE == OFP_FLOW_REMOVED_SIZE) # enum ofp_error_type OFPET_HELLO_FAILED = 0 # Hello protocol failed. OFPET_BAD_REQUEST = 1 # Request was not understood. OFPET_BAD_ACTION = 2 # Error in action description. OFPET_FLOW_MOD_FAILED = 3 # Problem modifying flow entry. OFPET_PORT_MOD_FAILED = 4 # OFPT_PORT_MOD failed. OFPET_QUEUE_OP_FAILED = 5 # Queue operation failed. # enum ofp_hello_failed_code OFPHFC_INCOMPATIBLE = 0 # No compatible version. OFPHFC_EPERM = 1 # Permissions error. # enum ofp_bad_request_code OFPBRC_BAD_VERSION = 0 # ofp_header.version not supported. OFPBRC_BAD_TYPE = 1 # ofp_header.type not supported. OFPBRC_BAD_STAT = 2 # ofp_stats_msg.type not supported. OFPBRC_BAD_VENDOR = 3 # Vendor not supported (in ofp_vendor_header # or ofp_stats_msg). OFPBRC_BAD_SUBTYPE = 4 # Vendor subtype not supported. OFPBRC_EPERM = 5 # Permissions error. 
OFPBRC_BAD_LEN = 6 # Wrong request length for type. OFPBRC_BUFFER_EMPTY = 7 # Specified buffer has already been used. OFPBRC_BUFFER_UNKNOWN = 8 # Specified buffer does not exist. # enum ofp_bad_action_code OFPBAC_BAD_TYPE = 0 # Unknown action type. OFPBAC_BAD_LEN = 1 # Length problem in actions. OFPBAC_BAD_VENDOR = 2 # Unknown vendor id specified. OFPBAC_BAD_VENDOR_TYPE = 3 # Unknown action type for vendor id. OFPBAC_BAD_OUT_PORT = 4 # Problem validating output action. OFPBAC_BAD_ARGUMENT = 5 # Bad action argument. OFPBAC_EPERM = 6 # Permissions error. OFPBAC_TOO_MANY = 7 # Can't handle this many actions. OFPBAC_BAD_QUEUE = 8 # Problem validating output queue. # enum ofp_flow_mod_failed_code OFPFMFC_ALL_TABLES_FULL = 0 # Flow not added because of full tables. OFPFMFC_OVERLAP = 1 # Attempted to add overlapping flow with # CHECK_OVERLAP flags set. OFPFMFC_EPERM = 2 # Permissions error. OFPFMFC_BAD_EMERG_TIMEOUT = 3 # Flow not added because of non-zero idle/hard # timeout. OFPFMFC_BAD_COMMAND = 4 # Unknown command. OFPFMFC_UNSUPPORTED = 5 # Unsupported action list - cannot process in # the order specified. # enum ofp_port_mod_failed_code OFPPMFC_BAD_PORT = 0 # Specified port does not exist. OFPPMFC_BAD_HW_ADDR = 1 # Specified hardware address is wrong. # enum ofp_queue_op_failed_code OFPQOFC_BAD_PORT = 0 # Invalid port (or port does not exist). OFPQOFC_BAD_QUEUE = 1 # Queue does not exist. OFPQOFC_EPERM = 2 # Permissions error. OFP_ERROR_MSG_PACK_STR = '!HH' OFP_ERROR_MSG_SIZE = 12 assert calcsize(OFP_ERROR_MSG_PACK_STR) + OFP_HEADER_SIZE == OFP_ERROR_MSG_SIZE # enum ofp_stats_types OFPST_DESC = 0 OFPST_FLOW = 1 OFPST_AGGREGATE = 2 OFPST_TABLE = 3 OFPST_PORT = 4 OFPST_QUEUE = 5 OFPST_VENDOR = 0xffff _OFP_STATS_MSG_PACK_STR = 'HH' OFP_STATS_MSG_PACK_STR = '!' + _OFP_STATS_MSG_PACK_STR OFP_STATS_MSG_SIZE = 12 assert calcsize(OFP_STATS_MSG_PACK_STR) + OFP_HEADER_SIZE == OFP_STATS_MSG_SIZE # enum ofp_stats_reply_flags OFPSF_REPLY_MORE = 1 << 0 # More replies to follow. 
# define constants DESC_STR_LEN = 256 DESC_STR_LEN_STR = str(DESC_STR_LEN) SERIAL_NUM_LEN = 32 SERIAL_NUM_LEN_STR = str(SERIAL_NUM_LEN) OFP_DESC_STATS_PACK_STR = '!' + \ DESC_STR_LEN_STR + 's' + \ DESC_STR_LEN_STR + 's' + \ DESC_STR_LEN_STR + 's' + \ SERIAL_NUM_LEN_STR + 's' + \ DESC_STR_LEN_STR + 's' OFP_DESC_STATS_SIZE = 1068 assert (calcsize(OFP_DESC_STATS_PACK_STR) + OFP_STATS_MSG_SIZE == OFP_DESC_STATS_SIZE) _OFP_FLOW_STATS_REQUEST_ID_PORT_STR = 'BxH' OFP_FLOW_STATS_REQUEST_ID_PORT_STR = '!' + _OFP_FLOW_STATS_REQUEST_ID_PORT_STR OFP_FLOW_STATS_REQUEST_PACK_STR = '!' + _OFP_MATCH_PACK_STR + \ _OFP_FLOW_STATS_REQUEST_ID_PORT_STR OFP_FLOW_STATS_REQUEST_SIZE = 56 assert (calcsize(OFP_FLOW_STATS_REQUEST_PACK_STR) + OFP_STATS_MSG_SIZE == OFP_FLOW_STATS_REQUEST_SIZE) _OFP_FLOW_STATS_0_PACK_STR = 'HBx' OFP_FLOW_STATS_0_PACK_STR = '!' + _OFP_FLOW_STATS_0_PACK_STR OFP_FLOW_STATS_0_SIZE = 4 assert calcsize(OFP_FLOW_STATS_0_PACK_STR) == OFP_FLOW_STATS_0_SIZE _OFP_FLOW_STATS_1_PACK_STR = 'IIHHH6xQQQ' OFP_FLOW_STATS_1_PACK_STR = '!' + _OFP_FLOW_STATS_1_PACK_STR OFP_FLOW_STATS_1_SIZE = 44 assert calcsize(OFP_FLOW_STATS_1_PACK_STR) == OFP_FLOW_STATS_1_SIZE OFP_FLOW_STATS_PACK_STR = '!' 
+ _OFP_FLOW_STATS_0_PACK_STR +\ _OFP_MATCH_PACK_STR + _OFP_FLOW_STATS_1_PACK_STR OFP_FLOW_STATS_SIZE = 88 assert calcsize(OFP_FLOW_STATS_PACK_STR) == OFP_FLOW_STATS_SIZE OFP_AGGREGATE_STATS_REPLY_PACK_STR = '!QQI4x' OFP_AGGREGATE_STATS_REPLY_SIZE = 36 assert (calcsize(OFP_AGGREGATE_STATS_REPLY_PACK_STR) + OFP_STATS_MSG_SIZE == OFP_AGGREGATE_STATS_REPLY_SIZE) OFP_TABLE_STATS_PACK_STR = '!B3x' + OFP_MAX_TABLE_NAME_LEN_STR + 'sIIIQQ' OFP_TABLE_STATS_SIZE = 64 assert calcsize(OFP_TABLE_STATS_PACK_STR) == OFP_TABLE_STATS_SIZE OFP_PORT_STATS_REQUEST_PACK_STR = '!H6x' OFP_PORT_STATS_REQUEST_SIZE = 20 assert (calcsize(OFP_PORT_STATS_REQUEST_PACK_STR) + OFP_STATS_MSG_SIZE == OFP_PORT_STATS_REQUEST_SIZE) OFP_PORT_STATS_PACK_STR = '!H6xQQQQQQQQQQQQ' OFP_PORT_STATS_SIZE = 104 assert calcsize(OFP_PORT_STATS_PACK_STR) == OFP_PORT_STATS_SIZE OFPQ_ALL = 0xffffffff OFP_QUEUE_STATS_REQUEST_PACK_STR = '!HxxI' OFP_QUEUE_STATS_REQUEST_SIZE = 8 assert (calcsize(OFP_QUEUE_STATS_REQUEST_PACK_STR) == OFP_QUEUE_STATS_REQUEST_SIZE) OFP_QUEUE_STATS_PACK_STR = '!H2xIQQQ' OFP_QUEUE_STATS_SIZE = 32 assert calcsize(OFP_QUEUE_STATS_PACK_STR) == OFP_QUEUE_STATS_SIZE OFP_VENDOR_STATS_MSG_PACK_STR = '!I' OFP_VENDOR_STATS_MSG_SIZE = 16 assert (calcsize(OFP_VENDOR_STATS_MSG_PACK_STR) + OFP_STATS_MSG_SIZE == OFP_VENDOR_STATS_MSG_SIZE) OFP_VENDOR_HEADER_PACK_STR = '!I' OFP_VENDOR_HEADER_SIZE = 12 assert (calcsize(OFP_VENDOR_HEADER_PACK_STR) + OFP_HEADER_SIZE == OFP_VENDOR_HEADER_SIZE) OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR = '!H2x' OFP_QUEUE_GET_CONFIG_REQUEST_SIZE = 12 assert (calcsize(OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR) + OFP_HEADER_SIZE == OFP_QUEUE_GET_CONFIG_REQUEST_SIZE) OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR = '!H6x' OFP_QUEUE_GET_CONFIG_REPLY_SIZE = 16 assert (calcsize(OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR) + OFP_HEADER_SIZE == OFP_QUEUE_GET_CONFIG_REPLY_SIZE) OFP_PACKET_QUEUE_PQCK_STR = '!IH2x' OFP_PACKET_QUEUE_SIZE = 8 assert calcsize(OFP_PACKET_QUEUE_PQCK_STR) == OFP_PACKET_QUEUE_SIZE OFPQT_NONE 
= 0 OFPQT_MIN_RATE = 1 OFP_QUEUE_PROP_HEADER_PACK_STR = '!HH4x' OFP_QUEUE_PROP_HEADER_SIZE = 8 assert calcsize(OFP_QUEUE_PROP_HEADER_PACK_STR) == OFP_QUEUE_PROP_HEADER_SIZE OFP_QUEUE_PROP_MIN_RATE_PACK_STR = '!H6x' OFP_QUEUE_PROP_MIN_RATE_SIZE = 16 assert (calcsize(OFP_QUEUE_PROP_MIN_RATE_PACK_STR) + OFP_QUEUE_PROP_HEADER_SIZE == OFP_QUEUE_PROP_MIN_RATE_SIZE) NX_VENDOR_ID = 0x00002320 # enum nicira_type (abridged) NXT_ROLE_REQUEST = 10 NXT_ROLE_REPLY = 11 NXT_SET_FLOW_FORMAT = 12 NXT_FLOW_MOD = 13 NXT_FLOW_REMOVED = 14 NXT_FLOW_MOD_TABLE_ID = 15 NXT_SET_PACKET_IN_FORMAT = 16 NXT_PACKET_IN = 17 NXT_FLOW_AGE = 18 NXT_SET_ASYNC_CONFIG = 19 NXT_SET_CONTROLLER_ID = 20 # enum nx_role NX_ROLE_OTHER = 0 NX_ROLE_MASTER = 1 NX_ROLE_SLAVE = 2 # enum nx_flow_format NXFF_OPENFLOW10 = 0 NXFF_NXM = 2 # enum nx_packet_in_format NXPIF_OPENFLOW10 = 0 NXPIF_NXM = 1 # enum nx_stats_types NXST_FLOW = 0 NXST_AGGREGATE = 1 NXST_FLOW_MONITOR = 2 NICIRA_HEADER_PACK_STR = '!II' NICIRA_HEADER_SIZE = 16 assert (calcsize(NICIRA_HEADER_PACK_STR) + OFP_HEADER_SIZE == NICIRA_HEADER_SIZE) NX_ROLE_PACK_STR = '!I' NX_ROLE_SIZE = 20 assert (calcsize(NX_ROLE_PACK_STR) + NICIRA_HEADER_SIZE == NX_ROLE_SIZE) NX_FLOW_MOD_PACK_STR = '!Q4HI3H6x' NX_FLOW_MOD_SIZE = 48 assert (calcsize(NX_FLOW_MOD_PACK_STR) + NICIRA_HEADER_SIZE == NX_FLOW_MOD_SIZE) NX_SET_FLOW_FORMAT_PACK_STR = '!I' NX_SET_FLOW_FORMAT_SIZE = 20 assert (calcsize(NX_SET_FLOW_FORMAT_PACK_STR) + NICIRA_HEADER_SIZE == NX_SET_FLOW_FORMAT_SIZE) NX_FLOW_REMOVED_PACK_STR = '!QHBxIIHHQQ' NX_FLOW_REMOVED_SIZE = 56 assert (calcsize(NX_FLOW_REMOVED_PACK_STR) + NICIRA_HEADER_SIZE == NX_FLOW_REMOVED_SIZE) NX_FLOW_MOD_TABLE_ID_PACK_STR = '!B7x' NX_FLOW_MOD_TABLE_ID_SIZE = 24 assert (calcsize(NX_FLOW_MOD_TABLE_ID_PACK_STR) + NICIRA_HEADER_SIZE == NX_FLOW_MOD_TABLE_ID_SIZE) NX_SET_PACKET_IN_FORMAT_PACK_STR = '!I' NX_SET_PACKET_IN_FORMAT_SIZE = 20 assert (calcsize(NX_SET_PACKET_IN_FORMAT_PACK_STR) + NICIRA_HEADER_SIZE == NX_SET_PACKET_IN_FORMAT_SIZE) 
# Nicira vendor message bodies (sizes exclude the shared NICIRA_HEADER).
NX_PACKET_IN_PACK_STR = '!IHBBQH6x'
NX_PACKET_IN_SIZE = 40
assert (calcsize(NX_PACKET_IN_PACK_STR) + NICIRA_HEADER_SIZE ==
        NX_PACKET_IN_SIZE)

NX_ASYNC_CONFIG_PACK_STR = '!IIIIII'
NX_ASYNC_CONFIG_SIZE = 40
assert (calcsize(NX_ASYNC_CONFIG_PACK_STR) + NICIRA_HEADER_SIZE ==
        NX_ASYNC_CONFIG_SIZE)

NX_CONTROLLER_ID_PACK_STR = '!6xH'
NX_CONTROLLER_ID_SIZE = 24
assert (calcsize(NX_CONTROLLER_ID_PACK_STR) + NICIRA_HEADER_SIZE ==
        NX_CONTROLLER_ID_SIZE)

NX_STATS_MSG_PACK_STR = '!I4x'
# NX_STATS_MSG0_SIZE is the bare body; NX_STATS_MSG_SIZE includes the
# enclosing OFP vendor-stats header.
NX_STATS_MSG0_SIZE = 8
assert calcsize(NX_STATS_MSG_PACK_STR) == NX_STATS_MSG0_SIZE
NX_STATS_MSG_SIZE = 24
assert (calcsize(NX_STATS_MSG_PACK_STR) + OFP_VENDOR_STATS_MSG_SIZE ==
        NX_STATS_MSG_SIZE)

NX_FLOW_STATS_REQUEST_PACK_STR = '!2HB3x'
NX_FLOW_STATS_REQUEST_SIZE = 8
assert (calcsize(NX_FLOW_STATS_REQUEST_PACK_STR) ==
        NX_FLOW_STATS_REQUEST_SIZE)

NX_FLOW_STATS_PACK_STR = '!HBxIIHHHHHHQQQ'
NX_FLOW_STATS_SIZE = 48
assert calcsize(NX_FLOW_STATS_PACK_STR) == NX_FLOW_STATS_SIZE

NX_AGGREGATE_STATS_REQUEST_PACK_STR = '!2HB3x'
NX_AGGREGATE_STATS_REQUEST_SIZE = 8
assert (calcsize(NX_AGGREGATE_STATS_REQUEST_PACK_STR) ==
        NX_AGGREGATE_STATS_REQUEST_SIZE)

NX_AGGREGATE_STATS_REPLY_PACK_STR = '!QQI4x'
NX_AGGREGATE_STATS_REPLY_SIZE = 24
assert (calcsize(NX_AGGREGATE_STATS_REPLY_PACK_STR) ==
        NX_AGGREGATE_STATS_REPLY_SIZE)


def nxm_header__(vendor, field, hasmask, length):
    # Pack an NXM (Nicira extended match) TLV header into a 32-bit int:
    # bits 31..16 vendor, 15..9 field, 8 hasmask flag, 7..0 payload length.
    return (vendor << 16) | (field << 9) | (hasmask << 8) | length


def nxm_header(vendor, field, length):
    # Header for a match entry carrying only a value (no mask).
    return nxm_header__(vendor, field, 0, length)


def nxm_header_w(vendor, field, length):
    # Header for a maskable ("wildcarded") entry: payload carries value
    # followed by mask, hence twice the base length.
    return nxm_header__(vendor, field, 1, (length) * 2)


# NXM field headers.  Vendor 0x0000 holds the OpenFlow-equivalent fields,
# vendor 0x0001 the Nicira-specific extensions.  The *_W variants are the
# maskable forms of the same fields.
NXM_OF_IN_PORT = nxm_header(0x0000, 0, 2)

NXM_OF_ETH_DST = nxm_header(0x0000, 1, 6)
NXM_OF_ETH_DST_W = nxm_header_w(0x0000, 1, 6)
NXM_OF_ETH_SRC = nxm_header(0x0000, 2, 6)
NXM_OF_ETH_SRC_W = nxm_header_w(0x0000, 2, 6)
NXM_OF_ETH_TYPE = nxm_header(0x0000, 3, 2)

NXM_OF_VLAN_TCI = nxm_header(0x0000, 4, 2)
NXM_OF_VLAN_TCI_W = nxm_header_w(0x0000, 4, 2)

NXM_OF_IP_TOS = nxm_header(0x0000, 5, 1)
NXM_OF_IP_PROTO = nxm_header(0x0000, 6, 1)
NXM_OF_IP_SRC = nxm_header(0x0000, 7, 4)
NXM_OF_IP_SRC_W = nxm_header_w(0x0000, 7, 4)
NXM_OF_IP_DST = nxm_header(0x0000, 8, 4)
NXM_OF_IP_DST_W = nxm_header_w(0x0000, 8, 4)

NXM_OF_TCP_SRC = nxm_header(0x0000, 9, 2)
NXM_OF_TCP_SRC_W = nxm_header_w(0x0000, 9, 2)
NXM_OF_TCP_DST = nxm_header(0x0000, 10, 2)
NXM_OF_TCP_DST_W = nxm_header_w(0x0000, 10, 2)

NXM_OF_UDP_SRC = nxm_header(0x0000, 11, 2)
NXM_OF_UDP_SRC_W = nxm_header_w(0x0000, 11, 2)
NXM_OF_UDP_DST = nxm_header(0x0000, 12, 2)
NXM_OF_UDP_DST_W = nxm_header_w(0x0000, 12, 2)

NXM_OF_ICMP_TYPE = nxm_header(0x0000, 13, 1)
NXM_OF_ICMP_CODE = nxm_header(0x0000, 14, 1)

NXM_OF_ARP_OP = nxm_header(0x0000, 15, 2)
NXM_OF_ARP_SPA = nxm_header(0x0000, 16, 4)
NXM_OF_ARP_SPA_W = nxm_header_w(0x0000, 16, 4)
NXM_OF_ARP_TPA = nxm_header(0x0000, 17, 4)
NXM_OF_ARP_TPA_W = nxm_header_w(0x0000, 17, 4)

NXM_NX_TUN_ID = nxm_header(0x0001, 16, 8)
NXM_NX_TUN_ID_W = nxm_header_w(0x0001, 16, 8)

NXM_NX_ARP_SHA = nxm_header(0x0001, 17, 6)
NXM_NX_ARP_THA = nxm_header(0x0001, 18, 6)

NXM_NX_IPV6_SRC = nxm_header(0x0001, 19, 16)
NXM_NX_IPV6_SRC_W = nxm_header_w(0x0001, 19, 16)
NXM_NX_IPV6_DST = nxm_header(0x0001, 20, 16)
NXM_NX_IPV6_DST_W = nxm_header_w(0x0001, 20, 16)

NXM_NX_ICMPV6_TYPE = nxm_header(0x0001, 21, 1)
NXM_NX_ICMPV6_CODE = nxm_header(0x0001, 22, 1)

NXM_NX_ND_TARGET = nxm_header(0x0001, 23, 16)
NXM_NX_ND_TARGET_W = nxm_header_w(0x0001, 23, 16)
NXM_NX_ND_SLL = nxm_header(0x0001, 24, 6)
NXM_NX_ND_TLL = nxm_header(0x0001, 25, 6)

NXM_NX_IP_FRAG = nxm_header(0x0001, 26, 1)
NXM_NX_IP_FRAG_W = nxm_header_w(0x0001, 26, 1)

NXM_NX_IPV6_LABEL = nxm_header(0x0001, 27, 4)
NXM_NX_IP_ECN = nxm_header(0x0001, 28, 1)
NXM_NX_IP_TTL = nxm_header(0x0001, 29, 1)


def nxm_nx_reg(idx):
    # Header for Nicira general-purpose register ``reg<idx>`` (4 bytes).
    return nxm_header(0x0001, idx, 4)


def nxm_nx_reg_w(idx):
    # Maskable form of nxm_nx_reg.
    return nxm_header_w(0x0001, idx, 4)


NXM_HEADER_PACK_STRING = '!I'

# enum nx_hash_fields
NX_HASH_FIELDS_ETH_SRC = 0
NX_HASH_FIELDS_SYMMETRIC_L4 = 1

# enum nx_mp_algorithm
NX_MP_ALG_MODULO_N = 0
NX_MP_ALG_HASH_THRESHOLD = 1
NX_MP_ALG_HRW = 2
NX_MP_ALG_ITER_HASH = 3

# enum nx_bd_algorithm
NX_BD_ALG_ACTIVE_BACKUP = 0
NX_BD_ALG_HRW = 1

# nx_learn constants
NX_LEARN_N_BITS_MASK = 0x3ff

NX_LEARN_SRC_FIELD = 0 << 13    # Copy from field.
NX_LEARN_SRC_IMMEDIATE = 1 << 13    # Copy from immediate value.
NX_LEARN_SRC_MASK = 1 << 13

NX_LEARN_DST_MATCH = 0 << 11    # Add match criterion.
NX_LEARN_DST_LOAD = 1 << 11    # Add NXAST_REG_LOAD action
NX_LEARN_DST_OUTPUT = 2 << 11    # Add OFPAT_OUTPUT action.
NX_LEARN_DST_RESERVED = 3 << 11    # Not yet defined.
NX_LEARN_DST_MASK = 3 << 11
apache-2.0
bnbowman/MetagenomicTools
src/pbmgx/nucmer/segment.py
2
2790
#! /usr/bin/env python

__author__ = 'bbowman@pacificbiosciences.com'


class Segment(object):
    """
    A Class for representing a segment of a Nucmer Alignment.

    A segment is a directed interval: start < end is a 'forward'
    segment, start > end is a 'reverse' segment.  Zero-length segments
    (start == end) are not allowed.
    """

    def __init__(self, start, end, source=None):
        """
        Args:
            start:  integer start coordinate (may be greater than `end`)
            end:    integer end coordinate
            source: optional identifier of the alignment the segment
                    came from
        """
        assert isinstance(start, int)
        assert isinstance(end, int)
        # A zero-length segment would have no orientation, so forbid it.
        assert start != end
        self._start = start
        self._end = end
        self._source = source
        self._forward = start < end

    @property
    def start(self):
        return self._start

    @property
    def end(self):
        return self._end

    @property
    def source(self):
        return self._source

    @property
    def leftmost(self):
        # Smallest coordinate regardless of orientation.
        return min(self.start, self.end)

    @property
    def rightmost(self):
        # Largest coordinate regardless of orientation.
        return max(self.start, self.end)

    @property
    def forward(self):
        return self._forward

    @property
    def reverse(self):
        return not self._forward

    @property
    def orientation(self):
        if self.forward:
            return 'forward'
        elif self.reverse:
            return 'reverse'

    def __len__(self):
        return abs(self.start - self.end)

    def __repr__(self):
        return '<Segment: {0}-{1} from {2}>'.format(self.start, self.end,
                                                    self.source)

    def __lt__(self, other):
        # Segments are ordered by physical position, not by raw `start`.
        assert isinstance(other, Segment)
        return self.leftmost < other.leftmost

    def __gt__(self, other):
        assert isinstance(other, Segment)
        return self.leftmost > other.leftmost

    def overlaps(self, other):
        """
        Return True if `other` has the same orientation as this segment
        and the two intervals share at least one coordinate.

        BUG FIX: the previous implementation compared raw start/end
        values, which only works for forward segments -- for reverse
        segments (end < start) overlapping pairs were reported as
        disjoint -- and it also missed the case where one segment fully
        contains the other.  Comparing the physical (leftmost/rightmost)
        intervals handles forward, reverse and containment uniformly.
        """
        assert isinstance(other, Segment)
        if self.orientation != other.orientation:
            return False
        return (self.leftmost <= other.rightmost and
                other.leftmost <= self.rightmost)

    def combine(self, other):
        """
        Merge two overlapping, same-orientation segments into a single
        segment spanning both.  The source is kept only if both segments
        agree on it; otherwise it is discarded.
        """
        assert isinstance(other, Segment)
        assert self.overlaps(other)
        if self.forward:
            start = min(self.start, other.start)
            end = max(self.end, other.end)
        else:
            # For reverse segments `start` is the rightmost coordinate.
            start = max(self.start, other.start)
            end = min(self.end, other.end)
        # Maintain the Source if shared, otherwise discard
        if self.source == other.source:
            return Segment(start, end, self.source)
        return Segment(start, end)

    def contains(self, other):
        """Return True if `other` lies entirely within this segment."""
        assert isinstance(other, Segment)
        if (self.leftmost <= other.leftmost) and \
           (self.rightmost >= other.rightmost):
            return True
        return False

    def is_contained_by(self, other):
        """Return True if this segment lies entirely within `other`."""
        assert isinstance(other, Segment)
        return other.contains(self)
bsd-3-clause
tkhirianov/kpk2016
graphs/input_graph.py
1
2588
import networkx import matplotlib.pyplot as plt def input_edges_list(): """считывает список рёбер в форме: в первой строке N - число рёбер, затем следует N строк из двух слов и одного числа слова - названия вершин, концы ребра, а число - его вес return граф в форме словаря рёбер и соответствующих им весов """ N = int(input('Введите количество рёбер:')) G = {} for i in range(N): vertex1, vertex2, weight = input().split() weight = float(weight) G[(vertex1, vertex2)] = weight return G def edges_list_to_adjacency_list(E): """E - граф в форме словаря рёбер и соответствующих им весов return граф в форме словаря словарей смежности с весами """ G = {} for vertex1, vertex2 in E: weight = E[(vertex1, vertex2)] # добавляю ребро (vertex1, vertex2) if vertex1 not in G: G[vertex1] = {vertex2:weight} else: # значит такая вершина уже встречалась G[vertex1][vertex2] = weight # граф не направленный, поэтому добавляю ребро (vertex2, vertex1) if vertex2 not in G: G[vertex2] = {vertex1:weight} else: # значит такая вершина уже встречалась G[vertex2][vertex1] = weight return G def dfs(G, start, called = set(), skelet = set()): called.add(start) for neighbour in G[start]: if neighbour not in called: dfs(G, neighbour, called, skelet) skelet.add((start, neighbour)) s = """A B 1 B D 1 B C 2 C A 2 C D 3 D E 5""".split('\n') E = {} for line in s: a, b, weight = line.split() E[(a, b)] = int(weight) A = edges_list_to_adjacency_list(E) called = set() skelet = set() dfs(A, 'A', called, skelet) print(called) print(skelet) G = networkx.Graph(A) position = networkx.spring_layout(G) # positions for all nodes networkx.draw(G, position) networkx.draw_networkx_labels(G, position) networkx.draw_networkx_edge_labels(G, position, edge_labels=E) # нарисуем остовное дерево: networkx.draw_networkx_edges(G, position, edgelist=skelet, width=5, alpha=0.5, edge_color='red') plt.show() # display
gpl-3.0
justinpotts/mozillians
vendor-local/lib/python/markdown/extensions/abbr.py
11
3057
''' Abbreviation Extension for Python-Markdown ========================================== This extension adds abbreviation handling to Python-Markdown. Simple Usage: >>> import markdown >>> text = """ ... Some text with an ABBR and a REF. Ignore REFERENCE and ref. ... ... *[ABBR]: Abbreviation ... *[REF]: Abbreviation Reference ... """ >>> print markdown.markdown(text, ['abbr']) <p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p> Copyright 2007-2008 * [Waylan Limberg](http://achinghead.com/) * [Seemant Kulleen](http://www.kulleen.org/) ''' from __future__ import absolute_import from __future__ import unicode_literals from . import Extension from ..preprocessors import Preprocessor from ..inlinepatterns import Pattern from ..util import etree, AtomicString import re # Global Vars ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)') class AbbrExtension(Extension): """ Abbreviation Extension for Python-Markdown. """ def extendMarkdown(self, md, md_globals): """ Insert AbbrPreprocessor before ReferencePreprocessor. """ md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference') class AbbrPreprocessor(Preprocessor): """ Abbreviation Preprocessor - parse text for abbr references. """ def run(self, lines): ''' Find and remove all Abbreviation references from the text. Each reference is set as a new AbbrPattern in the markdown instance. ''' new_text = [] for line in lines: m = ABBR_REF_RE.match(line) if m: abbr = m.group('abbr').strip() title = m.group('title').strip() self.markdown.inlinePatterns['abbr-%s'%abbr] = \ AbbrPattern(self._generate_pattern(abbr), title) else: new_text.append(line) return new_text def _generate_pattern(self, text): ''' Given a string, returns an regex pattern to match that string. 'HTML' -> r'(?P<abbr>[H][T][M][L])' Note: we force each char as a literal match (in brackets) as we don't know what they will be beforehand. 
''' chars = list(text) for i in range(len(chars)): chars[i] = r'[%s]' % chars[i] return r'(?P<abbr>\b%s\b)' % (r''.join(chars)) class AbbrPattern(Pattern): """ Abbreviation inline pattern. """ def __init__(self, pattern, title): super(AbbrPattern, self).__init__(pattern) self.title = title def handleMatch(self, m): abbr = etree.Element('abbr') abbr.text = AtomicString(m.group('abbr')) abbr.set('title', self.title) return abbr def makeExtension(configs=None): return AbbrExtension(configs=configs)
bsd-3-clause
jkankiewicz/kivy
examples/canvas/fbo_canvas.py
59
2544
'''
FBO Canvas
==========

This demonstrates a layout using an FBO (Frame Buffer Off-screen)
instead of a plain canvas. You should see a black canvas with a
button labelled 'FBO' in the bottom left corner. Clicking it
animates the button moving right to left.
'''

__all__ = ('FboFloatLayout', )

from kivy.graphics import Color, Rectangle, Canvas, ClearBuffers, ClearColor
from kivy.graphics.fbo import Fbo
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty, NumericProperty
from kivy.app import App
from kivy.core.window import Window
from kivy.animation import Animation
from kivy.factory import Factory


class FboFloatLayout(FloatLayout):
    """A FloatLayout whose children are rendered into an off-screen FBO,
    which is then displayed through a single textured rectangle."""

    # Texture produced by the FBO; republished so it can be observed or
    # displayed by other widgets.
    texture = ObjectProperty(None, allownone=True)

    # Opacity applied to the rectangle that displays the FBO texture.
    alpha = NumericProperty(1)

    def __init__(self, **kwargs):
        # Build the real canvas by hand before __init__: it holds only
        # the FBO plus a colored rectangle showing the FBO's texture.
        self.canvas = Canvas()
        with self.canvas:
            self.fbo = Fbo(size=self.size)
            self.fbo_color = Color(1, 1, 1, 1)
            self.fbo_rect = Rectangle()

        # Start each FBO frame from a fully transparent background.
        with self.fbo:
            ClearColor(0, 0, 0, 0)
            ClearBuffers()

        # wait that all the instructions are in the canvas to set texture
        self.texture = self.fbo.texture

        super(FboFloatLayout, self).__init__(**kwargs)

    def add_widget(self, *largs):
        # trick to attach graphics instruction to fbo instead of canvas
        canvas = self.canvas
        self.canvas = self.fbo
        ret = super(FboFloatLayout, self).add_widget(*largs)
        self.canvas = canvas
        return ret

    def remove_widget(self, *largs):
        # Same canvas swap as add_widget, so the child's instructions are
        # removed from the FBO they were added to.
        canvas = self.canvas
        self.canvas = self.fbo
        super(FboFloatLayout, self).remove_widget(*largs)
        self.canvas = canvas

    def on_size(self, instance, value):
        # Resizing the FBO recreates its texture, so republish it and
        # keep the display rectangle in sync.
        self.fbo.size = value
        self.texture = self.fbo.texture
        self.fbo_rect.size = value

    def on_pos(self, instance, value):
        self.fbo_rect.pos = value

    def on_texture(self, instance, value):
        self.fbo_rect.texture = value

    def on_alpha(self, instance, value):
        self.fbo_color.rgba = (1, 1, 1, value)


class ScreenLayerApp(App):
    """Demo app: an FBO-backed layout with one animated button."""

    def build(self):
        f = FboFloatLayout()
        b = Factory.Button(text="FBO", size_hint=(None, None))
        f.add_widget(b)

        def anim_btn(*args):
            # Bounce the button between the left and right edges.
            if b.pos[0] == 0:
                Animation(x=f.width - b.width).start(b)
            else:
                Animation(x=0).start(b)
        b.bind(on_press=anim_btn)

        return f


if __name__ == "__main__":
    ScreenLayerApp().run()
mit
kogotko/carburetor
horizon/contrib/bootstrap_datepicker.py
87
1584
# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Map Horizon languages to datepicker locales LOCALE_MAPPING = { 'ar': 'ar', 'az': 'az', 'bg': 'bg', 'ca': 'ca', 'cs': 'cs', 'cy': 'cy', 'da': 'da', 'de': 'de', 'el': 'el', 'es': 'es', 'et': 'et', 'fa': 'fa', 'fi': 'fi', 'fr': 'fr', 'gl': 'gl', 'he': 'he', 'hr': 'hr', 'hu': 'hu', 'id': 'id', 'is': 'is', 'it': 'it', 'ja': 'ja', 'ka': 'ka', 'kk': 'kk', 'ko': 'kr', # difference between horizon and datepicker 'lt': 'lt', 'lv': 'lv', 'mk': 'mk', 'ms': 'ms', 'nb': 'nb', 'nl-be': 'nl-BE', 'nl': 'nl', 'no': 'no', 'pl': 'pl', 'pt-br': 'pt-BR', 'pt': 'pt', 'ro': 'ro', 'rs-latin': 'rs-latin', 'sr': 'rs', # difference between horizon and datepicker 'ru': 'ru', 'sk': 'sk', 'sl': 'sl', 'sq': 'sq', 'sv': 'sv', 'sw': 'sw', 'th': 'th', 'tr': 'tr', 'ua': 'ua', 'vi': 'vi', 'zh-cn': 'zh-CN', 'zh-tw': 'zh-TW', }
apache-2.0
Teamxrtc/webrtc-streaming-node
third_party/webrtc/src/chromium/src/tools/swarming_client/swarming.py
1
46560
#!/usr/bin/env python # Copyright 2013 The Swarming Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 that # can be found in the LICENSE file. """Client tool to trigger tasks or retrieve results from a Swarming server.""" __version__ = '0.8.2' import collections import datetime import json import logging import optparse import os import subprocess import sys import threading import time import urllib from third_party import colorama from third_party.depot_tools import fix_encoding from third_party.depot_tools import subcommand from utils import file_path from utils import logging_utils from third_party.chromium import natsort from utils import net from utils import on_error from utils import threading_utils from utils import tools import auth import isolated_format import isolateserver ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) class Failure(Exception): """Generic failure.""" pass ### Isolated file handling. def isolated_to_hash(arg, algo): """Archives a .isolated file if needed. Returns the file hash to trigger and a bool specifying if it was a file (True) or a hash (False). """ if arg.endswith('.isolated'): file_hash = isolated_format.hash_file(arg, algo) if not file_hash: on_error.report('Archival failure %s' % arg) return None, True return file_hash, True elif isolated_format.is_valid_hash(arg, algo): return arg, False else: on_error.report('Invalid hash %s' % arg) return None, False def isolated_handle_options(options, args): """Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments. Returns: tuple(command, inputs_ref). """ isolated_cmd_args = [] if not options.isolated: if '--' in args: index = args.index('--') isolated_cmd_args = args[index+1:] args = args[:index] else: # optparse eats '--' sometimes. 
isolated_cmd_args = args[1:] args = args[:1] if len(args) != 1: raise ValueError( 'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called ' 'process.') # Old code. To be removed eventually. options.isolated, is_file = isolated_to_hash( args[0], isolated_format.get_hash_algo(options.namespace)) if not options.isolated: raise ValueError('Invalid argument %s' % args[0]) elif args: is_file = False if '--' in args: index = args.index('--') isolated_cmd_args = args[index+1:] if index != 0: raise ValueError('Unexpected arguments.') else: # optparse eats '--' sometimes. isolated_cmd_args = args # If a file name was passed, use its base name of the isolated hash. # Otherwise, use user name as an approximation of a task name. if not options.task_name: if is_file: key = os.path.splitext(os.path.basename(args[0]))[0] else: key = options.user options.task_name = u'%s/%s/%s' % ( key, '_'.join( '%s=%s' % (k, v) for k, v in sorted(options.dimensions.iteritems())), options.isolated) inputs_ref = FilesRef( isolated=options.isolated, isolatedserver=options.isolate_server, namespace=options.namespace) return isolated_cmd_args, inputs_ref ### Triggering. # See ../appengine/swarming/swarming_rpcs.py. FilesRef = collections.namedtuple( 'FilesRef', [ 'isolated', 'isolatedserver', 'namespace', ]) # See ../appengine/swarming/swarming_rpcs.py. TaskProperties = collections.namedtuple( 'TaskProperties', [ 'command', 'dimensions', 'env', 'execution_timeout_secs', 'extra_args', 'grace_period_secs', 'idempotent', 'inputs_ref', 'io_timeout_secs', ]) # See ../appengine/swarming/swarming_rpcs.py. 
NewTaskRequest = collections.namedtuple( 'NewTaskRequest', [ 'expiration_secs', 'name', 'parent_task_id', 'priority', 'properties', 'tags', 'user', ]) def namedtuple_to_dict(value): """Recursively converts a namedtuple to a dict.""" out = dict(value._asdict()) for k, v in out.iteritems(): if hasattr(v, '_asdict'): out[k] = namedtuple_to_dict(v) return out def task_request_to_raw_request(task_request): """Returns the json dict expected by the Swarming server for new request. This is for the v1 client Swarming API. """ out = namedtuple_to_dict(task_request) # Maps are not supported until protobuf v3. out['properties']['dimensions'] = [ {'key': k, 'value': v} for k, v in out['properties']['dimensions'].iteritems() ] out['properties']['dimensions'].sort(key=lambda x: x['key']) out['properties']['env'] = [ {'key': k, 'value': v} for k, v in out['properties']['env'].iteritems() ] out['properties']['env'].sort(key=lambda x: x['key']) return out def swarming_trigger(swarming, raw_request): """Triggers a request on the Swarming server and returns the json data. It's the low-level function. Returns: { 'request': { 'created_ts': u'2010-01-02 03:04:05', 'name': .. }, 'task_id': '12300', } """ logging.info('Triggering: %s', raw_request['name']) result = net.url_read_json( swarming + '/_ah/api/swarming/v1/tasks/new', data=raw_request) if not result: on_error.report('Failed to trigger task %s' % raw_request['name']) return None return result def setup_googletest(env, shards, index): """Sets googletest specific environment variables.""" if shards > 1: assert not any(i['key'] == 'GTEST_SHARD_INDEX' for i in env), env assert not any(i['key'] == 'GTEST_TOTAL_SHARDS' for i in env), env env = env[:] env.append({'key': 'GTEST_SHARD_INDEX', 'value': str(index)}) env.append({'key': 'GTEST_TOTAL_SHARDS', 'value': str(shards)}) return env def trigger_task_shards(swarming, task_request, shards): """Triggers one or many subtasks of a sharded task. 
Returns: Dict with task details, returned to caller as part of --dump-json output. None in case of failure. """ def convert(index): req = task_request_to_raw_request(task_request) if shards > 1: req['properties']['env'] = setup_googletest( req['properties']['env'], shards, index) req['name'] += ':%s:%s' % (index, shards) return req requests = [convert(index) for index in xrange(shards)] tasks = {} priority_warning = False for index, request in enumerate(requests): task = swarming_trigger(swarming, request) if not task: break logging.info('Request result: %s', task) if (not priority_warning and task['request']['priority'] != task_request.priority): priority_warning = True print >> sys.stderr, ( 'Priority was reset to %s' % task['request']['priority']) tasks[request['name']] = { 'shard_index': index, 'task_id': task['task_id'], 'view_url': '%s/user/task/%s' % (swarming, task['task_id']), } # Some shards weren't triggered. Abort everything. if len(tasks) != len(requests): if tasks: print >> sys.stderr, 'Only %d shard(s) out of %d were triggered' % ( len(tasks), len(requests)) for task_dict in tasks.itervalues(): abort_task(swarming, task_dict['task_id']) return None return tasks ### Collection. # How often to print status updates to stdout in 'collect'. STATUS_UPDATE_INTERVAL = 15 * 60. class State(object): """States in which a task can be. WARNING: Copy-pasted from appengine/swarming/server/task_result.py. These values are part of the API so if they change, the API changed. It's in fact an enum. Values should be in decreasing order of importance. 
""" RUNNING = 0x10 PENDING = 0x20 EXPIRED = 0x30 TIMED_OUT = 0x40 BOT_DIED = 0x50 CANCELED = 0x60 COMPLETED = 0x70 STATES = ( 'RUNNING', 'PENDING', 'EXPIRED', 'TIMED_OUT', 'BOT_DIED', 'CANCELED', 'COMPLETED') STATES_RUNNING = ('RUNNING', 'PENDING') STATES_NOT_RUNNING = ( 'EXPIRED', 'TIMED_OUT', 'BOT_DIED', 'CANCELED', 'COMPLETED') STATES_DONE = ('TIMED_OUT', 'COMPLETED') STATES_ABANDONED = ('EXPIRED', 'BOT_DIED', 'CANCELED') _NAMES = { RUNNING: 'Running', PENDING: 'Pending', EXPIRED: 'Expired', TIMED_OUT: 'Execution timed out', BOT_DIED: 'Bot died', CANCELED: 'User canceled', COMPLETED: 'Completed', } _ENUMS = { 'RUNNING': RUNNING, 'PENDING': PENDING, 'EXPIRED': EXPIRED, 'TIMED_OUT': TIMED_OUT, 'BOT_DIED': BOT_DIED, 'CANCELED': CANCELED, 'COMPLETED': COMPLETED, } @classmethod def to_string(cls, state): """Returns a user-readable string representing a State.""" if state not in cls._NAMES: raise ValueError('Invalid state %s' % state) return cls._NAMES[state] @classmethod def from_enum(cls, state): """Returns int value based on the string.""" if state not in cls._ENUMS: raise ValueError('Invalid state %s' % state) return cls._ENUMS[state] class TaskOutputCollector(object): """Assembles task execution summary (for --task-summary-json output). Optionally fetches task outputs from isolate server to local disk (used when --task-output-dir is passed). This object is shared among multiple threads running 'retrieve_results' function, in particular they call 'process_shard_result' method in parallel. """ def __init__(self, task_output_dir, task_name, shard_count): """Initializes TaskOutputCollector, ensures |task_output_dir| exists. Args: task_output_dir: (optional) local directory to put fetched files to. task_name: name of the swarming task results belong to. shard_count: expected number of task shards. 
""" self.task_output_dir = task_output_dir self.task_name = task_name self.shard_count = shard_count self._lock = threading.Lock() self._per_shard_results = {} self._storage = None if self.task_output_dir and not os.path.isdir(self.task_output_dir): os.makedirs(self.task_output_dir) def process_shard_result(self, shard_index, result): """Stores results of a single task shard, fetches output files if necessary. Modifies |result| in place. shard_index is 0-based. Called concurrently from multiple threads. """ # Sanity check index is in expected range. assert isinstance(shard_index, int) if shard_index < 0 or shard_index >= self.shard_count: logging.warning( 'Shard index %d is outside of expected range: [0; %d]', shard_index, self.shard_count - 1) return if result.get('outputs_ref'): ref = result['outputs_ref'] result['outputs_ref']['view_url'] = '%s/browse?%s' % ( ref['isolatedserver'], urllib.urlencode( [('namespace', ref['namespace']), ('hash', ref['isolated'])])) # Store result dict of that shard, ignore results we've already seen. with self._lock: if shard_index in self._per_shard_results: logging.warning('Ignoring duplicate shard index %d', shard_index) return self._per_shard_results[shard_index] = result # Fetch output files if necessary. if self.task_output_dir and result.get('outputs_ref'): storage = self._get_storage( result['outputs_ref']['isolatedserver'], result['outputs_ref']['namespace']) if storage: # Output files are supposed to be small and they are not reused across # tasks. So use MemoryCache for them instead of on-disk cache. Make # files writable, so that calling script can delete them. isolateserver.fetch_isolated( result['outputs_ref']['isolated'], storage, isolateserver.MemoryCache(file_mode_mask=0700), os.path.join(self.task_output_dir, str(shard_index)), False) def finalize(self): """Assembles and returns task summary JSON, shutdowns underlying Storage.""" with self._lock: # Write an array of shard results with None for missing shards. 
summary = { 'shards': [ self._per_shard_results.get(i) for i in xrange(self.shard_count) ], } # Write summary.json to task_output_dir as well. if self.task_output_dir: tools.write_json( os.path.join(self.task_output_dir, 'summary.json'), summary, False) if self._storage: self._storage.close() self._storage = None return summary def _get_storage(self, isolate_server, namespace): """Returns isolateserver.Storage to use to fetch files.""" assert self.task_output_dir with self._lock: if not self._storage: self._storage = isolateserver.get_storage(isolate_server, namespace) else: # Shards must all use exact same isolate server and namespace. if self._storage.location != isolate_server: logging.error( 'Task shards are using multiple isolate servers: %s and %s', self._storage.location, isolate_server) return None if self._storage.namespace != namespace: logging.error( 'Task shards are using multiple namespaces: %s and %s', self._storage.namespace, namespace) return None return self._storage def now(): """Exists so it can be mocked easily.""" return time.time() def parse_time(value): """Converts serialized time from the API to datetime.datetime.""" # When microseconds are 0, the '.123456' suffix is elided. This means the # serialized format is not consistent, which confuses the hell out of python. for fmt in ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'): try: return datetime.datetime.strptime(value, fmt) except ValueError: pass raise ValueError('Failed to parse %s' % value) def retrieve_results( base_url, shard_index, task_id, timeout, should_stop, output_collector): """Retrieves results for a single task ID. Returns: <result dict> on success. None on failure. 
""" assert isinstance(timeout, float), timeout result_url = '%s/_ah/api/swarming/v1/task/%s/result' % (base_url, task_id) output_url = '%s/_ah/api/swarming/v1/task/%s/stdout' % (base_url, task_id) started = now() deadline = started + timeout if timeout else None attempt = 0 while not should_stop.is_set(): attempt += 1 # Waiting for too long -> give up. current_time = now() if deadline and current_time >= deadline: logging.error('retrieve_results(%s) timed out on attempt %d', base_url, attempt) return None # Do not spin too fast. Spin faster at the beginning though. # Start with 1 sec delay and for each 30 sec of waiting add another second # of delay, until hitting 15 sec ceiling. if attempt > 1: max_delay = min(15, 1 + (current_time - started) / 30.0) delay = min(max_delay, deadline - current_time) if deadline else max_delay if delay > 0: logging.debug('Waiting %.1f sec before retrying', delay) should_stop.wait(delay) if should_stop.is_set(): return None # Disable internal retries in net.url_read_json, since we are doing retries # ourselves. # TODO(maruel): We'd need to know if it's a 404 and not retry at all. result = net.url_read_json(result_url, retry_50x=False) if not result: continue if result['state'] in State.STATES_NOT_RUNNING: # TODO(maruel): Not always fetch stdout? out = net.url_read_json(output_url) result['output'] = out.get('output') if out else out if not result['output']: logging.error('No output found for task %s', task_id) # Record the result, try to fetch attached output files (if any). if output_collector: # TODO(vadimsh): Respect |should_stop| and |deadline| when fetching. output_collector.process_shard_result(shard_index, result) if result.get('internal_failure'): logging.error('Internal error!') elif result['state'] == 'BOT_DIED': logging.error('Bot died!') return result def convert_to_old_format(result): """Converts the task result data from Endpoints API format to old API format for compatibility. 
This goes into the file generated as --task-summary-json. """ # Sets default. result.setdefault('abandoned_ts', None) result.setdefault('bot_id', None) result.setdefault('bot_version', None) result.setdefault('children_task_ids', []) result.setdefault('completed_ts', None) result.setdefault('cost_saved_usd', None) result.setdefault('costs_usd', None) result.setdefault('deduped_from', None) result.setdefault('name', None) result.setdefault('outputs_ref', None) result.setdefault('properties_hash', None) result.setdefault('server_versions', None) result.setdefault('started_ts', None) result.setdefault('tags', None) result.setdefault('user', None) # Convertion back to old API. duration = result.pop('duration', None) result['durations'] = [duration] if duration else [] exit_code = result.pop('exit_code', None) result['exit_codes'] = [int(exit_code)] if exit_code else [] result['id'] = result.pop('task_id') result['isolated_out'] = result.get('outputs_ref', None) output = result.pop('output', None) result['outputs'] = [output] if output else [] # properties_hash # server_version # Endpoints result 'state' as string. For compatibility with old code, convert # to int. result['state'] = State.from_enum(result['state']) result['try_number'] = ( int(result['try_number']) if result.get('try_number') else None) if 'bot_dimensions' in result: result['bot_dimensions'] = { i['key']: i['value'] for i in result['bot_dimensions'] } else: result['bot_dimensions'] = None def yield_results( swarm_base_url, task_ids, timeout, max_threads, print_status_updates, output_collector): """Yields swarming task results from the swarming server as (index, result). Duplicate shards are ignored. Shards are yielded in order of completion. Timed out shards are NOT yielded at all. Caller can compare number of yielded shards with len(task_keys) to verify all shards completed. max_threads is optional and is used to limit the number of parallel fetches done. 
Since in general the number of task_keys is in the range <=10, it's not worth normally to limit the number threads. Mostly used for testing purposes. output_collector is an optional instance of TaskOutputCollector that will be used to fetch files produced by a task from isolate server to the local disk. Yields: (index, result). In particular, 'result' is defined as the GetRunnerResults() function in services/swarming/server/test_runner.py. """ number_threads = ( min(max_threads, len(task_ids)) if max_threads else len(task_ids)) should_stop = threading.Event() results_channel = threading_utils.TaskChannel() with threading_utils.ThreadPool(number_threads, number_threads, 0) as pool: try: # Adds a task to the thread pool to call 'retrieve_results' and return # the results together with shard_index that produced them (as a tuple). def enqueue_retrieve_results(shard_index, task_id): task_fn = lambda *args: (shard_index, retrieve_results(*args)) pool.add_task( 0, results_channel.wrap_task(task_fn), swarm_base_url, shard_index, task_id, timeout, should_stop, output_collector) # Enqueue 'retrieve_results' calls for each shard key to run in parallel. for shard_index, task_id in enumerate(task_ids): enqueue_retrieve_results(shard_index, task_id) # Wait for all of them to finish. shards_remaining = range(len(task_ids)) active_task_count = len(task_ids) while active_task_count: shard_index, result = None, None try: shard_index, result = results_channel.pull( timeout=STATUS_UPDATE_INTERVAL) except threading_utils.TaskChannel.Timeout: if print_status_updates: print( 'Waiting for results from the following shards: %s' % ', '.join(map(str, shards_remaining))) sys.stdout.flush() continue except Exception: logging.exception('Unexpected exception in retrieve_results') # A call to 'retrieve_results' finished (successfully or not). active_task_count -= 1 if not result: logging.error('Failed to retrieve the results for a swarming key') continue # Yield back results to the caller. 
assert shard_index in shards_remaining shards_remaining.remove(shard_index) yield shard_index, result finally: # Done or aborted with Ctrl+C, kill the remaining threads. should_stop.set() def decorate_shard_output(swarming, shard_index, metadata): """Returns wrapped output for swarming task shard.""" if metadata.get('started_ts') and not metadata.get('deduped_from'): pending = '%.1fs' % ( parse_time(metadata['started_ts']) - parse_time(metadata['created_ts']) ).total_seconds() else: pending = 'N/A' if metadata.get('duration') is not None: duration = '%.1fs' % metadata['duration'] else: duration = 'N/A' if metadata.get('exit_code') is not None: # Integers are encoded as string to not loose precision. exit_code = '%s' % metadata['exit_code'] else: exit_code = 'N/A' bot_id = metadata.get('bot_id') or 'N/A' url = '%s/user/task/%s' % (swarming, metadata['task_id']) tag_header = 'Shard %d %s' % (shard_index, url) tag_footer = ( 'End of shard %d Pending: %s Duration: %s Bot: %s Exit: %s' % ( shard_index, pending, duration, bot_id, exit_code)) tag_len = max(len(tag_header), len(tag_footer)) dash_pad = '+-%s-+\n' % ('-' * tag_len) tag_header = '| %s |\n' % tag_header.ljust(tag_len) tag_footer = '| %s |\n' % tag_footer.ljust(tag_len) header = dash_pad + tag_header + dash_pad footer = dash_pad + tag_footer + dash_pad[:-1] output = (metadata.get('output') or '').rstrip() + '\n' return header + output + footer def collect( swarming, task_name, task_ids, timeout, decorate, print_status_updates, task_summary_json, task_output_dir): """Retrieves results of a Swarming task. Returns: process exit code that should be returned to the user. """ # Collect summary JSON and output files (if task_output_dir is not None). 
output_collector = TaskOutputCollector( task_output_dir, task_name, len(task_ids)) seen_shards = set() exit_code = None total_duration = 0 try: for index, metadata in yield_results( swarming, task_ids, timeout, None, print_status_updates, output_collector): seen_shards.add(index) # Default to failure if there was no process that even started. shard_exit_code = metadata.get('exit_code') if shard_exit_code: # It's encoded as a string, so bool('0') is True. shard_exit_code = int(shard_exit_code) if shard_exit_code or exit_code is None: exit_code = shard_exit_code total_duration += metadata.get('duration', 0) if decorate: print(decorate_shard_output(swarming, index, metadata)) if len(seen_shards) < len(task_ids): print('') else: print('%s: %s %s' % ( metadata.get('bot_id', 'N/A'), metadata['task_id'], shard_exit_code)) if metadata['output']: output = metadata['output'].rstrip() if output: print(''.join(' %s\n' % l for l in output.splitlines())) finally: summary = output_collector.finalize() if task_summary_json: # TODO(maruel): Make this optional. for i in summary['shards']: if i: convert_to_old_format(i) tools.write_json(task_summary_json, summary, False) if decorate and total_duration: print('Total duration: %.1fs' % total_duration) if len(seen_shards) != len(task_ids): missing_shards = [x for x in range(len(task_ids)) if x not in seen_shards] print >> sys.stderr, ('Results from some shards are missing: %s' % ', '.join(map(str, missing_shards))) return 1 return exit_code if exit_code is not None else 1 ### API management. class APIError(Exception): pass def endpoints_api_discovery_apis(host): """Uses Cloud Endpoints' API Discovery Service to returns metadata about all the APIs exposed by a host. 
https://developers.google.com/discovery/v1/reference/apis/list """ data = net.url_read_json(host + '/_ah/api/discovery/v1/apis') if data is None: raise APIError('Failed to discover APIs on %s' % host) out = {} for api in data['items']: if api['id'] == 'discovery:v1': continue # URL is of the following form: # url = host + ( # '/_ah/api/discovery/v1/apis/%s/%s/rest' % (api['id'], api['version']) api_data = net.url_read_json(api['discoveryRestUrl']) if api_data is None: raise APIError('Failed to discover %s on %s' % (api['id'], host)) out[api['id']] = api_data return out ### Commands. def abort_task(_swarming, _manifest): """Given a task manifest that was triggered, aborts its execution.""" # TODO(vadimsh): No supported by the server yet. def add_filter_options(parser): parser.filter_group = optparse.OptionGroup(parser, 'Filtering slaves') parser.filter_group.add_option( '-d', '--dimension', default=[], action='append', nargs=2, dest='dimensions', metavar='FOO bar', help='dimension to filter on') parser.add_option_group(parser.filter_group) def add_sharding_options(parser): parser.sharding_group = optparse.OptionGroup(parser, 'Sharding options') parser.sharding_group.add_option( '--shards', type='int', default=1, help='Number of shards to trigger and collect.') parser.add_option_group(parser.sharding_group) def add_trigger_options(parser): """Adds all options to trigger a task on Swarming.""" isolateserver.add_isolate_server_options(parser) add_filter_options(parser) parser.task_group = optparse.OptionGroup(parser, 'Task properties') parser.task_group.add_option( '-s', '--isolated', help='Hash of the .isolated to grab from the isolate server') parser.task_group.add_option( '-e', '--env', default=[], action='append', nargs=2, metavar='FOO bar', help='Environment variables to set') parser.task_group.add_option( '--priority', type='int', default=100, help='The lower value, the more important the task is') parser.task_group.add_option( '-T', '--task-name', help='Display 
name of the task. Defaults to ' '<base_name>/<dimensions>/<isolated hash>/<timestamp> if an ' 'isolated file is provided, if a hash is provided, it defaults to ' '<user>/<dimensions>/<isolated hash>/<timestamp>') parser.task_group.add_option( '--tags', action='append', default=[], help='Tags to assign to the task.') parser.task_group.add_option( '--user', default='', help='User associated with the task. Defaults to authenticated user on ' 'the server.') parser.task_group.add_option( '--idempotent', action='store_true', default=False, help='When set, the server will actively try to find a previous task ' 'with the same parameter and return this result instead if possible') parser.task_group.add_option( '--expiration', type='int', default=6*60*60, help='Seconds to allow the task to be pending for a bot to run before ' 'this task request expires.') parser.task_group.add_option( '--deadline', type='int', dest='expiration', help=optparse.SUPPRESS_HELP) parser.task_group.add_option( '--hard-timeout', type='int', default=60*60, help='Seconds to allow the task to complete.') parser.task_group.add_option( '--io-timeout', type='int', default=20*60, help='Seconds to allow the task to be silent.') parser.task_group.add_option( '--raw-cmd', action='store_true', default=False, help='When set, the command after -- is used as-is without run_isolated. ' 'In this case, no .isolated file is expected.') parser.add_option_group(parser.task_group) def process_trigger_options(parser, options, args): """Processes trigger options and uploads files to isolate server if necessary. 
""" options.dimensions = dict(options.dimensions) options.env = dict(options.env) if not options.dimensions: parser.error('Please at least specify one --dimension') if options.raw_cmd: if not args: parser.error( 'Arguments with --raw-cmd should be passed after -- as command ' 'delimiter.') if options.isolate_server: parser.error('Can\'t use both --raw-cmd and --isolate-server.') command = args if not options.task_name: options.task_name = u'%s/%s' % ( options.user, '_'.join( '%s=%s' % (k, v) for k, v in sorted(options.dimensions.iteritems()))) inputs_ref = None else: isolateserver.process_isolate_server_options(parser, options, False) try: command, inputs_ref = isolated_handle_options(options, args) except ValueError as e: parser.error(str(e)) # If inputs_ref is used, command is actually extra_args. Otherwise it's an # actual command to run. properties = TaskProperties( command=None if inputs_ref else command, dimensions=options.dimensions, env=options.env, execution_timeout_secs=options.hard_timeout, extra_args=command if inputs_ref else None, grace_period_secs=30, idempotent=options.idempotent, inputs_ref=inputs_ref, io_timeout_secs=options.io_timeout) return NewTaskRequest( expiration_secs=options.expiration, name=options.task_name, parent_task_id=os.environ.get('SWARMING_TASK_ID', ''), priority=options.priority, properties=properties, tags=options.tags, user=options.user) def add_collect_options(parser): parser.server_group.add_option( '-t', '--timeout', type='float', default=80*60., help='Timeout to wait for result, set to 0 for no timeout; default: ' '%default s') parser.group_logging.add_option( '--decorate', action='store_true', help='Decorate output') parser.group_logging.add_option( '--print-status-updates', action='store_true', help='Print periodic status updates') parser.task_output_group = optparse.OptionGroup(parser, 'Task output') parser.task_output_group.add_option( '--task-summary-json', metavar='FILE', help='Dump a summary of task results to this 
file as json. It contains ' 'only shards statuses as know to server directly. Any output files ' 'emitted by the task can be collected by using --task-output-dir') parser.task_output_group.add_option( '--task-output-dir', metavar='DIR', help='Directory to put task results into. When the task finishes, this ' 'directory contains per-shard directory with output files produced ' 'by shards: <task-output-dir>/<zero-based-shard-index>/.') parser.add_option_group(parser.task_output_group) @subcommand.usage('bots...') def CMDbot_delete(parser, args): """Forcibly deletes bots from the Swarming server.""" parser.add_option( '-f', '--force', action='store_true', help='Do not prompt for confirmation') options, args = parser.parse_args(args) if not args: parser.error('Please specific bots to delete') bots = sorted(args) if not options.force: print('Delete the following bots?') for bot in bots: print(' %s' % bot) if raw_input('Continue? [y/N] ') not in ('y', 'Y'): print('Goodbye.') return 1 result = 0 for bot in bots: url = '%s/_ah/api/swarming/v1/bot/%s/delete' % (options.swarming, bot) if net.url_read_json(url, data={}, method='POST') is None: print('Deleting %s failed. Probably already gone' % bot) result = 1 return result def CMDbots(parser, args): """Returns information about the bots connected to the Swarming server.""" add_filter_options(parser) parser.filter_group.add_option( '--dead-only', action='store_true', help='Only print dead bots, useful to reap them and reimage broken bots') parser.filter_group.add_option( '-k', '--keep-dead', action='store_true', help='Do not filter out dead bots') parser.filter_group.add_option( '-b', '--bare', action='store_true', help='Do not print out dimensions') options, args = parser.parse_args(args) if options.keep_dead and options.dead_only: parser.error('Use only one of --keep-dead and --dead-only') bots = [] cursor = None limit = 250 # Iterate via cursors. 
base_url = ( options.swarming + '/_ah/api/swarming/v1/bots/list?limit=%d' % limit) while True: url = base_url if cursor: url += '&cursor=%s' % urllib.quote(cursor) data = net.url_read_json(url) if data is None: print >> sys.stderr, 'Failed to access %s' % options.swarming return 1 bots.extend(data['items']) cursor = data.get('cursor') if not cursor: break for bot in natsort.natsorted(bots, key=lambda x: x['bot_id']): if options.dead_only: if not bot.get('is_dead'): continue elif not options.keep_dead and bot.get('is_dead'): continue # If the user requested to filter on dimensions, ensure the bot has all the # dimensions requested. dimensions = {i['key']: i['value'] for i in bot['dimensions']} for key, value in options.dimensions: if key not in dimensions: break # A bot can have multiple value for a key, for example, # {'os': ['Windows', 'Windows-6.1']}, so that --dimension os=Windows will # be accepted. if isinstance(dimensions[key], list): if value not in dimensions[key]: break else: if value != dimensions[key]: break else: print bot['bot_id'] if not options.bare: print ' %s' % json.dumps(dimensions, sort_keys=True) if bot.get('task_id'): print ' task: %s' % bot['task_id'] return 0 @subcommand.usage('--json file | task_id...') def CMDcollect(parser, args): """Retrieves results of one or multiple Swarming task by its ID. The result can be in multiple part if the execution was sharded. It can potentially have retries. 
""" add_collect_options(parser) parser.add_option( '-j', '--json', help='Load the task ids from .json as saved by trigger --dump-json') options, args = parser.parse_args(args) if not args and not options.json: parser.error('Must specify at least one task id or --json.') if args and options.json: parser.error('Only use one of task id or --json.') if options.json: try: with open(options.json) as f: tasks = sorted( json.load(f)['tasks'].itervalues(), key=lambda x: x['shard_index']) args = [t['task_id'] for t in tasks] except (KeyError, IOError, TypeError, ValueError): parser.error('Failed to parse %s' % options.json) else: valid = frozenset('0123456789abcdef') if any(not valid.issuperset(task_id) for task_id in args): parser.error('Task ids are 0-9a-f.') try: return collect( options.swarming, None, args, options.timeout, options.decorate, options.print_status_updates, options.task_summary_json, options.task_output_dir) except Failure: on_error.report(None) return 1 @subcommand.usage('[filename]') def CMDput_bootstrap(parser, args): """Uploads a new version of bootstrap.py.""" options, args = parser.parse_args(args) if len(args) != 1: parser.error('Must specify file to upload') url = options.swarming + '/_ah/api/swarming/v1/server/put_bootstrap' with open(args[0], 'rb') as f: content = f.read().decode('utf-8') data = net.url_read_json(url, data={'content': content}) print data return 0 @subcommand.usage('[filename]') def CMDput_bot_config(parser, args): """Uploads a new version of bot_config.py.""" options, args = parser.parse_args(args) if len(args) != 1: parser.error('Must specify file to upload') url = options.swarming + '/_ah/api/swarming/v1/server/put_bot_config' with open(args[0], 'rb') as f: content = f.read().decode('utf-8') data = net.url_read_json(url, data={'content': content}) print data return 0 @subcommand.usage('[method name]') def CMDquery(parser, args): """Returns raw JSON information via an URL endpoint. 
Use 'query-list' to gather the list of API methods from the server. Examples: Listing all bots: swarming.py query -S https://server-url bots/list Listing last 10 tasks on a specific bot named 'swarm1': swarming.py query -S https://server-url --limit 10 bot/swarm1/tasks """ CHUNK_SIZE = 250 parser.add_option( '-L', '--limit', type='int', default=200, help='Limit to enforce on limitless items (like number of tasks); ' 'default=%default') parser.add_option( '--json', help='Path to JSON output file (otherwise prints to stdout)') parser.add_option( '--progress', action='store_true', help='Prints a dot at each request to show progress') options, args = parser.parse_args(args) if len(args) != 1: parser.error( 'Must specify only method name and optionally query args properly ' 'escaped.') base_url = options.swarming + '/_ah/api/swarming/v1/' + args[0] url = base_url if options.limit: # Check check, change if not working out. merge_char = '&' if '?' in url else '?' url += '%slimit=%d' % (merge_char, min(CHUNK_SIZE, options.limit)) data = net.url_read_json(url) if data is None: # TODO(maruel): Do basic diagnostic. print >> sys.stderr, 'Failed to access %s' % url return 1 # Some items support cursors. Try to get automatically if cursors are needed # by looking at the 'cursor' items. while ( data.get('cursor') and (not options.limit or len(data['items']) < options.limit)): merge_char = '&' if '?' in base_url else '?' 
url = base_url + '%scursor=%s' % (merge_char, urllib.quote(data['cursor'])) if options.limit: url += '&limit=%d' % min(CHUNK_SIZE, options.limit - len(data['items'])) if options.progress: sys.stdout.write('.') sys.stdout.flush() new = net.url_read_json(url) if new is None: if options.progress: print('') print >> sys.stderr, 'Failed to access %s' % options.swarming return 1 data['items'].extend(new['items']) data['cursor'] = new.get('cursor') if options.progress: print('') if options.limit and len(data.get('items', [])) > options.limit: data['items'] = data['items'][:options.limit] data.pop('cursor', None) if options.json: tools.write_json(options.json, data, True) else: try: tools.write_json(sys.stdout, data, False) sys.stdout.write('\n') except IOError: pass return 0 def CMDquery_list(parser, args): """Returns list of all the Swarming APIs that can be used with command 'query'. """ parser.add_option( '--json', help='Path to JSON output file (otherwise prints to stdout)') options, args = parser.parse_args(args) if args: parser.error('No argument allowed.') try: apis = endpoints_api_discovery_apis(options.swarming) except APIError as e: parser.error(str(e)) if options.json: with open(options.json, 'wb') as f: json.dump(apis, f) else: help_url = ( 'https://apis-explorer.appspot.com/apis-explorer/?base=%s/_ah/api#p/' % options.swarming) for api_id, api in sorted(apis.iteritems()): print api_id print ' ' + api['description'] for resource_name, resource in sorted(api['resources'].iteritems()): print '' for method_name, method in sorted(resource['methods'].iteritems()): # Only list the GET ones. if method['httpMethod'] != 'GET': continue print '- %s.%s: %s' % ( resource_name, method_name, method['path']) print ' ' + method['description'] print ' %s%s%s' % (help_url, api['servicePath'], method['id']) return 0 @subcommand.usage('(hash|isolated) [-- extra_args]') def CMDrun(parser, args): """Triggers a task and wait for the results. 
Basically, does everything to run a command remotely. """ add_trigger_options(parser) add_collect_options(parser) add_sharding_options(parser) options, args = parser.parse_args(args) task_request = process_trigger_options(parser, options, args) try: tasks = trigger_task_shards( options.swarming, task_request, options.shards) except Failure as e: on_error.report( 'Failed to trigger %s(%s): %s' % (options.task_name, args[0], e.args[0])) return 1 if not tasks: on_error.report('Failed to trigger the task.') return 1 print('Triggered task: %s' % options.task_name) task_ids = [ t['task_id'] for t in sorted(tasks.itervalues(), key=lambda x: x['shard_index']) ] try: return collect( options.swarming, options.task_name, task_ids, options.timeout, options.decorate, options.print_status_updates, options.task_summary_json, options.task_output_dir) except Failure: on_error.report(None) return 1 @subcommand.usage('task_id') def CMDreproduce(parser, args): """Runs a task locally that was triggered on the server. This running locally the same commands that have been run on the bot. The data downloaded will be in a subdirectory named 'work' of the current working directory. 
""" options, args = parser.parse_args(args) if len(args) != 1: parser.error('Must specify exactly one task id.') url = options.swarming + '/_ah/api/swarming/v1/task/%s/request' % args[0] request = net.url_read_json(url) if not request: print >> sys.stderr, 'Failed to retrieve request data for the task' return 1 if not os.path.isdir('work'): os.mkdir('work') properties = request['properties'] env = None if properties['env']: env = os.environ.copy() logging.info('env: %r', properties['env']) env.update( (i['key'].encode('utf-8'), i['value'].encode('utf-8')) for i in properties['env']) try: return subprocess.call(properties['command'], env=env, cwd='work') except OSError as e: print >> sys.stderr, 'Failed to run: %s' % ' '.join(properties['command']) print >> sys.stderr, str(e) return 1 @subcommand.usage("(hash|isolated) [-- extra_args|raw command]") def CMDtrigger(parser, args): """Triggers a Swarming task. Accepts either the hash (sha1) of a .isolated file already uploaded or the path to an .isolated file to archive. If an .isolated file is specified instead of an hash, it is first archived. Passes all extra arguments provided after '--' as additional command line arguments for an isolated command specified in *.isolate file. 
""" add_trigger_options(parser) add_sharding_options(parser) parser.add_option( '--dump-json', metavar='FILE', help='Dump details about the triggered task(s) to this file as json') options, args = parser.parse_args(args) task_request = process_trigger_options(parser, options, args) try: tasks = trigger_task_shards( options.swarming, task_request, options.shards) if tasks: print('Triggered task: %s' % options.task_name) tasks_sorted = sorted( tasks.itervalues(), key=lambda x: x['shard_index']) if options.dump_json: data = { 'base_task_name': options.task_name, 'tasks': tasks, } tools.write_json(options.dump_json, data, True) print('To collect results, use:') print(' swarming.py collect -S %s --json %s' % (options.swarming, options.dump_json)) else: print('To collect results, use:') print(' swarming.py collect -S %s %s' % (options.swarming, ' '.join(t['task_id'] for t in tasks_sorted))) print('Or visit:') for t in tasks_sorted: print(' ' + t['view_url']) return int(not tasks) except Failure: on_error.report(None) return 1 class OptionParserSwarming(logging_utils.OptionParserWithLogging): def __init__(self, **kwargs): logging_utils.OptionParserWithLogging.__init__( self, prog='swarming.py', **kwargs) self.server_group = optparse.OptionGroup(self, 'Server') self.server_group.add_option( '-S', '--swarming', metavar='URL', default=os.environ.get('SWARMING_SERVER', ''), help='Swarming server to use') self.add_option_group(self.server_group) auth.add_auth_options(self) def parse_args(self, *args, **kwargs): options, args = logging_utils.OptionParserWithLogging.parse_args( self, *args, **kwargs) auth.process_auth_options(self, options) user = self._process_swarming(options) if hasattr(options, 'user') and not options.user: options.user = user return options, args def _process_swarming(self, options): """Processes the --swarming option and aborts if not specified. Returns the identity as determined by the server. 
""" if not options.swarming: self.error('--swarming is required.') try: options.swarming = net.fix_url(options.swarming) except ValueError as e: self.error('--swarming %s' % e) on_error.report_on_exception_exit(options.swarming) try: user = auth.ensure_logged_in(options.swarming) except ValueError as e: self.error(str(e)) return user def main(args): dispatcher = subcommand.CommandDispatcher(__name__) return dispatcher.execute(OptionParserSwarming(version=__version__), args) if __name__ == '__main__': fix_encoding.fix_encoding() tools.disable_buffering() colorama.init() sys.exit(main(sys.argv[1:]))
mit
freakynit/kaggle-ndsb
configurations/featharalick_convroll5_preinit_resume_drop@420.py
6
2821
# Training configuration (Python 2) for the Kaggle NDSB plankton pipeline:
# blends precomputed convnet predictions with hand-crafted Haralick texture
# features via a small dense "correction" network.
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta

# Hand-crafted feature sets to load; only Haralick textures are enabled here.
# The commented-out entries are alternatives tried during experimentation.
features = [
    # "hu",
    # "tutorial",
    "haralick",
    # "aaronmoments",
    # "lbp",
    # "pftas",
    # "zernike_moments",
    # "image_size",
]

batch_size = 128
chunk_size = 32768            # samples loaded per chunk
num_chunks_train = 240        # total training chunks -> total iterations

momentum = 0.9
# Step-wise learning-rate decay, keyed by chunk index.
learning_rate_schedule = {
    0: 0.001,
    100: 0.0001,
    200: 0.00001,
}
validate_every = 40           # evaluate on the validation split every N chunks
save_every = 40               # checkpoint the parameters every N chunks

# Precomputed averaged softmax predictions from the convroll5 convnet run;
# these are the primary input that this model refines.
sdir = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/"
train_pred_file = sdir+"train--convroll5_preinit_resume_drop@420--convroll5_preinit_resume_drop@420-schaap-20150225-131159--avg-probs.npy"
valid_pred_file = sdir+"valid--convroll5_preinit_resume_drop@420--convroll5_preinit_resume_drop@420-schaap-20150225-131159--avg-probs.npy"
test_pred_file = sdir+"test--convroll5_preinit_resume_drop@420--convroll5_preinit_resume_drop@420-schaap-20150225-131159--avg-probs.npy"

# Loader that pairs the stored convnet predictions with the feature vectors.
data_loader = load.PredictionsWithFeaturesDataLoader(
    features = features,
    train_pred_file=train_pred_file,
    valid_pred_file=valid_pred_file,
    test_pred_file=test_pred_file,
    num_chunks_train=num_chunks_train,
    chunk_size=chunk_size)

# Generator factories consumed by the training / evaluation harness.
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")


def build_model():
    """Build the blending network.

    Returns a pair ([input_layers], output_layer): the two inputs are the
    convnet class probabilities and the 52-dim feature vector; the output is
    a softmax over the classes.  The feature branch is mapped to class-logit
    space and added to the log of the convnet probabilities, i.e. the feature
    network multiplicatively corrects the convnet prediction.
    """
    # presumably 52 = size of the haralick feature vector — TODO confirm
    # against PredictionsWithFeaturesDataLoader.
    l0 = nn.layers.InputLayer((batch_size, data.num_classes))
    l0_size = nn.layers.InputLayer((batch_size, 52))
    l1_size = nn.layers.DenseLayer(l0_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    l2_size = nn.layers.DenseLayer(l1_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    l3_size = nn.layers.DenseLayer(l2_size, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)

    # Work in log-probability space so the elementwise sum below acts as a
    # multiplicative correction of the convnet predictions.
    l1 = nn_plankton.NonlinLayer(l0, T.log)

    ltot = nn.layers.ElemwiseSumLayer([l1, l3_size])
    # norm_by_sum = lambda x: x / x.sum(1).dimshuffle(0, "x")
    lout = nn_plankton.NonlinLayer(ltot, nonlinearity=T.nnet.softmax)

    return [l0, l0_size], lout


def build_objective(l_ins, l_out):
    """Return the training objective: log loss plus elastic-net weight decay.

    alpha interpolates between pure L2 (alpha=0) and pure L1 (alpha=1)
    regularization over all non-bias parameters.  l_ins is unused but kept
    for the harness's expected signature.
    """
    reg_param = 0.0002
    alpha = 0. # 0 -> L2 1-> L1
    print "regu", reg_param, alpha
    # lambda_reg = 0.005
    params = nn.layers.get_all_non_bias_params(l_out)
    # reg_term = sum(T.sum(p**2) for p in params)
    L2 = sum(T.sum(p**2) for p in params)
    L1 = sum(T.sum(T.abs_(p)) for p in params)

    def loss(y, t):
        return nn_plankton.log_loss(y, t) + reg_param*(alpha * L1 + (1-alpha) * L2)
    return nn.objectives.Objective(l_out, loss_function=loss)
mit
lgeiger/ide-python
lib/debugger/VendorLib/vs-py-debugger/pythonFiles/jedi/common/context.py
12
1798
class BaseContext(object):
    """A node in a chain of evaluation contexts.

    Contexts are linked through ``parent_context``; the outermost context
    (usually the module) has ``parent_context`` set to ``None``.
    """

    def __init__(self, evaluator, parent_context=None):
        self.evaluator = evaluator
        self.parent_context = parent_context

    def get_root_context(self):
        """Follow the parent chain and return the outermost context."""
        ctx = self
        while ctx.parent_context is not None:
            ctx = ctx.parent_context
        return ctx


class BaseContextSet(object):
    """An unordered collection of contexts with set-style combinators.

    Unknown attribute access is forwarded to every contained context and the
    results are merged back into a new set (see ``__getattr__``).
    """

    def __init__(self, *args):
        self._set = set(args)

    @classmethod
    def from_iterable(cls, iterable):
        """Build a set from any iterable of contexts."""
        return cls.from_set(set(iterable))

    @classmethod
    def from_set(cls, set_):
        """Wrap an existing ``set`` without copying it."""
        instance = cls()
        instance._set = set_
        return instance

    @classmethod
    def from_sets(cls, sets):
        """
        Used to work with an iterable of set.
        """
        combined = set()
        for candidate in list(sets):
            if isinstance(candidate, BaseContextSet):
                combined |= candidate._set
            else:
                combined |= candidate
        return cls.from_set(combined)

    def __or__(self, other):
        return type(self).from_set(self._set | other._set)

    def __iter__(self):
        return iter(self._set)

    def __bool__(self):
        return bool(self._set)

    def __len__(self):
        return len(self._set)

    def __repr__(self):
        contents = ', '.join(str(s) for s in self._set)
        return '%s(%s)' % (self.__class__.__name__, contents)

    def filter(self, filter_func):
        """Return a new set containing only elements passing *filter_func*."""
        return type(self).from_iterable(
            element for element in self._set if filter_func(element)
        )

    def __getattr__(self, name):
        # Fan the call out to every contained context, then merge the
        # resulting sets into one BaseContextSet.
        def mapper(*args, **kwargs):
            return type(self).from_sets(
                getattr(context, name)(*args, **kwargs)
                for context in self._set
            )
        return mapper
mit
labcodes/django
django/contrib/gis/gdal/prototypes/ds.py
189
4413
""" This module houses the ctypes function prototypes for OGR DataSource related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*, OGR_Fld_* routines are relevant here. """ from ctypes import POINTER, c_char_p, c_double, c_int, c_long, c_void_p from django.contrib.gis.gdal.envelope import OGREnvelope from django.contrib.gis.gdal.libgdal import GDAL_VERSION, lgdal from django.contrib.gis.gdal.prototypes.generation import ( const_string_output, double_output, geom_output, int64_output, int_output, srs_output, void_output, voidptr_output, ) c_int_p = POINTER(c_int) # shortcut type # Driver Routines register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False) cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False) get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int]) get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p], errcheck=False) get_driver_count = int_output(lgdal.OGRGetDriverCount, []) get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p], decoding='ascii') # DataSource open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)]) destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False) release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p]) get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p]) get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int]) get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p]) get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p]) # Layer Routines get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int]) get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long]) get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int]) get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p]) get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p]) get_next_feature = 
voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p]) reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False) test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p]) get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p]) set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False) set_spatial_filter_rect = void_output( lgdal.OGR_L_SetSpatialFilterRect, [c_void_p, c_double, c_double, c_double, c_double], errcheck=False ) # Feature Definition Routines get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p]) get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p]) get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p]) get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p]) get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int]) # Feature Routines clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p]) destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False) feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p]) get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p]) get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p]) get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int]) get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p]) get_field_as_datetime = int_output( lgdal.OGR_F_GetFieldAsDateTime, [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p] ) get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int]) get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int]) if GDAL_VERSION >= (2, 0): get_field_as_integer64 = int64_output(lgdal.OGR_F_GetFieldAsInteger64, [c_void_p, c_int]) get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int]) get_field_index = 
int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p]) # Field Routines get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p]) get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p]) get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p]) get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int]) get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
bsd-3-clause
klonage/nlt-gcs
packages/IronPython.StdLib.2.7.4/content/Lib/htmlentitydefs.py
65
18327
"""HTML character entity references.""" # maps the HTML entity name to the Unicode codepoint name2codepoint = { 'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1 'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1 'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1 'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1 'Alpha': 0x0391, # greek capital letter alpha, U+0391 'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1 'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1 'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1 'Beta': 0x0392, # greek capital letter beta, U+0392 'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1 'Chi': 0x03a7, # greek capital letter chi, U+03A7 'Dagger': 0x2021, # double dagger, U+2021 ISOpub 'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3 'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1 'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1 'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1 'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1 'Epsilon': 0x0395, # greek capital letter epsilon, U+0395 'Eta': 0x0397, # greek capital letter eta, U+0397 'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1 'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3 'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1 'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1 'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1 'Iota': 0x0399, # greek capital letter iota, U+0399 'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1 'Kappa': 0x039a, # greek capital letter kappa, U+039A 'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3 
'Mu': 0x039c, # greek capital letter mu, U+039C 'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1 'Nu': 0x039d, # greek capital letter nu, U+039D 'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2 'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1 'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1 'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1 'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3 'Omicron': 0x039f, # greek capital letter omicron, U+039F 'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1 'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1 'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1 'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3 'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3 'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech 'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3 'Rho': 0x03a1, # greek capital letter rho, U+03A1 'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2 'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3 'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1 'Tau': 0x03a4, # greek capital letter tau, U+03A4 'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3 'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1 'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1 'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1 'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3 'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1 'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3 'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1 'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2 'Zeta': 0x0396, # greek capital letter zeta, 
U+0396 'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1 'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1 'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia 'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1 'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1 'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW 'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3 'amp': 0x0026, # ampersand, U+0026 ISOnum 'and': 0x2227, # logical and = wedge, U+2227 ISOtech 'ang': 0x2220, # angle, U+2220 ISOamso 'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1 'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr 'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1 'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1 'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW 'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3 'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum 'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub 'cap': 0x2229, # intersection = cap, U+2229 ISOtech 'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1 'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia 'cent': 0x00a2, # cent sign, U+00A2 ISOnum 'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3 'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub 'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub 'cong': 0x2245, # approximately equal to, U+2245 ISOtech 'copy': 0x00a9, # copyright sign, U+00A9 ISOnum 'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW 'cup': 0x222a, # union = cup, U+222A ISOtech 'curren': 0x00a4, # currency sign, U+00A4 ISOnum 'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa 'dagger': 0x2020, # dagger, U+2020 
ISOpub 'darr': 0x2193, # downwards arrow, U+2193 ISOnum 'deg': 0x00b0, # degree sign, U+00B0 ISOnum 'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3 'diams': 0x2666, # black diamond suit, U+2666 ISOpub 'divide': 0x00f7, # division sign, U+00F7 ISOnum 'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1 'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1 'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1 'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso 'emsp': 0x2003, # em space, U+2003 ISOpub 'ensp': 0x2002, # en space, U+2002 ISOpub 'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3 'equiv': 0x2261, # identical to, U+2261 ISOtech 'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3 'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1 'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1 'euro': 0x20ac, # euro sign, U+20AC NEW 'exist': 0x2203, # there exists, U+2203 ISOtech 'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech 'forall': 0x2200, # for all, U+2200 ISOtech 'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum 'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum 'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum 'frasl': 0x2044, # fraction slash, U+2044 NEW 'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3 'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech 'gt': 0x003e, # greater-than sign, U+003E ISOnum 'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa 'harr': 0x2194, # left right arrow, U+2194 ISOamsa 'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub 'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub 'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1 'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1 'iexcl': 0x00a1, 
# inverted exclamation mark, U+00A1 ISOnum 'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1 'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso 'infin': 0x221e, # infinity, U+221E ISOtech 'int': 0x222b, # integral, U+222B ISOtech 'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3 'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum 'isin': 0x2208, # element of, U+2208 ISOtech 'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1 'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3 'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech 'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3 'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech 'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum 'larr': 0x2190, # leftwards arrow, U+2190 ISOnum 'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc 'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum 'le': 0x2264, # less-than or equal to, U+2264 ISOtech 'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc 'lowast': 0x2217, # asterisk operator, U+2217 ISOtech 'loz': 0x25ca, # lozenge, U+25CA ISOpub 'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070 'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed 'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum 'lt': 0x003c, # less-than sign, U+003C ISOnum 'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia 'mdash': 0x2014, # em dash, U+2014 ISOpub 'micro': 0x00b5, # micro sign, U+00B5 ISOnum 'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum 'minus': 0x2212, # minus sign, U+2212 ISOtech 'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3 'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech 'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum 'ndash': 
0x2013, # en dash, U+2013 ISOpub 'ne': 0x2260, # not equal to, U+2260 ISOtech 'ni': 0x220b, # contains as member, U+220B ISOtech 'not': 0x00ac, # not sign, U+00AC ISOnum 'notin': 0x2209, # not an element of, U+2209 ISOtech 'nsub': 0x2284, # not a subset of, U+2284 ISOamsn 'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1 'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3 'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1 'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1 'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2 'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1 'oline': 0x203e, # overline = spacing overscore, U+203E NEW 'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3 'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW 'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb 'or': 0x2228, # logical or = vee, U+2228 ISOtech 'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum 'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum 'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1 'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1 'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb 'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1 'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum 'part': 0x2202, # partial differential, U+2202 ISOtech 'permil': 0x2030, # per mille sign, U+2030 ISOtech 'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech 'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3 'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3 'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3 'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum 'pound': 0x00a3, # pound sign, U+00A3 ISOnum 'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech 'prod': 0x220f, # n-ary product = product 
sign, U+220F ISOamsb 'prop': 0x221d, # proportional to, U+221D ISOtech 'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3 'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum 'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech 'radic': 0x221a, # square root = radical sign, U+221A ISOtech 'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech 'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum 'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum 'rceil': 0x2309, # right ceiling, U+2309 ISOamsc 'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum 'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso 'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum 'rfloor': 0x230b, # right floor, U+230B ISOamsc 'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3 'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070 'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed 'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum 'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW 'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2 'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb 'sect': 0x00a7, # section sign, U+00A7 ISOnum 'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum 'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3 'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3 'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech 'spades': 0x2660, # black spade suit, U+2660 ISOpub 'sub': 0x2282, # subset of, U+2282 ISOtech 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech 'sum': 0x2211, # n-ary sumation, U+2211 ISOamsb 'sup': 0x2283, # superset of, U+2283 ISOtech 'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum 'sup3': 
0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum 'supe': 0x2287, # superset of or equal to, U+2287 ISOtech 'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1 'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3 'there4': 0x2234, # therefore, U+2234 ISOtech 'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3 'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW 'thinsp': 0x2009, # thin space, U+2009 ISOpub 'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1 'tilde': 0x02dc, # small tilde, U+02DC ISOdia 'times': 0x00d7, # multiplication sign, U+00D7 ISOnum 'trade': 0x2122, # trade mark sign, U+2122 ISOnum 'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa 'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1 'uarr': 0x2191, # upwards arrow, U+2191 ISOnum 'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1 'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1 'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia 'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW 'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3 'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1 'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso 'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3 'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1 'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum 'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1 'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3 'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070 'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070 } # maps the Unicode codepoint to the HTML entity name codepoint2name = {} # maps the HTML entity name to the character # (or a character reference if the character is outside the Latin-1 range) entitydefs = {} for (name, codepoint) 
in name2codepoint.iteritems(): codepoint2name[codepoint] = name if codepoint <= 0xff: entitydefs[name] = chr(codepoint) else: entitydefs[name] = '&#%d;' % codepoint del name, codepoint
gpl-3.0
zephyrplugins/zephyr
zephyr.plugin.jython/jython2.5.2rc3/Lib/test/test_datetime.py
17
131537
"""Test date/time type. See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases """ import os import sys import pickle import cPickle import unittest from test import test_support from datetime import MINYEAR, MAXYEAR from datetime import timedelta from datetime import tzinfo from datetime import time from datetime import date, datetime pickle_choices = [(pickler, unpickler, proto) for pickler in pickle, cPickle for unpickler in pickle, cPickle for proto in range(3)] assert len(pickle_choices) == 2*2*3 # An arbitrary collection of objects of non-datetime types, for testing # mixed-type comparisons. OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ()) ############################################################################# # module tests class TestModule(unittest.TestCase): def test_constants(self): import datetime self.assertEqual(datetime.MINYEAR, 1) self.assertEqual(datetime.MAXYEAR, 9999) ############################################################################# # tzinfo tests class FixedOffset(tzinfo): def __init__(self, offset, name, dstoffset=42): if isinstance(offset, int): offset = timedelta(minutes=offset) if isinstance(dstoffset, int): dstoffset = timedelta(minutes=dstoffset) self.__offset = offset self.__name = name self.__dstoffset = dstoffset def __repr__(self): return self.__name.lower() def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return self.__dstoffset class PicklableFixedOffset(FixedOffset): def __init__(self, offset=None, name=None, dstoffset=None): FixedOffset.__init__(self, offset, name, dstoffset) class TestTZInfo(unittest.TestCase): def test_non_abstractness(self): # In order to allow subclasses to get pickled, the C implementation # wasn't able to get away with having __init__ raise # NotImplementedError. 
useless = tzinfo() dt = datetime.max self.assertRaises(NotImplementedError, useless.tzname, dt) self.assertRaises(NotImplementedError, useless.utcoffset, dt) self.assertRaises(NotImplementedError, useless.dst, dt) def test_subclass_must_override(self): class NotEnough(tzinfo): def __init__(self, offset, name): self.__offset = offset self.__name = name self.failUnless(issubclass(NotEnough, tzinfo)) ne = NotEnough(3, "NotByALongShot") self.failUnless(isinstance(ne, tzinfo)) dt = datetime.now() self.assertRaises(NotImplementedError, ne.tzname, dt) self.assertRaises(NotImplementedError, ne.utcoffset, dt) self.assertRaises(NotImplementedError, ne.dst, dt) def test_normal(self): fo = FixedOffset(3, "Three") self.failUnless(isinstance(fo, tzinfo)) for dt in datetime.now(), None: self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3)) self.assertEqual(fo.tzname(dt), "Three") self.assertEqual(fo.dst(dt), timedelta(minutes=42)) def test_pickling_base(self): # There's no point to pickling tzinfo objects on their own (they # carry no data), but they need to be picklable anyway else # concrete subclasses can't be pickled. orig = tzinfo.__new__(tzinfo) self.failUnless(type(orig) is tzinfo) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.failUnless(type(derived) is tzinfo) def test_pickling_subclass(self): # Make sure we can pickle/unpickle an instance of a subclass. 
        # --- Tail of a PicklableFixedOffset pickling test whose `def` lies
        # --- above this chunk: round-trip a picklable tzinfo subclass through
        # --- every (pickler, unpickler, protocol) combination and verify the
        # --- reconstructed object preserves type, utcoffset and tzname.
        offset = timedelta(minutes=-300)
        orig = PicklableFixedOffset(offset, 'cookie')
        self.failUnless(isinstance(orig, tzinfo))
        self.failUnless(type(orig) is PicklableFixedOffset)
        self.assertEqual(orig.utcoffset(None), offset)
        self.assertEqual(orig.tzname(None), 'cookie')
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.failUnless(isinstance(derived, tzinfo))
            self.failUnless(type(derived) is PicklableFixedOffset)
            self.assertEqual(derived.utcoffset(None), offset)
            self.assertEqual(derived.tzname(None), 'cookie')

#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.

class HarmlessMixedComparison(unittest.TestCase):
    # Test that __eq__ and __ne__ don't complain for mixed-type comparisons.

    # Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
    # legit constructor.

    def test_harmless_mixed_comparison(self):
        # ==/!= against an unrelated type must answer False/True, never raise.
        me = self.theclass(1, 1, 1)

        self.failIf(me == ())
        self.failUnless(me != ())
        self.failIf(() == me)
        self.failUnless(() != me)

        # Membership tests use __eq__ under the hood, so these must work too.
        self.failUnless(me in [1, 20L, [], me])
        self.failIf(me not in [1, 20L, [], me])

        self.failUnless([] in [me, 1, 20L, []])
        self.failIf([] not in [me, 1, 20L, []])

    def test_harmful_mixed_comparison(self):
        # Ordering comparisons against an unrelated type must raise TypeError.
        me = self.theclass(1, 1, 1)

        self.assertRaises(TypeError, lambda: me < ())
        self.assertRaises(TypeError, lambda: me <= ())
        self.assertRaises(TypeError, lambda: me > ())
        self.assertRaises(TypeError, lambda: me >= ())

        self.assertRaises(TypeError, lambda: () < me)
        self.assertRaises(TypeError, lambda: () <= me)
        self.assertRaises(TypeError, lambda: () > me)
        self.assertRaises(TypeError, lambda: () >= me)

        # Three-way cmp() must refuse as well (Python 2 only).
        self.assertRaises(TypeError, cmp, (), me)
        self.assertRaises(TypeError, cmp, me, ())

#############################################################################
# timedelta tests

class TestTimeDelta(HarmlessMixedComparison):

    theclass = timedelta

    def test_constructor(self):
        eq = self.assertEqual
        td = timedelta

        # Check keyword args to constructor
        eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
                    milliseconds=0, microseconds=0))
        eq(td(1), td(days=1))
        eq(td(0, 1), td(seconds=1))
        eq(td(0, 0, 1), td(microseconds=1))
        eq(td(weeks=1), td(days=7))
        eq(td(days=1), td(hours=24))
        eq(td(hours=1), td(minutes=60))
        eq(td(minutes=1), td(seconds=60))
        eq(td(seconds=1), td(milliseconds=1000))
        eq(td(milliseconds=1), td(microseconds=1000))

        # Check float args to constructor
        eq(td(weeks=1.0/7), td(days=1))
        eq(td(days=1.0/24), td(hours=1))
        eq(td(hours=1.0/60), td(minutes=1))
        eq(td(minutes=1.0/60), td(seconds=1))
        eq(td(seconds=0.001), td(milliseconds=1))
        eq(td(milliseconds=0.001), td(microseconds=1))

    def test_computations(self):
        # Arithmetic: +, -, unary ops, abs, scalar * and integer //.
        eq = self.assertEqual
        td = timedelta

        a = td(7)           # One week
        b = td(0, 60)       # One minute
        c = td(0, 0, 1000)  # One millisecond
        eq(a+b+c, td(7, 60, 1000))
        eq(a-b, td(6, 24*3600 - 60))
        eq(-a, td(-7))
        eq(+a, td(7))
        eq(-b, td(-1, 24*3600 - 60))
        eq(-c, td(-1, 24*3600 - 1, 999000))
        eq(abs(a), a)
        eq(abs(-a), a)
        eq(td(6, 24*3600), a)
        eq(td(0, 0, 60*1000000), b)
        # Multiplication by int and long, from both sides.
        eq(a*10, td(70))
        eq(a*10, 10*a)
        eq(a*10L, 10*a)
        eq(b*10, td(0, 600))
        eq(10*b, td(0, 600))
        eq(b*10L, td(0, 600))
        eq(c*10, td(0, 0, 10000))
        eq(10*c, td(0, 0, 10000))
        eq(c*10L, td(0, 0, 10000))
        eq(a*-1, -a)
        eq(b*-2, -b-b)
        eq(c*-2, -c+-c)
        eq(b*(60*24), (b*60)*24)
        eq(b*(60*24), (60*b)*24)
        eq(c*1000, td(0, 1))
        eq(1000*c, td(0, 1))
        # Floor division by int.
        eq(a//7, td(1))
        eq(b//10, td(0, 6))
        eq(c//1000, td(0, 0, 1))
        eq(a//10, td(0, 7*24*360))
        eq(a//3600000, td(0, 0, 7*24*1000))

    def test_disallowed_computations(self):
        a = timedelta(42)

        # Add/sub ints, longs, floats should be illegal
        for i in 1, 1L, 1.0:
            self.assertRaises(TypeError, lambda: a+i)
            self.assertRaises(TypeError, lambda: a-i)
            self.assertRaises(TypeError, lambda: i+a)
            self.assertRaises(TypeError, lambda: i-a)

        # Mul/div by float isn't supported.
        x = 2.3
        self.assertRaises(TypeError, lambda: a*x)
        self.assertRaises(TypeError, lambda: x*a)
        self.assertRaises(TypeError, lambda: a/x)
        self.assertRaises(TypeError, lambda: x/a)
        self.assertRaises(TypeError, lambda: a // x)
        self.assertRaises(TypeError, lambda: x // a)

        # Division of int by timedelta doesn't make sense.
        # Division by zero doesn't make sense.
        for zero in 0, 0L:
            self.assertRaises(TypeError, lambda: zero // a)
            self.assertRaises(ZeroDivisionError, lambda: a // zero)

    def test_basic_attributes(self):
        days, seconds, us = 1, 7, 31
        td = timedelta(days, seconds, us)
        self.assertEqual(td.days, days)
        self.assertEqual(td.seconds, seconds)
        self.assertEqual(td.microseconds, us)

    def test_carries(self):
        # All the keyword contributions below normalize to exactly 1 us.
        t1 = timedelta(days=100,
                       weeks=-7,
                       hours=-24*(100-49),
                       minutes=-3,
                       seconds=12,
                       microseconds=(3*60 - 12) * 1e6 + 1)
        t2 = timedelta(microseconds=1)
        self.assertEqual(t1, t2)

    def test_hash_equality(self):
        # Equal timedeltas built from different field mixes must hash equal.
        t1 = timedelta(days=100,
                       weeks=-7,
                       hours=-24*(100-49),
                       minutes=-3,
                       seconds=12,
                       microseconds=(3*60 - 12) * 1000000)
        t2 = timedelta()
        self.assertEqual(hash(t1), hash(t2))

        t1 += timedelta(weeks=7)
        t2 += timedelta(days=7*7)
        self.assertEqual(t1, t2)
        self.assertEqual(hash(t1), hash(t2))

        # Equal keys must collide in a dict.
        d = {t1: 1}
        d[t2] = 2
        self.assertEqual(len(d), 1)
        self.assertEqual(d[t1], 2)

    def test_pickling(self):
        args = 12, 34, 56
        orig = timedelta(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_compare(self):
        t1 = timedelta(2, 3, 4)
        t2 = timedelta(2, 3, 4)
        self.failUnless(t1 == t2)
        self.failUnless(t1 <= t2)
        self.failUnless(t1 >= t2)
        self.failUnless(not t1 != t2)
        self.failUnless(not t1 < t2)
        self.failUnless(not t1 > t2)
        self.assertEqual(cmp(t1, t2), 0)
        self.assertEqual(cmp(t2, t1), 0)

        for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
            t2 = timedelta(*args)   # this is larger than t1
            self.failUnless(t1 < t2)
            self.failUnless(t2 > t1)
            self.failUnless(t1 <= t2)
            self.failUnless(t2 >= t1)
            self.failUnless(t1 != t2)
            self.failUnless(t2 != t1)
            self.failUnless(not t1 == t2)
            self.failUnless(not t2 == t1)
            self.failUnless(not t1 > t2)
            self.failUnless(not t2 < t1)
            self.failUnless(not t1 >= t2)
            self.failUnless(not t2 <= t1)
            self.assertEqual(cmp(t1, t2), -1)
            self.assertEqual(cmp(t2, t1), 1)

        # Mixed-type: equality answers, ordering raises.
        for badarg in OTHERSTUFF:
            self.assertEqual(t1 == badarg, False)
            self.assertEqual(t1 != badarg, True)
            self.assertEqual(badarg == t1, False)
            self.assertEqual(badarg != t1, True)

            self.assertRaises(TypeError, lambda: t1 <= badarg)
            self.assertRaises(TypeError, lambda: t1 < badarg)
            self.assertRaises(TypeError, lambda: t1 > badarg)
            self.assertRaises(TypeError, lambda: t1 >= badarg)
            self.assertRaises(TypeError, lambda: badarg <= t1)
            self.assertRaises(TypeError, lambda: badarg < t1)
            self.assertRaises(TypeError, lambda: badarg > t1)
            self.assertRaises(TypeError, lambda: badarg >= t1)

    def test_str(self):
        td = timedelta
        eq = self.assertEqual

        eq(str(td(1)), "1 day, 0:00:00")
        eq(str(td(-1)), "-1 day, 0:00:00")
        eq(str(td(2)), "2 days, 0:00:00")
        eq(str(td(-2)), "-2 days, 0:00:00")

        eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
        eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
        eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
           "-210 days, 23:12:34")

        eq(str(td(milliseconds=1)), "0:00:00.001000")
        eq(str(td(microseconds=3)), "0:00:00.000003")

        eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
                  microseconds=999999)),
           "999999999 days, 23:59:59.999999")

    def test_roundtrip(self):
        for td in (timedelta(days=999999999, hours=23, minutes=59,
                             seconds=59, microseconds=999999),
                   timedelta(days=-999999999),
                   timedelta(days=1, seconds=2, microseconds=3)):

            # Verify td -> string -> td identity.
            s = repr(td)
            self.failUnless(s.startswith('datetime.'))
            s = s[9:]           # strip the 'datetime.' module prefix
            td2 = eval(s)
            self.assertEqual(td, td2)

            # Verify identity via reconstructing from pieces.
            td2 = timedelta(td.days, td.seconds, td.microseconds)
            self.assertEqual(td, td2)

    def test_resolution_info(self):
        self.assert_(isinstance(timedelta.min, timedelta))
        self.assert_(isinstance(timedelta.max, timedelta))
        self.assert_(isinstance(timedelta.resolution, timedelta))
        self.assert_(timedelta.max > timedelta.min)
        self.assertEqual(timedelta.min, timedelta(-999999999))
        self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
        self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))

    def test_overflow(self):
        # Stepping one resolution unit past min/max must raise OverflowError.
        tiny = timedelta.resolution

        td = timedelta.min + tiny
        td -= tiny  # no problem
        self.assertRaises(OverflowError, td.__sub__, tiny)
        self.assertRaises(OverflowError, td.__add__, -tiny)

        td = timedelta.max - tiny
        td += tiny  # no problem
        self.assertRaises(OverflowError, td.__add__, tiny)
        self.assertRaises(OverflowError, td.__sub__, -tiny)

        self.assertRaises(OverflowError, lambda: -timedelta.max)

    def test_microsecond_rounding(self):
        td = timedelta
        eq = self.assertEqual

        # Single-field rounding.
        eq(td(milliseconds=0.4/1000), td(0))    # rounds to 0
        eq(td(milliseconds=-0.4/1000), td(0))   # rounds to 0
        eq(td(milliseconds=0.6/1000), td(microseconds=1))
        eq(td(milliseconds=-0.6/1000), td(microseconds=-1))

        # Rounding due to contributions from more than one field.
        us_per_hour = 3600e6
        us_per_day = us_per_hour * 24
        eq(td(days=.4/us_per_day), td(0))
        eq(td(hours=.2/us_per_hour), td(0))
        eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))

        eq(td(days=-.4/us_per_day), td(0))
        eq(td(hours=-.2/us_per_hour), td(0))
        eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))

    def test_massive_normalization(self):
        # -1 us normalizes to (-1 day) + (almost a full day forward).
        td = timedelta(microseconds=-1)
        self.assertEqual((td.days, td.seconds, td.microseconds),
                         (-1, 24*3600-1, 999999))

    def test_bool(self):
        # Any nonzero field makes a timedelta truthy; zero is falsy.
        self.failUnless(timedelta(1))
        self.failUnless(timedelta(0, 1))
        self.failUnless(timedelta(0, 0, 1))
        self.failUnless(timedelta(microseconds=1))
        self.failUnless(not timedelta(0))

    def test_subclass_timedelta(self):

        class T(timedelta):
            @staticmethod
            def from_td(td):
                return T(td.days, td.seconds, td.microseconds)

            def as_hours(self):
                sum = (self.days * 24 +
                       self.seconds / 3600.0 +
                       self.microseconds / 3600e6)
                return round(sum)

        t1 = T(days=1)
        self.assert_(type(t1) is T)
        self.assertEqual(t1.as_hours(), 24)

        t2 = T(days=-1, seconds=-3600)
        self.assert_(type(t2) is T)
        self.assertEqual(t2.as_hours(), -25)

        # Arithmetic on the subclass degrades to plain timedelta ...
        t3 = t1 + t2
        self.assert_(type(t3) is timedelta)
        # ... but can be re-wrapped losslessly.
        t4 = T.from_td(t3)
        self.assert_(type(t4) is T)
        self.assertEqual(t3.days, t4.days)
        self.assertEqual(t3.seconds, t4.seconds)
        self.assertEqual(t3.microseconds, t4.microseconds)
        self.assertEqual(str(t3), str(t4))
        self.assertEqual(t4.as_hours(), -1)

#############################################################################
# date tests

class TestDateOnly(unittest.TestCase):
    # Tests here won't pass if also run on datetime objects, so don't
    # subclass this to test datetimes too.

    def test_delta_non_days_ignored(self):
        # date arithmetic must use only the delta's day component.
        dt = date(2000, 1, 2)
        delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
                          microseconds=5)
        days = timedelta(delta.days)
        self.assertEqual(days, timedelta(1))

        dt2 = dt + delta
        self.assertEqual(dt2, dt + days)

        dt2 = delta + dt
        self.assertEqual(dt2, dt + days)

        dt2 = dt - delta
        self.assertEqual(dt2, dt - days)

        # Same checks with the delta negated (note the sub-day fields now
        # borrow from the day count: -2 days, not -1).
        delta = -delta
        days = timedelta(delta.days)
        self.assertEqual(days, timedelta(-2))

        dt2 = dt + delta
        self.assertEqual(dt2, dt + days)

        dt2 = delta + dt
        self.assertEqual(dt2, dt + days)

        dt2 = dt - delta
        self.assertEqual(dt2, dt - days)

class SubclassDate(date):
    # Minimal date subclass used by the pickling-of-subclass tests.
    sub_var = 1

class TestDate(HarmlessMixedComparison):
    # Tests here should pass for both dates and datetimes, except for a
    # few tests that TestDateTime overrides.

    theclass = date

    def test_basic_attributes(self):
        dt = self.theclass(2002, 3, 1)
        self.assertEqual(dt.year, 2002)
        self.assertEqual(dt.month, 3)
        self.assertEqual(dt.day, 1)

    def test_roundtrip(self):
        for dt in (self.theclass(1, 2, 3),
                   self.theclass.today()):
            # Verify dt -> string -> date identity.
            s = repr(dt)
            self.failUnless(s.startswith('datetime.'))
            s = s[9:]           # strip the 'datetime.' module prefix
            dt2 = eval(s)
            self.assertEqual(dt, dt2)

            # Verify identity via reconstructing from pieces.
            dt2 = self.theclass(dt.year, dt.month, dt.day)
            self.assertEqual(dt, dt2)

    def test_ordinal_conversions(self):
        # Check some fixed values.
        for y, m, d, n in [(1, 1, 1, 1),        # calendar origin
                           (1, 12, 31, 365),
                           (2, 1, 1, 366),
                           # first example from "Calendrical Calculations"
                           (1945, 11, 12, 710347)]:
            d = self.theclass(y, m, d)
            self.assertEqual(n, d.toordinal())
            fromord = self.theclass.fromordinal(n)
            self.assertEqual(d, fromord)
            if hasattr(fromord, "hour"):
                # if we're checking something fancier than a date, verify
                # the extra fields have been zeroed out
                self.assertEqual(fromord.hour, 0)
                self.assertEqual(fromord.minute, 0)
                self.assertEqual(fromord.second, 0)
                self.assertEqual(fromord.microsecond, 0)

        # Check first and last days of year spottily across the whole
        # range of years supported.
        for year in xrange(MINYEAR, MAXYEAR+1, 7):
            # Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
            d = self.theclass(year, 1, 1)
            n = d.toordinal()
            d2 = self.theclass.fromordinal(n)
            self.assertEqual(d, d2)
            # Verify that moving back a day gets to the end of year-1.
            if year > 1:
                d = self.theclass.fromordinal(n-1)
                d2 = self.theclass(year-1, 12, 31)
                self.assertEqual(d, d2)
                self.assertEqual(d2.toordinal(), n-1)

        # Test every day in a leap-year and a non-leap year.
        dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        for year, isleap in (2000, True), (2002, False):
            n = self.theclass(year, 1, 1).toordinal()
            for month, maxday in zip(range(1, 13), dim):
                if month == 2 and isleap:
                    maxday += 1
                for day in range(1, maxday+1):
                    d = self.theclass(year, month, day)
                    self.assertEqual(d.toordinal(), n)
                    self.assertEqual(d, self.theclass.fromordinal(n))
                    n += 1

    def test_extreme_ordinals(self):
        a = self.theclass.min
        a = self.theclass(a.year, a.month, a.day)   # get rid of time parts
        aord = a.toordinal()
        b = a.fromordinal(aord)
        self.assertEqual(a, b)

        self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))

        b = a + timedelta(days=1)
        self.assertEqual(b.toordinal(), aord + 1)
        self.assertEqual(b, self.theclass.fromordinal(aord + 1))

        a = self.theclass.max
        a = self.theclass(a.year, a.month, a.day)   # get rid of time parts
        aord = a.toordinal()
        b = a.fromordinal(aord)
        self.assertEqual(a, b)

        self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))

        b = a - timedelta(days=1)
        self.assertEqual(b.toordinal(), aord - 1)
        self.assertEqual(b, self.theclass.fromordinal(aord - 1))

    def test_bad_constructor_arguments(self):
        # bad years
        self.theclass(MINYEAR, 1, 1)    # no exception
        self.theclass(MAXYEAR, 1, 1)    # no exception
        self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
        self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
        # bad months
        self.theclass(2000, 1, 1)    # no exception
        self.theclass(2000, 12, 1)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
        self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
        # bad days (2000 and 2400 are leap years, 2100 and 1900 are not)
        self.theclass(2000, 2, 29)   # no exception
        self.theclass(2004, 2, 29)   # no exception
        self.theclass(2400, 2, 29)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
        self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
        self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 32)

    def test_hash_equality(self):
        d = self.theclass(2000, 12, 31)
        # same thing
        e = self.theclass(2000, 12, 31)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

        d = self.theclass(2001, 1, 1)
        # same thing
        e = self.theclass(2001, 1, 1)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

    def test_computations(self):
        a = self.theclass(2002, 1, 31)
        b = self.theclass(1956, 1, 31)

        # 46 years apart, plus one extra day per leap year in between.
        diff = a-b
        self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
        self.assertEqual(diff.seconds, 0)
        self.assertEqual(diff.microseconds, 0)

        day = timedelta(1)
        week = timedelta(7)
        a = self.theclass(2002, 3, 2)
        self.assertEqual(a + day, self.theclass(2002, 3, 3))
        self.assertEqual(day + a, self.theclass(2002, 3, 3))
        self.assertEqual(a - day, self.theclass(2002, 3, 1))
        self.assertEqual(-day + a, self.theclass(2002, 3, 1))
        self.assertEqual(a + week, self.theclass(2002, 3, 9))
        self.assertEqual(a - week, self.theclass(2002, 2, 23))
        self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
        self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
        self.assertEqual((a + week) - a, week)
        self.assertEqual((a + day) - a, day)
        self.assertEqual((a - week) - a, -week)
        self.assertEqual((a - day) - a, -day)
        self.assertEqual(a - (a + week), -week)
        self.assertEqual(a - (a + day), -day)
        self.assertEqual(a - (a - week), week)
        self.assertEqual(a - (a - day), day)

        # Add/sub ints, longs, floats should be illegal
        for i in 1, 1L, 1.0:
            self.assertRaises(TypeError, lambda: a+i)
            self.assertRaises(TypeError, lambda: a-i)
            self.assertRaises(TypeError, lambda: i+a)
            self.assertRaises(TypeError, lambda: i-a)

        # delta - date is senseless.
        self.assertRaises(TypeError, lambda: day - a)
        # mixing date and (delta or date) via * or // is senseless
        self.assertRaises(TypeError, lambda: day * a)
        self.assertRaises(TypeError, lambda: a * day)
        self.assertRaises(TypeError, lambda: day // a)
        self.assertRaises(TypeError, lambda: a // day)
        self.assertRaises(TypeError, lambda: a * a)
        self.assertRaises(TypeError, lambda: a // a)
        # date + date is senseless
        self.assertRaises(TypeError, lambda: a + a)

    def test_overflow(self):
        # One resolution unit beyond min/max must raise OverflowError.
        tiny = self.theclass.resolution

        dt = self.theclass.min + tiny
        dt -= tiny  # no problem
        self.assertRaises(OverflowError, dt.__sub__, tiny)
        self.assertRaises(OverflowError, dt.__add__, -tiny)

        dt = self.theclass.max - tiny
        dt += tiny  # no problem
        self.assertRaises(OverflowError, dt.__add__, tiny)
        self.assertRaises(OverflowError, dt.__sub__, -tiny)

    def test_fromtimestamp(self):
        import time

        # Try an arbitrary fixed value.
        year, month, day = 1999, 9, 19
        ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
        d = self.theclass.fromtimestamp(ts)
        self.assertEqual(d.year, year)
        self.assertEqual(d.month, month)
        self.assertEqual(d.day, day)

    def test_insane_fromtimestamp(self):
        # It's possible that some platform maps time_t to double,
        # and that this test will fail there.  This test should
        # exempt such platforms (provided they return reasonable
        # results!).
        for insane in -1e200, 1e200:
            self.assertRaises(ValueError, self.theclass.fromtimestamp,
                              insane)

    def test_today(self):
        import time

        # We claim that today() is like fromtimestamp(time.time()), so
        # prove it.
        for dummy in range(3):
            today = self.theclass.today()
            ts = time.time()
            todayagain = self.theclass.fromtimestamp(ts)
            if today == todayagain:
                break
            # There are several legit reasons that could fail:
            # 1. It recently became midnight, between the today() and the
            #    time() calls.
            # 2. The platform time() has such fine resolution that we'll
            #    never get the same value twice.
            # 3. The platform time() has poor resolution, and we just
            #    happened to call today() right before a resolution quantum
            #    boundary.
            # 4. The system clock got fiddled between calls.
            # In any case, wait a little while and try again.
            time.sleep(0.1)

        # It worked or it didn't.  If it didn't, assume it's reason #2, and
        # let the test pass if they're within half a second of each other.
        self.failUnless(today == todayagain or
                        abs(todayagain - today) < timedelta(seconds=0.5))

    def test_weekday(self):
        for i in range(7):
            # March 4, 2002 is a Monday
            self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
            self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
            # January 2, 1956 is a Monday
            self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
            self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)

    def test_isocalendar(self):
        # Check examples from
        # http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        for i in range(7):
            d = self.theclass(2003, 12, 22+i)
            self.assertEqual(d.isocalendar(), (2003, 52, i+1))
            d = self.theclass(2003, 12, 29) + timedelta(i)
            self.assertEqual(d.isocalendar(), (2004, 1, i+1))
            d = self.theclass(2004, 1, 5+i)
            self.assertEqual(d.isocalendar(), (2004, 2, i+1))
            d = self.theclass(2009, 12, 21+i)
            self.assertEqual(d.isocalendar(), (2009, 52, i+1))
            d = self.theclass(2009, 12, 28) + timedelta(i)
            self.assertEqual(d.isocalendar(), (2009, 53, i+1))
            d = self.theclass(2010, 1, 4+i)
            self.assertEqual(d.isocalendar(), (2010, 1, i+1))

    def test_iso_long_years(self):
        # Calculate long ISO years and compare to table from
        # http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        # NOTE(review): only the whitespace-separated number sequence matters
        # (the string is consumed via .split()); the original table layout
        # may have been reflowed here.
        ISO_LONG_YEARS_TABLE = """
              4   32   60   88
              9   37   65   93
             15   43   71   99
             20   48   76
             26   54   82
            105  133  161  189
            111  139  167  195
            116  144  172
            122  150  178
            128  156  184
            201  229  257  285
            207  235  263  291
            212  240  268  296
            218  246  274
            224  252  280
            303  331  359  387
            308  336  364  392
            314  342  370  398
            320  348  376
            325  353  381
        """
        iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split())
        iso_long_years.sort()
        L = []
        # The ISO year pattern repeats every 400 years, so comparing
        # 2000+i against 1600+i checks the periodicity too.
        for i in range(400):
            d = self.theclass(2000+i, 12, 31)
            d1 = self.theclass(1600+i, 12, 31)
            self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
            if d.isocalendar()[1] == 53:
                L.append(i)
        self.assertEqual(L, iso_long_years)

    def test_isoformat(self):
        t = self.theclass(2, 3, 2)
        self.assertEqual(t.isoformat(), "0002-03-02")

    def test_ctime(self):
        t = self.theclass(2002, 3, 2)
        # NOTE(review): ctime() pads a single-digit day with a space
        # ("Sat Mar  2 ..."); this literal shows a single space and may have
        # lost the double space to whitespace mangling -- confirm against the
        # upstream test before relying on it.
        self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")

    def test_strftime(self):
        t = self.theclass(2005, 3, 2)
        self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
        self.assertEqual(t.strftime(""), "")    # SF bug #761337
        self.assertEqual(t.strftime('x'*1000), 'x'*1000)    # SF bug #1556784

        self.assertRaises(TypeError, t.strftime)    # needs an arg
        self.assertRaises(TypeError, t.strftime, "one", "two")  # too many args
        self.assertRaises(TypeError, t.strftime, 42)    # arg wrong type

        # A naive object replaces %z and %Z w/ empty strings.
        self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")

    def test_resolution_info(self):
        self.assert_(isinstance(self.theclass.min, self.theclass))
        self.assert_(isinstance(self.theclass.max, self.theclass))
        self.assert_(isinstance(self.theclass.resolution, timedelta))
        self.assert_(self.theclass.max > self.theclass.min)

    def test_extreme_timedelta(self):
        big = self.theclass.max - self.theclass.min
        # 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
        n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
        # n == 315537897599999999 ~= 2**58.13
        justasbig = timedelta(0, 0, n)
        self.assertEqual(big, justasbig)
        self.assertEqual(self.theclass.min + big, self.theclass.max)
        self.assertEqual(self.theclass.max - big, self.theclass.min)

    def test_timetuple(self):
        for i in range(7):
            # January 2, 1956 is a Monday (0)
            d = self.theclass(1956, 1, 2+i)
            t = d.timetuple()
            self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
            # February 1, 1956 is a Wednesday (2)
            d = self.theclass(1956, 2, 1+i)
            t = d.timetuple()
            self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
            # March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
            # of the year.
            d = self.theclass(1956, 3, 1+i)
            t = d.timetuple()
            self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
            self.assertEqual(t.tm_year, 1956)
            self.assertEqual(t.tm_mon, 3)
            self.assertEqual(t.tm_mday, 1+i)
            self.assertEqual(t.tm_hour, 0)
            self.assertEqual(t.tm_min, 0)
            self.assertEqual(t.tm_sec, 0)
            self.assertEqual(t.tm_wday, (3+i)%7)
            self.assertEqual(t.tm_yday, 61+i)
            self.assertEqual(t.tm_isdst, -1)

    def test_pickling(self):
        args = 6, 7, 23
        orig = self.theclass(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_compare(self):
        t1 = self.theclass(2, 3, 4)
        t2 = self.theclass(2, 3, 4)
        self.failUnless(t1 == t2)
        self.failUnless(t1 <= t2)
        self.failUnless(t1 >= t2)
        self.failUnless(not t1 != t2)
        self.failUnless(not t1 < t2)
        self.failUnless(not t1 > t2)
        self.assertEqual(cmp(t1, t2), 0)
        self.assertEqual(cmp(t2, t1), 0)

        for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
            t2 = self.theclass(*args)   # this is larger than t1
            self.failUnless(t1 < t2)
            self.failUnless(t2 > t1)
            self.failUnless(t1 <= t2)
            self.failUnless(t2 >= t1)
            self.failUnless(t1 != t2)
            self.failUnless(t2 != t1)
            self.failUnless(not t1 == t2)
            self.failUnless(not t2 == t1)
            self.failUnless(not t1 > t2)
            self.failUnless(not t2 < t1)
            self.failUnless(not t1 >= t2)
            self.failUnless(not t2 <= t1)
            self.assertEqual(cmp(t1, t2), -1)
            self.assertEqual(cmp(t2, t1), 1)

        for badarg in OTHERSTUFF:
            self.assertEqual(t1 == badarg, False)
            self.assertEqual(t1 != badarg, True)
            self.assertEqual(badarg == t1, False)
            self.assertEqual(badarg != t1, True)

            # NOTE(review): the `t1 <= badarg` case present in the
            # TestTimeDelta version of this loop is absent here -- confirm
            # against the upstream test whether it was dropped in transit.
            self.assertRaises(TypeError, lambda: t1 < badarg)
            self.assertRaises(TypeError, lambda: t1 > badarg)
            self.assertRaises(TypeError, lambda: t1 >= badarg)
            self.assertRaises(TypeError, lambda: badarg <= t1)
            self.assertRaises(TypeError, lambda: badarg < t1)
            self.assertRaises(TypeError, lambda: badarg > t1)
            self.assertRaises(TypeError, lambda: badarg >= t1)

    def test_mixed_compare(self):
        our = self.theclass(2000, 4, 5)
        self.assertRaises(TypeError, cmp, our, 1)
        self.assertRaises(TypeError, cmp, 1, our)

        class AnotherDateTimeClass(object):
            def __cmp__(self, other):
                # Return "equal" so calling this can't be confused with
                # compare-by-address (which never says "equal" for distinct
                # objects).
                return 0

        # This still errors, because date and datetime comparison raise
        # TypeError instead of NotImplemented when they don't know what to
        # do, in order to stop comparison from falling back to the default
        # compare-by-address.
        their = AnotherDateTimeClass()
        self.assertRaises(TypeError, cmp, our, their)
        # Oops:  The next stab raises TypeError in the C implementation,
        # but not in the Python implementation of datetime.  The difference
        # is due to that the Python implementation defines __cmp__ but
        # the C implementation defines tp_richcompare.  This is more pain
        # to fix than it's worth, so commenting out the test.
        # self.assertEqual(cmp(their, our), 0)

        # But date and datetime comparison return NotImplemented instead if the
        # other object has a timetuple attr.  This gives the other object a
        # chance to do the comparison.
        class Comparable(AnotherDateTimeClass):
            def timetuple(self):
                return ()

        their = Comparable()
        self.assertEqual(cmp(our, their), 0)
        self.assertEqual(cmp(their, our), 0)
        self.failUnless(our == their)
        self.failUnless(their == our)

    def test_bool(self):
        # All dates are considered true.
        self.failUnless(self.theclass.min)
        self.failUnless(self.theclass.max)

    def test_srftime_out_of_range(self):
        # For nasty technical reasons, we can't handle years before 1900.
        cls = self.theclass
        self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900")
        for y in 1, 49, 51, 99, 100, 1000, 1899:
            self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")

    def test_replace(self):
        cls = self.theclass
        args = [1, 2, 3]
        base = cls(*args)
        self.assertEqual(base, base.replace())

        # Replace each field in turn and check only that field changed.
        i = 0
        for name, newval in (("year", 2),
                             ("month", 3),
                             ("day", 4)):
            newargs = args[:]
            newargs[i] = newval
            expected = cls(*newargs)
            got = base.replace(**{name: newval})
            self.assertEqual(expected, got)
            i += 1

        # Out of bounds (Feb 29 doesn't exist in 2001).
        base = cls(2000, 2, 29)
        self.assertRaises(ValueError, base.replace, year=2001)

    def test_subclass_date(self):

        class C(self.theclass):
            theAnswer = 42

            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result

            def newmeth(self, start):
                return start + self.year + self.month

        args = 2003, 4, 14

        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})

        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.toordinal(), dt2.toordinal())
        self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)

    def test_pickling_subclass_date(self):
        args = 6, 7, 23
        orig = SubclassDate(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_backdoor_resistance(self):
        # For fast unpickling, the constructor accepts a pickle string.
        # This is a low-overhead backdoor.  A user can (by intent or
        # mistake) pass a string directly, which (if it's the right length)
        # will get treated like a pickle, and bypass the normal sanity
        # checks in the constructor.  This can create insane objects.
        # The constructor doesn't want to burn the time to validate all
        # fields, but does check the month field.  This stops, e.g.,
        # datetime.datetime('1995-03-25') from yielding an insane object.
        base = '1995-03-25'
        if not issubclass(self.theclass, datetime):
            base = base[:4]
        for month_byte in '9', chr(0), chr(13), '\xff':
            self.assertRaises(TypeError, self.theclass,
                              base[:2] + month_byte + base[3:])
        for ord_byte in range(1, 13):
            # This shouldn't blow up because of the month byte alone.  If
            # the implementation changes to do more-careful checking, it may
            # blow up because other fields are insane.
            self.theclass(base[:2] + chr(ord_byte) + base[3:])

#############################################################################
# datetime tests

class SubclassDatetime(datetime):
    # Minimal datetime subclass used by the pickling-of-subclass tests.
    sub_var = 1

class TestDateTime(TestDate):

    theclass = datetime

    def test_basic_attributes(self):
        dt = self.theclass(2002, 3, 1, 12, 0)
        self.assertEqual(dt.year, 2002)
        self.assertEqual(dt.month, 3)
        self.assertEqual(dt.day, 1)
        self.assertEqual(dt.hour, 12)
        self.assertEqual(dt.minute, 0)
        self.assertEqual(dt.second, 0)
        self.assertEqual(dt.microsecond, 0)

    def test_basic_attributes_nonzero(self):
        # Make sure all attributes are non-zero so bugs in
        # bit-shifting access show up.
        dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
        self.assertEqual(dt.year, 2002)
        self.assertEqual(dt.month, 3)
        self.assertEqual(dt.day, 1)
        self.assertEqual(dt.hour, 12)
        self.assertEqual(dt.minute, 59)
        self.assertEqual(dt.second, 59)
        self.assertEqual(dt.microsecond, 8000)

    def test_roundtrip(self):
        for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
                   self.theclass.now()):
            # Verify dt -> string -> datetime identity.
            s = repr(dt)
            self.failUnless(s.startswith('datetime.'))
            s = s[9:]           # strip the 'datetime.' module prefix
            dt2 = eval(s)
            self.assertEqual(dt, dt2)

            # Verify identity via reconstructing from pieces.
            dt2 = self.theclass(dt.year, dt.month, dt.day,
                                dt.hour, dt.minute, dt.second,
                                dt.microsecond)
            self.assertEqual(dt, dt2)

    def test_isoformat(self):
        t = self.theclass(2, 3, 2, 4, 5, 1, 123)
        self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
        self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
        self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
        # str is ISO format with the separator forced to a blank.
        self.assertEqual(str(t), "0002-03-02 04:05:01.000123")

        t = self.theclass(2, 3, 2)
        self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
        self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
        self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
        # str is ISO format with the separator forced to a blank.
        self.assertEqual(str(t), "0002-03-02 00:00:00")

    def test_more_ctime(self):
        # Test fields that TestDate doesn't touch.
        import time

        t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
        # NOTE(review): ctime() pads a single-digit day with a space
        # ("Sat Mar  2 ..."); this literal may have lost the double space to
        # whitespace mangling -- confirm against the upstream test.
        self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002")
        # Oops!  The next line fails on Win2K under MSVC 6, so it's commented
        # out.  The difference is that t.ctime() produces " 2" for the day,
        # but platform ctime() produces "02" for the day.  According to
        # C99, t.ctime() is correct here.
        # self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))

        # So test a case where that difference doesn't matter.
        t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
        self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))

    def test_tz_independent_comparing(self):
        dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
        dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
        dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
        self.assertEqual(dt1, dt3)
        self.assert_(dt2 > dt3)

        # Make sure comparison doesn't forget microseconds, and isn't done
        # via comparing a float timestamp (an IEEE double doesn't have enough
        # precision to span microsecond resolution across years 1 thru 9999,
        # so comparing via timestamp necessarily calls some distinct values
        # equal).
        dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
        us = timedelta(microseconds=1)
        dt2 = dt1 + us
        self.assertEqual(dt2 - dt1, us)
        self.assert_(dt1 < dt2)

    def test_strftime_with_bad_tzname_replace(self):
        # verify ok if tzinfo.tzname().replace() returns a non-string
        class MyTzInfo(FixedOffset):
            def tzname(self, dt):
                class MyStr(str):
                    def replace(self, *args):
                        return None
                return MyStr('name')
        t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
        self.assertRaises(TypeError, t.strftime, '%Z')

    def test_bad_constructor_arguments(self):
        # Extends the TestDate version with the time-of-day fields.
        # bad years
        self.theclass(MINYEAR, 1, 1)    # no exception
        self.theclass(MAXYEAR, 1, 1)    # no exception
        self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
        self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
        # bad months
        self.theclass(2000, 1, 1)    # no exception
        self.theclass(2000, 12, 1)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
        self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
        # bad days
        self.theclass(2000, 2, 29)   # no exception
        self.theclass(2004, 2, 29)   # no exception
        self.theclass(2400, 2, 29)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
        self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
        self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
        # bad hours
        self.theclass(2000, 1, 31, 0)   # no exception
        self.theclass(2000, 1, 31, 23)  # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
        # bad minutes
        self.theclass(2000, 1, 31, 23, 0)   # no exception
        self.theclass(2000, 1, 31, 23, 59)  # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
        # bad seconds
        self.theclass(2000, 1, 31, 23, 59, 0)   # no exception
        self.theclass(2000, 1, 31, 23, 59, 59)  # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
        # bad microseconds
        self.theclass(2000, 1, 31, 23, 59, 59, 0)       # no exception
        self.theclass(2000, 1, 31, 23, 59, 59, 999999)  # no exception
        self.assertRaises(ValueError, self.theclass,
                          2000, 1, 31, 23, 59, 59, -1)
        self.assertRaises(ValueError, self.theclass,
                          2000, 1, 31, 23, 59, 59, 1000000)

    def test_hash_equality(self):
        d = self.theclass(2000, 12, 31, 23, 30, 17)
        e = self.theclass(2000, 12, 31, 23, 30, 17)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

        d = self.theclass(2001, 1, 1, 0, 5, 17)
        e = self.theclass(2001, 1, 1, 0, 5, 17)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

    def test_computations(self):
        a = self.theclass(2002, 1, 31)
        b = self.theclass(1956, 1, 31)
        diff = a-b
        self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
        self.assertEqual(diff.seconds, 0)
        self.assertEqual(diff.microseconds, 0)
        a = self.theclass(2002, 3, 2, 17, 6)
        millisec = timedelta(0, 0, 1000)
        hour = timedelta(0, 3600)
        day = timedelta(1)
        week = timedelta(7)
        self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
        self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
        self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
        self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
        self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
        self.assertEqual(a - hour, a + -hour)
        self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
        self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
        self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
        self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
        self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
        self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
        self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
        self.assertEqual((a + week) - a, week)
        self.assertEqual((a + day) - a, day)
        self.assertEqual((a + hour) - a, hour)
        self.assertEqual((a + millisec) - a, millisec)
        self.assertEqual((a - week) - a, -week)
        self.assertEqual((a - day) - a, -day)
        self.assertEqual((a - hour) - a, -hour)
        self.assertEqual((a - millisec) - a, -millisec)
        self.assertEqual(a - (a + week), -week)
        self.assertEqual(a - (a + day), -day)
        self.assertEqual(a - (a + hour), -hour)
        self.assertEqual(a - (a + millisec), -millisec)
        self.assertEqual(a - (a - week), week)
        self.assertEqual(a - (a - day), day)
        self.assertEqual(a - (a - hour), hour)
        self.assertEqual(a - (a - millisec), millisec)
        self.assertEqual(a + (week + day + hour + millisec),
                         self.theclass(2002, 3, 10, 18, 6, 0, 1000))
        self.assertEqual(a + (week + day + hour + millisec),
                         (((a + week) + day) + hour) + millisec)
        self.assertEqual(a - (week + day + hour + millisec),
                         self.theclass(2002, 2, 22, 16, 5, 59, 999000))
        self.assertEqual(a - (week + day + hour + millisec),
                         (((a - week) - day) - hour) - millisec)
        # Add/sub ints, longs, floats should be illegal
        for i in 1, 1L, 1.0:
            self.assertRaises(TypeError, lambda: a+i)
            self.assertRaises(TypeError, lambda: a-i)
            self.assertRaises(TypeError, lambda: i+a)
            self.assertRaises(TypeError, lambda: i-a)

        # delta - datetime is senseless.
        self.assertRaises(TypeError, lambda: day - a)
        # mixing datetime and (delta or datetime) via * or // is senseless
        self.assertRaises(TypeError, lambda: day * a)
        self.assertRaises(TypeError, lambda: a * day)
        self.assertRaises(TypeError, lambda: day // a)
        self.assertRaises(TypeError, lambda: a // day)
        self.assertRaises(TypeError, lambda: a * a)
        self.assertRaises(TypeError, lambda: a // a)
        # datetime + datetime is senseless
        self.assertRaises(TypeError, lambda: a + a)

    def test_pickling(self):
        args = 6, 7, 23, 20, 59, 1, 64**2
        orig = self.theclass(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_more_pickling(self):
        a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
        s = pickle.dumps(a)
        b = pickle.loads(s)
        self.assertEqual(b.year, 2003)
        self.assertEqual(b.month, 2)
        self.assertEqual(b.day, 7)

    def test_pickling_subclass_datetime(self):
        args = 6, 7, 23, 20, 59, 1, 64**2
        orig = SubclassDatetime(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_more_compare(self):
        # The test_compare() inherited from TestDate covers the error cases.
        # We just want to test lexicographic ordering on the members datetime
        # has that date lacks.
        # (Method body continues beyond this chunk.)
args = [2000, 11, 29, 20, 58, 16, 999998] t1 = self.theclass(*args) t2 = self.theclass(*args) self.failUnless(t1 == t2) self.failUnless(t1 <= t2) self.failUnless(t1 >= t2) self.failUnless(not t1 != t2) self.failUnless(not t1 < t2) self.failUnless(not t1 > t2) self.assertEqual(cmp(t1, t2), 0) self.assertEqual(cmp(t2, t1), 0) for i in range(len(args)): newargs = args[:] newargs[i] = args[i] + 1 t2 = self.theclass(*newargs) # this is larger than t1 self.failUnless(t1 < t2) self.failUnless(t2 > t1) self.failUnless(t1 <= t2) self.failUnless(t2 >= t1) self.failUnless(t1 != t2) self.failUnless(t2 != t1) self.failUnless(not t1 == t2) self.failUnless(not t2 == t1) self.failUnless(not t1 > t2) self.failUnless(not t2 < t1) self.failUnless(not t1 >= t2) self.failUnless(not t2 <= t1) self.assertEqual(cmp(t1, t2), -1) self.assertEqual(cmp(t2, t1), 1) # A helper for timestamp constructor tests. def verify_field_equality(self, expected, got): self.assertEqual(expected.tm_year, got.year) self.assertEqual(expected.tm_mon, got.month) self.assertEqual(expected.tm_mday, got.day) self.assertEqual(expected.tm_hour, got.hour) self.assertEqual(expected.tm_min, got.minute) self.assertEqual(expected.tm_sec, got.second) def test_fromtimestamp(self): import time ts = time.time() expected = time.localtime(ts) got = self.theclass.fromtimestamp(ts) self.verify_field_equality(expected, got) def test_utcfromtimestamp(self): import time ts = time.time() expected = time.gmtime(ts) got = self.theclass.utcfromtimestamp(ts) self.verify_field_equality(expected, got) def test_microsecond_rounding(self): # Test whether fromtimestamp "rounds up" floats that are less # than one microsecond smaller than an integer. self.assertEquals(self.theclass.fromtimestamp(0.9999999), self.theclass.fromtimestamp(1)) def test_insane_fromtimestamp(self): # It's possible that some platform maps time_t to double, # and that this test will fail there. 
This test should # exempt such platforms (provided they return reasonable # results!). for insane in -1e200, 1e200: self.assertRaises(ValueError, self.theclass.fromtimestamp, insane) def test_insane_utcfromtimestamp(self): # It's possible that some platform maps time_t to double, # and that this test will fail there. This test should # exempt such platforms (provided they return reasonable # results!). for insane in -1e200, 1e200: self.assertRaises(ValueError, self.theclass.utcfromtimestamp, insane) def test_negative_float_fromtimestamp(self): # Windows doesn't accept negative timestamps if os.name == "nt": return # The result is tz-dependent; at least test that this doesn't # fail (like it did before bug 1646728 was fixed). self.theclass.fromtimestamp(-1.05) def test_negative_float_utcfromtimestamp(self): # Windows doesn't accept negative timestamps if os.name == "nt": return d = self.theclass.utcfromtimestamp(-1.05) self.assertEquals(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000)) def test_utcnow(self): import time # Call it a success if utcnow() and utcfromtimestamp() are within # a second of each other. tolerance = timedelta(seconds=1) for dummy in range(3): from_now = self.theclass.utcnow() from_timestamp = self.theclass.utcfromtimestamp(time.time()) if abs(from_timestamp - from_now) <= tolerance: break # Else try again a few times. self.failUnless(abs(from_timestamp - from_now) <= tolerance) def test_strptime(self): import time string = '2004-12-01 13:02:47' format = '%Y-%m-%d %H:%M:%S' expected = self.theclass(*(time.strptime(string, format)[0:6])) got = self.theclass.strptime(string, format) self.assertEqual(expected, got) def test_more_timetuple(self): # This tests fields beyond those tested by the TestDate.test_timetuple. 
t = self.theclass(2004, 12, 31, 6, 22, 33) self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1)) self.assertEqual(t.timetuple(), (t.year, t.month, t.day, t.hour, t.minute, t.second, t.weekday(), t.toordinal() - date(t.year, 1, 1).toordinal() + 1, -1)) tt = t.timetuple() self.assertEqual(tt.tm_year, t.year) self.assertEqual(tt.tm_mon, t.month) self.assertEqual(tt.tm_mday, t.day) self.assertEqual(tt.tm_hour, t.hour) self.assertEqual(tt.tm_min, t.minute) self.assertEqual(tt.tm_sec, t.second) self.assertEqual(tt.tm_wday, t.weekday()) self.assertEqual(tt.tm_yday, t.toordinal() - date(t.year, 1, 1).toordinal() + 1) self.assertEqual(tt.tm_isdst, -1) def test_more_strftime(self): # This tests fields beyond those tested by the TestDate.test_strftime. t = self.theclass(2004, 12, 31, 6, 22, 33) self.assertEqual(t.strftime("%m %d %y %S %M %H %j"), "12 31 04 33 22 06 366") def test_extract(self): dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234) self.assertEqual(dt.date(), date(2002, 3, 4)) self.assertEqual(dt.time(), time(18, 45, 3, 1234)) def test_combine(self): d = date(2002, 3, 4) t = time(18, 45, 3, 1234) expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234) combine = self.theclass.combine dt = combine(d, t) self.assertEqual(dt, expected) dt = combine(time=t, date=d) self.assertEqual(dt, expected) self.assertEqual(d, dt.date()) self.assertEqual(t, dt.time()) self.assertEqual(dt, combine(dt.date(), dt.time())) self.assertRaises(TypeError, combine) # need an arg self.assertRaises(TypeError, combine, d) # need two args self.assertRaises(TypeError, combine, t, d) # args reversed self.assertRaises(TypeError, combine, d, t, 1) # too many args self.assertRaises(TypeError, combine, "date", "time") # wrong types def test_replace(self): cls = self.theclass args = [1, 2, 3, 4, 5, 6, 7] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("year", 2), ("month", 3), ("day", 4), ("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8)): 
newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Out of bounds. base = cls(2000, 2, 29) self.assertRaises(ValueError, base.replace, year=2001) def test_astimezone(self): # Pretty boring! The TZ test is more interesting here. astimezone() # simply can't be applied to a naive object. dt = self.theclass.now() f = FixedOffset(44, "") self.assertRaises(TypeError, dt.astimezone) # not enough args self.assertRaises(TypeError, dt.astimezone, f, f) # too many args self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type self.assertRaises(ValueError, dt.astimezone, f) # naive self.assertRaises(ValueError, dt.astimezone, tz=f) # naive class Bogus(tzinfo): def utcoffset(self, dt): return None def dst(self, dt): return timedelta(0) bog = Bogus() self.assertRaises(ValueError, dt.astimezone, bog) # naive class AlsoBogus(tzinfo): def utcoffset(self, dt): return timedelta(0) def dst(self, dt): return None alsobog = AlsoBogus() self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive def test_subclass_datetime(self): class C(self.theclass): theAnswer = 42 def __new__(cls, *args, **kws): temp = kws.copy() extra = temp.pop('extra') result = self.theclass.__new__(cls, *args, **temp) result.extra = extra return result def newmeth(self, start): return start + self.year + self.month + self.second args = 2003, 4, 14, 12, 13, 41 dt1 = self.theclass(*args) dt2 = C(*args, **{'extra': 7}) self.assertEqual(dt2.__class__, C) self.assertEqual(dt2.theAnswer, 42) self.assertEqual(dt2.extra, 7) self.assertEqual(dt1.toordinal(), dt2.toordinal()) self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month + dt1.second - 7) class SubclassTime(time): sub_var = 1 class TestTime(HarmlessMixedComparison): theclass = time def test_basic_attributes(self): t = self.theclass(12, 0) self.assertEqual(t.hour, 12) self.assertEqual(t.minute, 0) self.assertEqual(t.second, 0) self.assertEqual(t.microsecond, 0) def 
test_basic_attributes_nonzero(self): # Make sure all attributes are non-zero so bugs in # bit-shifting access show up. t = self.theclass(12, 59, 59, 8000) self.assertEqual(t.hour, 12) self.assertEqual(t.minute, 59) self.assertEqual(t.second, 59) self.assertEqual(t.microsecond, 8000) def test_roundtrip(self): t = self.theclass(1, 2, 3, 4) # Verify t -> string -> time identity. s = repr(t) self.failUnless(s.startswith('datetime.')) s = s[9:] t2 = eval(s) self.assertEqual(t, t2) # Verify identity via reconstructing from pieces. t2 = self.theclass(t.hour, t.minute, t.second, t.microsecond) self.assertEqual(t, t2) def test_comparing(self): args = [1, 2, 3, 4] t1 = self.theclass(*args) t2 = self.theclass(*args) self.failUnless(t1 == t2) self.failUnless(t1 <= t2) self.failUnless(t1 >= t2) self.failUnless(not t1 != t2) self.failUnless(not t1 < t2) self.failUnless(not t1 > t2) self.assertEqual(cmp(t1, t2), 0) self.assertEqual(cmp(t2, t1), 0) for i in range(len(args)): newargs = args[:] newargs[i] = args[i] + 1 t2 = self.theclass(*newargs) # this is larger than t1 self.failUnless(t1 < t2) self.failUnless(t2 > t1) self.failUnless(t1 <= t2) self.failUnless(t2 >= t1) self.failUnless(t1 != t2) self.failUnless(t2 != t1) self.failUnless(not t1 == t2) self.failUnless(not t2 == t1) self.failUnless(not t1 > t2) self.failUnless(not t2 < t1) self.failUnless(not t1 >= t2) self.failUnless(not t2 <= t1) self.assertEqual(cmp(t1, t2), -1) self.assertEqual(cmp(t2, t1), 1) for badarg in OTHERSTUFF: self.assertEqual(t1 == badarg, False) self.assertEqual(t1 != badarg, True) self.assertEqual(badarg == t1, False) self.assertEqual(badarg != t1, True) self.assertRaises(TypeError, lambda: t1 <= badarg) self.assertRaises(TypeError, lambda: t1 < badarg) self.assertRaises(TypeError, lambda: t1 > badarg) self.assertRaises(TypeError, lambda: t1 >= badarg) self.assertRaises(TypeError, lambda: badarg <= t1) self.assertRaises(TypeError, lambda: badarg < t1) self.assertRaises(TypeError, lambda: badarg > t1) 
self.assertRaises(TypeError, lambda: badarg >= t1) def test_bad_constructor_arguments(self): # bad hours self.theclass(0, 0) # no exception self.theclass(23, 0) # no exception self.assertRaises(ValueError, self.theclass, -1, 0) self.assertRaises(ValueError, self.theclass, 24, 0) # bad minutes self.theclass(23, 0) # no exception self.theclass(23, 59) # no exception self.assertRaises(ValueError, self.theclass, 23, -1) self.assertRaises(ValueError, self.theclass, 23, 60) # bad seconds self.theclass(23, 59, 0) # no exception self.theclass(23, 59, 59) # no exception self.assertRaises(ValueError, self.theclass, 23, 59, -1) self.assertRaises(ValueError, self.theclass, 23, 59, 60) # bad microseconds self.theclass(23, 59, 59, 0) # no exception self.theclass(23, 59, 59, 999999) # no exception self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1) self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000) def test_hash_equality(self): d = self.theclass(23, 30, 17) e = self.theclass(23, 30, 17) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) d = self.theclass(0, 5, 17) e = self.theclass(0, 5, 17) self.assertEqual(d, e) self.assertEqual(hash(d), hash(e)) dic = {d: 1} dic[e] = 2 self.assertEqual(len(dic), 1) self.assertEqual(dic[d], 2) self.assertEqual(dic[e], 2) def test_isoformat(self): t = self.theclass(4, 5, 1, 123) self.assertEqual(t.isoformat(), "04:05:01.000123") self.assertEqual(t.isoformat(), str(t)) t = self.theclass() self.assertEqual(t.isoformat(), "00:00:00") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=1) self.assertEqual(t.isoformat(), "00:00:00.000001") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=10) self.assertEqual(t.isoformat(), "00:00:00.000010") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=100) self.assertEqual(t.isoformat(), "00:00:00.000100") 
self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=1000) self.assertEqual(t.isoformat(), "00:00:00.001000") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=10000) self.assertEqual(t.isoformat(), "00:00:00.010000") self.assertEqual(t.isoformat(), str(t)) t = self.theclass(microsecond=100000) self.assertEqual(t.isoformat(), "00:00:00.100000") self.assertEqual(t.isoformat(), str(t)) def test_strftime(self): t = self.theclass(1, 2, 3, 4) self.assertEqual(t.strftime('%H %M %S'), "01 02 03") # A naive object replaces %z and %Z with empty strings. self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''") def test_str(self): self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004") self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000") self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000") self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03") self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00") def test_repr(self): name = 'datetime.' 
+ self.theclass.__name__ self.assertEqual(repr(self.theclass(1, 2, 3, 4)), "%s(1, 2, 3, 4)" % name) self.assertEqual(repr(self.theclass(10, 2, 3, 4000)), "%s(10, 2, 3, 4000)" % name) self.assertEqual(repr(self.theclass(0, 2, 3, 400000)), "%s(0, 2, 3, 400000)" % name) self.assertEqual(repr(self.theclass(12, 2, 3, 0)), "%s(12, 2, 3)" % name) self.assertEqual(repr(self.theclass(23, 15, 0, 0)), "%s(23, 15)" % name) def test_resolution_info(self): self.assert_(isinstance(self.theclass.min, self.theclass)) self.assert_(isinstance(self.theclass.max, self.theclass)) self.assert_(isinstance(self.theclass.resolution, timedelta)) self.assert_(self.theclass.max > self.theclass.min) def test_pickling(self): args = 20, 59, 16, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_pickling_subclass_time(self): args = 20, 59, 16, 64**2 orig = SubclassTime(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) def test_bool(self): cls = self.theclass self.failUnless(cls(1)) self.failUnless(cls(0, 1)) self.failUnless(cls(0, 0, 1)) self.failUnless(cls(0, 0, 0, 1)) self.failUnless(not cls(0)) self.failUnless(not cls()) def test_replace(self): cls = self.theclass args = [1, 2, 3, 4] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Out of bounds. 
base = cls(1) self.assertRaises(ValueError, base.replace, hour=24) self.assertRaises(ValueError, base.replace, minute=-1) self.assertRaises(ValueError, base.replace, second=100) self.assertRaises(ValueError, base.replace, microsecond=1000000) def test_subclass_time(self): class C(self.theclass): theAnswer = 42 def __new__(cls, *args, **kws): temp = kws.copy() extra = temp.pop('extra') result = self.theclass.__new__(cls, *args, **temp) result.extra = extra return result def newmeth(self, start): return start + self.hour + self.second args = 4, 5, 6 dt1 = self.theclass(*args) dt2 = C(*args, **{'extra': 7}) self.assertEqual(dt2.__class__, C) self.assertEqual(dt2.theAnswer, 42) self.assertEqual(dt2.extra, 7) self.assertEqual(dt1.isoformat(), dt2.isoformat()) self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7) def test_backdoor_resistance(self): # see TestDate.test_backdoor_resistance(). base = '2:59.0' for hour_byte in ' ', '9', chr(24), '\xff': self.assertRaises(TypeError, self.theclass, hour_byte + base[1:]) # A mixin for classes with a tzinfo= argument. Subclasses must define # theclass as a class atribute, and theclass(1, 1, 1, tzinfo=whatever) # must be legit (which is true for time and datetime). class TZInfoBase(unittest.TestCase): def test_argument_passing(self): cls = self.theclass # A datetime passes itself on, a time passes None. 
class introspective(tzinfo): def tzname(self, dt): return dt and "real" or "none" def utcoffset(self, dt): return timedelta(minutes = dt and 42 or -42) dst = utcoffset obj = cls(1, 2, 3, tzinfo=introspective()) expected = cls is time and "none" or "real" self.assertEqual(obj.tzname(), expected) expected = timedelta(minutes=(cls is time and -42 or 42)) self.assertEqual(obj.utcoffset(), expected) self.assertEqual(obj.dst(), expected) def test_bad_tzinfo_classes(self): cls = self.theclass self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12) class NiceTry(object): def __init__(self): pass def utcoffset(self, dt): pass self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry) class BetterTry(tzinfo): def __init__(self): pass def utcoffset(self, dt): pass b = BetterTry() t = cls(1, 1, 1, tzinfo=b) self.failUnless(t.tzinfo is b) def test_utc_offset_out_of_bounds(self): class Edgy(tzinfo): def __init__(self, offset): self.offset = timedelta(minutes=offset) def utcoffset(self, dt): return self.offset cls = self.theclass for offset, legit in ((-1440, False), (-1439, True), (1439, True), (1440, False)): if cls is time: t = cls(1, 2, 3, tzinfo=Edgy(offset)) elif cls is datetime: t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset)) else: assert 0, "impossible" if legit: aofs = abs(offset) h, m = divmod(aofs, 60) tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m) if isinstance(t, datetime): t = t.timetz() self.assertEqual(str(t), "01:02:03" + tag) else: self.assertRaises(ValueError, str, t) def test_tzinfo_classes(self): cls = self.theclass class C1(tzinfo): def utcoffset(self, dt): return None def dst(self, dt): return None def tzname(self, dt): return None for t in (cls(1, 1, 1), cls(1, 1, 1, tzinfo=None), cls(1, 1, 1, tzinfo=C1())): self.failUnless(t.utcoffset() is None) self.failUnless(t.dst() is None) self.failUnless(t.tzname() is None) class C3(tzinfo): def utcoffset(self, dt): return timedelta(minutes=-1439) def dst(self, dt): return timedelta(minutes=1439) def 
tzname(self, dt): return "aname" t = cls(1, 1, 1, tzinfo=C3()) self.assertEqual(t.utcoffset(), timedelta(minutes=-1439)) self.assertEqual(t.dst(), timedelta(minutes=1439)) self.assertEqual(t.tzname(), "aname") # Wrong types. class C4(tzinfo): def utcoffset(self, dt): return "aname" def dst(self, dt): return 7 def tzname(self, dt): return 0 t = cls(1, 1, 1, tzinfo=C4()) self.assertRaises(TypeError, t.utcoffset) self.assertRaises(TypeError, t.dst) self.assertRaises(TypeError, t.tzname) # Offset out of range. class C6(tzinfo): def utcoffset(self, dt): return timedelta(hours=-24) def dst(self, dt): return timedelta(hours=24) t = cls(1, 1, 1, tzinfo=C6()) self.assertRaises(ValueError, t.utcoffset) self.assertRaises(ValueError, t.dst) # Not a whole number of minutes. class C7(tzinfo): def utcoffset(self, dt): return timedelta(seconds=61) def dst(self, dt): return timedelta(microseconds=-81) t = cls(1, 1, 1, tzinfo=C7()) self.assertRaises(ValueError, t.utcoffset) self.assertRaises(ValueError, t.dst) def test_aware_compare(self): cls = self.theclass # Ensure that utcoffset() gets ignored if the comparands have # the same tzinfo member. class OperandDependentOffset(tzinfo): def utcoffset(self, t): if t.minute < 10: # d0 and d1 equal after adjustment return timedelta(minutes=t.minute) else: # d2 off in the weeds return timedelta(minutes=59) base = cls(8, 9, 10, tzinfo=OperandDependentOffset()) d0 = base.replace(minute=3) d1 = base.replace(minute=9) d2 = base.replace(minute=11) for x in d0, d1, d2: for y in d0, d1, d2: got = cmp(x, y) expected = cmp(x.minute, y.minute) self.assertEqual(got, expected) # However, if they're different members, uctoffset is not ignored. # Note that a time can't actually have an operand-depedent offset, # though (and time.utcoffset() passes None to tzinfo.utcoffset()), # so skip this test for time. 
if cls is not time: d0 = base.replace(minute=3, tzinfo=OperandDependentOffset()) d1 = base.replace(minute=9, tzinfo=OperandDependentOffset()) d2 = base.replace(minute=11, tzinfo=OperandDependentOffset()) for x in d0, d1, d2: for y in d0, d1, d2: got = cmp(x, y) if (x is d0 or x is d1) and (y is d0 or y is d1): expected = 0 elif x is y is d2: expected = 0 elif x is d2: expected = -1 else: assert y is d2 expected = 1 self.assertEqual(got, expected) # Testing time objects with a non-None tzinfo. class TestTimeTZ(TestTime, TZInfoBase): theclass = time def test_empty(self): t = self.theclass() self.assertEqual(t.hour, 0) self.assertEqual(t.minute, 0) self.assertEqual(t.second, 0) self.assertEqual(t.microsecond, 0) self.failUnless(t.tzinfo is None) def test_zones(self): est = FixedOffset(-300, "EST", 1) utc = FixedOffset(0, "UTC", -2) met = FixedOffset(60, "MET", 3) t1 = time( 7, 47, tzinfo=est) t2 = time(12, 47, tzinfo=utc) t3 = time(13, 47, tzinfo=met) t4 = time(microsecond=40) t5 = time(microsecond=40, tzinfo=utc) self.assertEqual(t1.tzinfo, est) self.assertEqual(t2.tzinfo, utc) self.assertEqual(t3.tzinfo, met) self.failUnless(t4.tzinfo is None) self.assertEqual(t5.tzinfo, utc) self.assertEqual(t1.utcoffset(), timedelta(minutes=-300)) self.assertEqual(t2.utcoffset(), timedelta(minutes=0)) self.assertEqual(t3.utcoffset(), timedelta(minutes=60)) self.failUnless(t4.utcoffset() is None) self.assertRaises(TypeError, t1.utcoffset, "no args") self.assertEqual(t1.tzname(), "EST") self.assertEqual(t2.tzname(), "UTC") self.assertEqual(t3.tzname(), "MET") self.failUnless(t4.tzname() is None) self.assertRaises(TypeError, t1.tzname, "no args") self.assertEqual(t1.dst(), timedelta(minutes=1)) self.assertEqual(t2.dst(), timedelta(minutes=-2)) self.assertEqual(t3.dst(), timedelta(minutes=3)) self.failUnless(t4.dst() is None) self.assertRaises(TypeError, t1.dst, "no args") self.assertEqual(hash(t1), hash(t2)) self.assertEqual(hash(t1), hash(t3)) self.assertEqual(hash(t2), hash(t3)) 
self.assertEqual(t1, t2) self.assertEqual(t1, t3) self.assertEqual(t2, t3) self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive self.assertEqual(str(t1), "07:47:00-05:00") self.assertEqual(str(t2), "12:47:00+00:00") self.assertEqual(str(t3), "13:47:00+01:00") self.assertEqual(str(t4), "00:00:00.000040") self.assertEqual(str(t5), "00:00:00.000040+00:00") self.assertEqual(t1.isoformat(), "07:47:00-05:00") self.assertEqual(t2.isoformat(), "12:47:00+00:00") self.assertEqual(t3.isoformat(), "13:47:00+01:00") self.assertEqual(t4.isoformat(), "00:00:00.000040") self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00") d = 'datetime.time' self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)") self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)") self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)") self.assertEqual(repr(t4), d + "(0, 0, 0, 40)") self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)") self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"), "07:47:00 %Z=EST %z=-0500") self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000") self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100") yuck = FixedOffset(-1439, "%z %Z %%z%%Z") t1 = time(23, 59, tzinfo=yuck) self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"), "23:59 %Z='%z %Z %%z%%Z' %z='-2359'") # Check that an invalid tzname result raises an exception. class Badtzname(tzinfo): def tzname(self, dt): return 42 t = time(2, 3, 4, tzinfo=Badtzname()) self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04") self.assertRaises(TypeError, t.strftime, "%Z") def test_hash_edge_cases(self): # Offsets that overflow a basic time. 
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, "")) t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, "")) self.assertEqual(hash(t1), hash(t2)) t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, "")) t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, "")) self.assertEqual(hash(t1), hash(t2)) def test_pickling(self): # Try one without a tzinfo. args = 20, 59, 16, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) # Try one with a tzinfo. tinfo = PicklableFixedOffset(-300, 'cookie') orig = self.theclass(5, 6, 7, tzinfo=tinfo) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) self.failUnless(isinstance(derived.tzinfo, PicklableFixedOffset)) self.assertEqual(derived.utcoffset(), timedelta(minutes=-300)) self.assertEqual(derived.tzname(), 'cookie') def test_more_bool(self): # Test cases with non-None tzinfo. cls = self.theclass t = cls(0, tzinfo=FixedOffset(-300, "")) self.failUnless(t) t = cls(5, tzinfo=FixedOffset(-300, "")) self.failUnless(t) t = cls(5, tzinfo=FixedOffset(300, "")) self.failUnless(not t) t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, "")) self.failUnless(not t) # Mostly ensuring this doesn't overflow internally. t = cls(0, tzinfo=FixedOffset(23*60 + 59, "")) self.failUnless(t) # But this should yield a value error -- the utcoffset is bogus. t = cls(0, tzinfo=FixedOffset(24*60, "")) self.assertRaises(ValueError, lambda: bool(t)) # Likewise. 
t = cls(0, tzinfo=FixedOffset(-24*60, "")) self.assertRaises(ValueError, lambda: bool(t)) def test_replace(self): cls = self.theclass z100 = FixedOffset(100, "+100") zm200 = FixedOffset(timedelta(minutes=-200), "-200") args = [1, 2, 3, 4, z100] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8), ("tzinfo", zm200)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Ensure we can get rid of a tzinfo. self.assertEqual(base.tzname(), "+100") base2 = base.replace(tzinfo=None) self.failUnless(base2.tzinfo is None) self.failUnless(base2.tzname() is None) # Ensure we can add one. base3 = base2.replace(tzinfo=z100) self.assertEqual(base, base3) self.failUnless(base.tzinfo is base3.tzinfo) # Out of bounds. base = cls(1) self.assertRaises(ValueError, base.replace, hour=24) self.assertRaises(ValueError, base.replace, minute=-1) self.assertRaises(ValueError, base.replace, second=100) self.assertRaises(ValueError, base.replace, microsecond=1000000) def test_mixed_compare(self): t1 = time(1, 2, 3) t2 = time(1, 2, 3) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=None) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(None, "")) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(0, "")) self.assertRaises(TypeError, lambda: t1 == t2) # In time w/ identical tzinfo objects, utcoffset is ignored. class Varies(tzinfo): def __init__(self): self.offset = timedelta(minutes=22) def utcoffset(self, t): self.offset += timedelta(minutes=1) return self.offset v = Varies() t1 = t2.replace(tzinfo=v) t2 = t2.replace(tzinfo=v) self.assertEqual(t1.utcoffset(), timedelta(minutes=23)) self.assertEqual(t2.utcoffset(), timedelta(minutes=24)) self.assertEqual(t1, t2) # But if they're not identical, it isn't ignored. 
t2 = t2.replace(tzinfo=Varies()) self.failUnless(t1 < t2) # t1's offset counter still going up def test_subclass_timetz(self): class C(self.theclass): theAnswer = 42 def __new__(cls, *args, **kws): temp = kws.copy() extra = temp.pop('extra') result = self.theclass.__new__(cls, *args, **temp) result.extra = extra return result def newmeth(self, start): return start + self.hour + self.second args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1) dt1 = self.theclass(*args) dt2 = C(*args, **{'extra': 7}) self.assertEqual(dt2.__class__, C) self.assertEqual(dt2.theAnswer, 42) self.assertEqual(dt2.extra, 7) self.assertEqual(dt1.utcoffset(), dt2.utcoffset()) self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7) # Testing datetime objects with a non-None tzinfo. class TestDateTimeTZ(TestDateTime, TZInfoBase): theclass = datetime def test_trivial(self): dt = self.theclass(1, 2, 3, 4, 5, 6, 7) self.assertEqual(dt.year, 1) self.assertEqual(dt.month, 2) self.assertEqual(dt.day, 3) self.assertEqual(dt.hour, 4) self.assertEqual(dt.minute, 5) self.assertEqual(dt.second, 6) self.assertEqual(dt.microsecond, 7) self.assertEqual(dt.tzinfo, None) def test_even_more_compare(self): # The test_compare() and test_more_compare() inherited from TestDate # and TestDateTime covered non-tzinfo cases. # Smallest possible after UTC adjustment. t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "")) # Largest possible after UTC adjustment. t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=FixedOffset(-1439, "")) # Make sure those compare correctly, and w/o overflow. self.failUnless(t1 < t2) self.failUnless(t1 != t2) self.failUnless(t2 > t1) self.failUnless(t1 == t1) self.failUnless(t2 == t2) # Equal afer adjustment. t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, "")) t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, "")) self.assertEqual(t1, t2) # Change t1 not to subtract a minute, and t1 should be larger. 
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, "")) self.failUnless(t1 > t2) # Change t1 to subtract 2 minutes, and t1 should be smaller. t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, "")) self.failUnless(t1 < t2) # Back to the original t1, but make seconds resolve it. t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""), second=1) self.failUnless(t1 > t2) # Likewise, but make microseconds resolve it. t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""), microsecond=1) self.failUnless(t1 > t2) # Make t2 naive and it should fail. t2 = self.theclass.min self.assertRaises(TypeError, lambda: t1 == t2) self.assertEqual(t2, t2) # It's also naive if it has tzinfo but tzinfo.utcoffset() is None. class Naive(tzinfo): def utcoffset(self, dt): return None t2 = self.theclass(5, 6, 7, tzinfo=Naive()) self.assertRaises(TypeError, lambda: t1 == t2) self.assertEqual(t2, t2) # OTOH, it's OK to compare two of these mixing the two ways of being # naive. t1 = self.theclass(5, 6, 7) self.assertEqual(t1, t2) # Try a bogus uctoffset. class Bogus(tzinfo): def utcoffset(self, dt): return timedelta(minutes=1440) # out of bounds t1 = self.theclass(2, 2, 2, tzinfo=Bogus()) t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, "")) self.assertRaises(ValueError, lambda: t1 == t2) def test_pickling(self): # Try one without a tzinfo. args = 6, 7, 23, 20, 59, 1, 64**2 orig = self.theclass(*args) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) # Try one with a tzinfo. 
tinfo = PicklableFixedOffset(-300, 'cookie') orig = self.theclass(*args, **{'tzinfo': tinfo}) derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0)) for pickler, unpickler, proto in pickle_choices: green = pickler.dumps(orig, proto) derived = unpickler.loads(green) self.assertEqual(orig, derived) self.failUnless(isinstance(derived.tzinfo, PicklableFixedOffset)) self.assertEqual(derived.utcoffset(), timedelta(minutes=-300)) self.assertEqual(derived.tzname(), 'cookie') def test_extreme_hashes(self): # If an attempt is made to hash these via subtracting the offset # then hashing a datetime object, OverflowError results. The # Python implementation used to blow up here. t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "")) hash(t) t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=FixedOffset(-1439, "")) hash(t) # OTOH, an OOB offset should blow up. t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, "")) self.assertRaises(ValueError, hash, t) def test_zones(self): est = FixedOffset(-300, "EST") utc = FixedOffset(0, "UTC") met = FixedOffset(60, "MET") t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est) t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc) t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met) self.assertEqual(t1.tzinfo, est) self.assertEqual(t2.tzinfo, utc) self.assertEqual(t3.tzinfo, met) self.assertEqual(t1.utcoffset(), timedelta(minutes=-300)) self.assertEqual(t2.utcoffset(), timedelta(minutes=0)) self.assertEqual(t3.utcoffset(), timedelta(minutes=60)) self.assertEqual(t1.tzname(), "EST") self.assertEqual(t2.tzname(), "UTC") self.assertEqual(t3.tzname(), "MET") self.assertEqual(hash(t1), hash(t2)) self.assertEqual(hash(t1), hash(t3)) self.assertEqual(hash(t2), hash(t3)) self.assertEqual(t1, t2) self.assertEqual(t1, t3) self.assertEqual(t2, t3) self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00") self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00") self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00") d = 'datetime.datetime(2002, 3, 19, ' 
self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)") self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)") self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)") def test_combine(self): met = FixedOffset(60, "MET") d = date(2002, 3, 4) tz = time(18, 45, 3, 1234, tzinfo=met) dt = datetime.combine(d, tz) self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)) def test_extract(self): met = FixedOffset(60, "MET") dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met) self.assertEqual(dt.date(), date(2002, 3, 4)) self.assertEqual(dt.time(), time(18, 45, 3, 1234)) self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met)) def test_tz_aware_arithmetic(self): import random now = self.theclass.now() tz55 = FixedOffset(-330, "west 5:30") timeaware = now.time().replace(tzinfo=tz55) nowaware = self.theclass.combine(now.date(), timeaware) self.failUnless(nowaware.tzinfo is tz55) self.assertEqual(nowaware.timetz(), timeaware) # Can't mix aware and non-aware. self.assertRaises(TypeError, lambda: now - nowaware) self.assertRaises(TypeError, lambda: nowaware - now) # And adding datetime's doesn't make sense, aware or not. self.assertRaises(TypeError, lambda: now + nowaware) self.assertRaises(TypeError, lambda: nowaware + now) self.assertRaises(TypeError, lambda: nowaware + nowaware) # Subtracting should yield 0. self.assertEqual(now - now, timedelta(0)) self.assertEqual(nowaware - nowaware, timedelta(0)) # Adding a delta should preserve tzinfo. delta = timedelta(weeks=1, minutes=12, microseconds=5678) nowawareplus = nowaware + delta self.failUnless(nowaware.tzinfo is tz55) nowawareplus2 = delta + nowaware self.failUnless(nowawareplus2.tzinfo is tz55) self.assertEqual(nowawareplus, nowawareplus2) # that - delta should be what we started with, and that - what we # started with should be delta. 
diff = nowawareplus - delta self.failUnless(diff.tzinfo is tz55) self.assertEqual(nowaware, diff) self.assertRaises(TypeError, lambda: delta - nowawareplus) self.assertEqual(nowawareplus - nowaware, delta) # Make up a random timezone. tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone") # Attach it to nowawareplus. nowawareplus = nowawareplus.replace(tzinfo=tzr) self.failUnless(nowawareplus.tzinfo is tzr) # Make sure the difference takes the timezone adjustments into account. got = nowaware - nowawareplus # Expected: (nowaware base - nowaware offset) - # (nowawareplus base - nowawareplus offset) = # (nowaware base - nowawareplus base) + # (nowawareplus offset - nowaware offset) = # -delta + nowawareplus offset - nowaware offset expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta self.assertEqual(got, expected) # Try max possible difference. min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min")) max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=FixedOffset(-1439, "max")) maxdiff = max - min self.assertEqual(maxdiff, self.theclass.max - self.theclass.min + timedelta(minutes=2*1439)) def test_tzinfo_now(self): meth = self.theclass.now # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth() # Try with and without naming the keyword. off42 = FixedOffset(42, "42") another = meth(off42) again = meth(tz=off42) self.failUnless(another.tzinfo is again.tzinfo) self.assertEqual(another.utcoffset(), timedelta(minutes=42)) # Bad argument with and w/o naming the keyword. self.assertRaises(TypeError, meth, 16) self.assertRaises(TypeError, meth, tzinfo=16) # Bad keyword name. self.assertRaises(TypeError, meth, tinfo=off42) # Too many args. self.assertRaises(TypeError, meth, off42, off42) # We don't know which time zone we're in, and don't have a tzinfo # class to represent it, so seeing whether a tz argument actually # does a conversion is tricky. 
weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0) utc = FixedOffset(0, "utc", 0) for dummy in range(3): now = datetime.now(weirdtz) self.failUnless(now.tzinfo is weirdtz) utcnow = datetime.utcnow().replace(tzinfo=utc) now2 = utcnow.astimezone(weirdtz) if abs(now - now2) < timedelta(seconds=30): break # Else the code is broken, or more than 30 seconds passed between # calls; assuming the latter, just try again. else: # Three strikes and we're out. self.fail("utcnow(), now(tz), or astimezone() may be broken") def test_tzinfo_fromtimestamp(self): import time meth = self.theclass.fromtimestamp ts = time.time() # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth(ts) # Try with and without naming the keyword. off42 = FixedOffset(42, "42") another = meth(ts, off42) again = meth(ts, tz=off42) self.failUnless(another.tzinfo is again.tzinfo) self.assertEqual(another.utcoffset(), timedelta(minutes=42)) # Bad argument with and w/o naming the keyword. self.assertRaises(TypeError, meth, ts, 16) self.assertRaises(TypeError, meth, ts, tzinfo=16) # Bad keyword name. self.assertRaises(TypeError, meth, ts, tinfo=off42) # Too many args. self.assertRaises(TypeError, meth, ts, off42, off42) # Too few args. self.assertRaises(TypeError, meth) # Try to make sure tz= actually does some conversion. timestamp = 1000000000 utcdatetime = datetime.utcfromtimestamp(timestamp) # In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take. # But on some flavor of Mac, it's nowhere near that. So we can't have # any idea here what time that actually is, we can only test that # relative changes match. 
utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero tz = FixedOffset(utcoffset, "tz", 0) expected = utcdatetime + utcoffset got = datetime.fromtimestamp(timestamp, tz) self.assertEqual(expected, got.replace(tzinfo=None)) def test_tzinfo_utcnow(self): meth = self.theclass.utcnow # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth() # Try with and without naming the keyword; for whatever reason, # utcnow() doesn't accept a tzinfo argument. off42 = FixedOffset(42, "42") self.assertRaises(TypeError, meth, off42) self.assertRaises(TypeError, meth, tzinfo=off42) def test_tzinfo_utcfromtimestamp(self): import time meth = self.theclass.utcfromtimestamp ts = time.time() # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up). base = meth(ts) # Try with and without naming the keyword; for whatever reason, # utcfromtimestamp() doesn't accept a tzinfo argument. off42 = FixedOffset(42, "42") self.assertRaises(TypeError, meth, ts, off42) self.assertRaises(TypeError, meth, ts, tzinfo=off42) def test_tzinfo_timetuple(self): # TestDateTime tested most of this. datetime adds a twist to the # DST flag. class DST(tzinfo): def __init__(self, dstvalue): if isinstance(dstvalue, int): dstvalue = timedelta(minutes=dstvalue) self.dstvalue = dstvalue def dst(self, dt): return self.dstvalue cls = self.theclass for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1): d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue)) t = d.timetuple() self.assertEqual(1, t.tm_year) self.assertEqual(1, t.tm_mon) self.assertEqual(1, t.tm_mday) self.assertEqual(10, t.tm_hour) self.assertEqual(20, t.tm_min) self.assertEqual(30, t.tm_sec) self.assertEqual(0, t.tm_wday) self.assertEqual(1, t.tm_yday) self.assertEqual(flag, t.tm_isdst) # dst() returns wrong type. self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple) # dst() at the edge. 
self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1) self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1) # dst() out of range. self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple) self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple) def test_utctimetuple(self): class DST(tzinfo): def __init__(self, dstvalue): if isinstance(dstvalue, int): dstvalue = timedelta(minutes=dstvalue) self.dstvalue = dstvalue def dst(self, dt): return self.dstvalue cls = self.theclass # This can't work: DST didn't implement utcoffset. self.assertRaises(NotImplementedError, cls(1, 1, 1, tzinfo=DST(0)).utcoffset) class UOFS(DST): def __init__(self, uofs, dofs=None): DST.__init__(self, dofs) self.uofs = timedelta(minutes=uofs) def utcoffset(self, dt): return self.uofs # Ensure tm_isdst is 0 regardless of what dst() says: DST is never # in effect for a UTC time. for dstvalue in -33, 33, 0, None: d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue)) t = d.utctimetuple() self.assertEqual(d.year, t.tm_year) self.assertEqual(d.month, t.tm_mon) self.assertEqual(d.day, t.tm_mday) self.assertEqual(11, t.tm_hour) # 20mm + 53mm = 1hn + 13mm self.assertEqual(13, t.tm_min) self.assertEqual(d.second, t.tm_sec) self.assertEqual(d.weekday(), t.tm_wday) self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1, t.tm_yday) self.assertEqual(0, t.tm_isdst) # At the edges, UTC adjustment can normalize into years out-of-range # for a datetime object. Ensure that a correct timetuple is # created anyway. tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439)) # That goes back 1 minute less than a full day. 
t = tiny.utctimetuple() self.assertEqual(t.tm_year, MINYEAR-1) self.assertEqual(t.tm_mon, 12) self.assertEqual(t.tm_mday, 31) self.assertEqual(t.tm_hour, 0) self.assertEqual(t.tm_min, 1) self.assertEqual(t.tm_sec, 37) self.assertEqual(t.tm_yday, 366) # "year 0" is a leap year self.assertEqual(t.tm_isdst, 0) huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439)) # That goes forward 1 minute less than a full day. t = huge.utctimetuple() self.assertEqual(t.tm_year, MAXYEAR+1) self.assertEqual(t.tm_mon, 1) self.assertEqual(t.tm_mday, 1) self.assertEqual(t.tm_hour, 23) self.assertEqual(t.tm_min, 58) self.assertEqual(t.tm_sec, 37) self.assertEqual(t.tm_yday, 1) self.assertEqual(t.tm_isdst, 0) def test_tzinfo_isoformat(self): zero = FixedOffset(0, "+00:00") plus = FixedOffset(220, "+03:40") minus = FixedOffset(-231, "-03:51") unknown = FixedOffset(None, "") cls = self.theclass datestr = '0001-02-03' for ofs in None, zero, plus, minus, unknown: for us in 0, 987001: d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs) timestr = '04:05:59' + (us and '.987001' or '') ofsstr = ofs is not None and d.tzname() or '' tailstr = timestr + ofsstr iso = d.isoformat() self.assertEqual(iso, datestr + 'T' + tailstr) self.assertEqual(iso, d.isoformat('T')) self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr) self.assertEqual(str(d), datestr + ' ' + tailstr) def test_replace(self): cls = self.theclass z100 = FixedOffset(100, "+100") zm200 = FixedOffset(timedelta(minutes=-200), "-200") args = [1, 2, 3, 4, 5, 6, 7, z100] base = cls(*args) self.assertEqual(base, base.replace()) i = 0 for name, newval in (("year", 2), ("month", 3), ("day", 4), ("hour", 5), ("minute", 6), ("second", 7), ("microsecond", 8), ("tzinfo", zm200)): newargs = args[:] newargs[i] = newval expected = cls(*newargs) got = base.replace(**{name: newval}) self.assertEqual(expected, got) i += 1 # Ensure we can get rid of a tzinfo. 
self.assertEqual(base.tzname(), "+100") base2 = base.replace(tzinfo=None) self.failUnless(base2.tzinfo is None) self.failUnless(base2.tzname() is None) # Ensure we can add one. base3 = base2.replace(tzinfo=z100) self.assertEqual(base, base3) self.failUnless(base.tzinfo is base3.tzinfo) # Out of bounds. base = cls(2000, 2, 29) self.assertRaises(ValueError, base.replace, year=2001) def test_more_astimezone(self): # The inherited test_astimezone covered some trivial and error cases. fnone = FixedOffset(None, "None") f44m = FixedOffset(44, "44") fm5h = FixedOffset(-timedelta(hours=5), "m300") dt = self.theclass.now(tz=f44m) self.failUnless(dt.tzinfo is f44m) # Replacing with degenerate tzinfo raises an exception. self.assertRaises(ValueError, dt.astimezone, fnone) # Ditto with None tz. self.assertRaises(TypeError, dt.astimezone, None) # Replacing with same tzinfo makes no change. x = dt.astimezone(dt.tzinfo) self.failUnless(x.tzinfo is f44m) self.assertEqual(x.date(), dt.date()) self.assertEqual(x.time(), dt.time()) # Replacing with different tzinfo does adjust. got = dt.astimezone(fm5h) self.failUnless(got.tzinfo is fm5h) self.assertEqual(got.utcoffset(), timedelta(hours=-5)) expected = dt - dt.utcoffset() # in effect, convert to UTC expected += fm5h.utcoffset(dt) # and from there to local time expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo self.assertEqual(got.date(), expected.date()) self.assertEqual(got.time(), expected.time()) self.assertEqual(got.timetz(), expected.timetz()) self.failUnless(got.tzinfo is expected.tzinfo) self.assertEqual(got, expected) def test_aware_subtract(self): cls = self.theclass # Ensure that utcoffset() is ignored when the operands have the # same tzinfo member. 
class OperandDependentOffset(tzinfo): def utcoffset(self, t): if t.minute < 10: # d0 and d1 equal after adjustment return timedelta(minutes=t.minute) else: # d2 off in the weeds return timedelta(minutes=59) base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset()) d0 = base.replace(minute=3) d1 = base.replace(minute=9) d2 = base.replace(minute=11) for x in d0, d1, d2: for y in d0, d1, d2: got = x - y expected = timedelta(minutes=x.minute - y.minute) self.assertEqual(got, expected) # OTOH, if the tzinfo members are distinct, utcoffsets aren't # ignored. base = cls(8, 9, 10, 11, 12, 13, 14) d0 = base.replace(minute=3, tzinfo=OperandDependentOffset()) d1 = base.replace(minute=9, tzinfo=OperandDependentOffset()) d2 = base.replace(minute=11, tzinfo=OperandDependentOffset()) for x in d0, d1, d2: for y in d0, d1, d2: got = x - y if (x is d0 or x is d1) and (y is d0 or y is d1): expected = timedelta(0) elif x is y is d2: expected = timedelta(0) elif x is d2: expected = timedelta(minutes=(11-59)-0) else: assert y is d2 expected = timedelta(minutes=0-(11-59)) self.assertEqual(got, expected) def test_mixed_compare(self): t1 = datetime(1, 2, 3, 4, 5, 6, 7) t2 = datetime(1, 2, 3, 4, 5, 6, 7) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=None) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(None, "")) self.assertEqual(t1, t2) t2 = t2.replace(tzinfo=FixedOffset(0, "")) self.assertRaises(TypeError, lambda: t1 == t2) # In datetime w/ identical tzinfo objects, utcoffset is ignored. class Varies(tzinfo): def __init__(self): self.offset = timedelta(minutes=22) def utcoffset(self, t): self.offset += timedelta(minutes=1) return self.offset v = Varies() t1 = t2.replace(tzinfo=v) t2 = t2.replace(tzinfo=v) self.assertEqual(t1.utcoffset(), timedelta(minutes=23)) self.assertEqual(t2.utcoffset(), timedelta(minutes=24)) self.assertEqual(t1, t2) # But if they're not identical, it isn't ignored. 
        t2 = t2.replace(tzinfo=Varies())
        self.failUnless(t1 < t2)  # t1's offset counter still going up

    def test_subclass_datetimetz(self):
        # Subclassing datetime-with-tzinfo must preserve the subclass through
        # construction: class attrs, extra kwargs, and new methods all survive.

        class C(self.theclass):
            theAnswer = 42

            def __new__(cls, *args, **kws):
                # Pop the subclass-only 'extra' kwarg before delegating to the
                # base datetime constructor, then stash it on the instance.
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result

            def newmeth(self, start):
                return start + self.hour + self.year

        args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)

        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})

        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)

# Pain to set up DST-aware tzinfo classes.

def first_sunday_on_or_after(dt):
    """Return the first Sunday falling on or after the date/datetime *dt*.

    If *dt* is already a Sunday (weekday() == 6) it is returned unchanged.
    """
    days_to_go = 6 - dt.weekday()
    if days_to_go:
        dt += timedelta(days_to_go)
    return dt

# Convenient deltas used throughout the US-DST machinery below.
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
# (Year 1 is a placeholder; .replace(year=...) substitutes the real year.)
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25.  Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)

class USTimeZone(tzinfo):
    """A DST-aware tzinfo modeling a US time zone (pre-2007 DST rules)."""

    def __init__(self, hours, reprname, stdname, dstname):
        # hours: standard-time offset from UTC, e.g. -5 for Eastern.
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname

    def __repr__(self):
        return self.reprname

    def tzname(self, dt):
        # Name depends on whether *dt* falls in daylight time.
        if self.dst(dt):
            return self.dstname
        else:
            return self.stdname

    def utcoffset(self, dt):
        # Total offset = standard offset plus the (possibly ZERO) DST shift.
        return self.stdoffset + self.dst(dt)

    def dst(self, dt):
        """Return HOUR when *dt* is inside the DST window, else ZERO."""
        if dt is None or dt.tzinfo is None:
            # An exception instead may be sensible here, in one or more of
            # the cases.
            return ZERO
        assert dt.tzinfo is self

        # Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year)) assert start.weekday() == 6 and start.month == 4 and start.day <= 7 # Find last Sunday in October. end = first_sunday_on_or_after(DSTEND.replace(year=dt.year)) assert end.weekday() == 6 and end.month == 10 and end.day >= 25 # Can't compare naive to aware objects, so strip the timezone from # dt first. if start <= dt.replace(tzinfo=None) < end: return HOUR else: return ZERO Eastern = USTimeZone(-5, "Eastern", "EST", "EDT") Central = USTimeZone(-6, "Central", "CST", "CDT") Mountain = USTimeZone(-7, "Mountain", "MST", "MDT") Pacific = USTimeZone(-8, "Pacific", "PST", "PDT") utc_real = FixedOffset(0, "UTC", 0) # For better test coverage, we want another flavor of UTC that's west of # the Eastern and Pacific timezones. utc_fake = FixedOffset(-12*60, "UTCfake", 0) class TestTimezoneConversions(unittest.TestCase): # The DST switch times for 2002, in std time. dston = datetime(2002, 4, 7, 2) dstoff = datetime(2002, 10, 27, 1) theclass = datetime # Check a time that's inside DST. def checkinside(self, dt, tz, utc, dston, dstoff): self.assertEqual(dt.dst(), HOUR) # Conversion to our own timezone is always an identity. self.assertEqual(dt.astimezone(tz), dt) asutc = dt.astimezone(utc) there_and_back = asutc.astimezone(tz) # Conversion to UTC and back isn't always an identity here, # because there are redundant spellings (in local time) of # UTC time when DST begins: the clock jumps from 1:59:59 # to 3:00:00, and a local time of 2:MM:SS doesn't really # make sense then. The classes above treat 2:MM:SS as # daylight time then (it's "after 2am"), really an alias # for 1:MM:SS standard time. The latter form is what # conversion back from UTC produces. if dt.date() == dston.date() and dt.hour == 2: # We're in the redundant hour, and coming back from # UTC gives the 1:MM:SS standard-time spelling. 
self.assertEqual(there_and_back + HOUR, dt) # Although during was considered to be in daylight # time, there_and_back is not. self.assertEqual(there_and_back.dst(), ZERO) # They're the same times in UTC. self.assertEqual(there_and_back.astimezone(utc), dt.astimezone(utc)) else: # We're not in the redundant hour. self.assertEqual(dt, there_and_back) # Because we have a redundant spelling when DST begins, there is # (unforunately) an hour when DST ends that can't be spelled at all in # local time. When DST ends, the clock jumps from 1:59 back to 1:00 # again. The hour 1:MM DST has no spelling then: 1:MM is taken to be # standard time. 1:MM DST == 0:MM EST, but 0:MM is taken to be # daylight time. The hour 1:MM daylight == 0:MM standard can't be # expressed in local time. Nevertheless, we want conversion back # from UTC to mimic the local clock's "repeat an hour" behavior. nexthour_utc = asutc + HOUR nexthour_tz = nexthour_utc.astimezone(tz) if dt.date() == dstoff.date() and dt.hour == 0: # We're in the hour before the last DST hour. The last DST hour # is ineffable. We want the conversion back to repeat 1:MM. self.assertEqual(nexthour_tz, dt.replace(hour=1)) nexthour_utc += HOUR nexthour_tz = nexthour_utc.astimezone(tz) self.assertEqual(nexthour_tz, dt.replace(hour=1)) else: self.assertEqual(nexthour_tz - dt, HOUR) # Check a time that's outside DST. def checkoutside(self, dt, tz, utc): self.assertEqual(dt.dst(), ZERO) # Conversion to our own timezone is always an identity. self.assertEqual(dt.astimezone(tz), dt) # Converting to UTC and back is an identity too. asutc = dt.astimezone(utc) there_and_back = asutc.astimezone(tz) self.assertEqual(dt, there_and_back) def convert_between_tz_and_utc(self, tz, utc): dston = self.dston.replace(tzinfo=tz) # Because 1:MM on the day DST ends is taken as being standard time, # there is no spelling in tz for the last hour of daylight time. 
# For purposes of the test, the last hour of DST is 0:MM, which is # taken as being daylight time (and 1:MM is taken as being standard # time). dstoff = self.dstoff.replace(tzinfo=tz) for delta in (timedelta(weeks=13), DAY, HOUR, timedelta(minutes=1), timedelta(microseconds=1)): self.checkinside(dston, tz, utc, dston, dstoff) for during in dston + delta, dstoff - delta: self.checkinside(during, tz, utc, dston, dstoff) self.checkoutside(dstoff, tz, utc) for outside in dston - delta, dstoff + delta: self.checkoutside(outside, tz, utc) def test_easy(self): # Despite the name of this test, the endcases are excruciating. self.convert_between_tz_and_utc(Eastern, utc_real) self.convert_between_tz_and_utc(Pacific, utc_real) self.convert_between_tz_and_utc(Eastern, utc_fake) self.convert_between_tz_and_utc(Pacific, utc_fake) # The next is really dancing near the edge. It works because # Pacific and Eastern are far enough apart that their "problem # hours" don't overlap. self.convert_between_tz_and_utc(Eastern, Pacific) self.convert_between_tz_and_utc(Pacific, Eastern) # OTOH, these fail! Don't enable them. The difficulty is that # the edge case tests assume that every hour is representable in # the "utc" class. This is always true for a fixed-offset tzinfo # class (lke utc_real and utc_fake), but not for Eastern or Central. # For these adjacent DST-aware time zones, the range of time offsets # tested ends up creating hours in the one that aren't representable # in the other. For the same reason, we would see failures in the # Eastern vs Pacific tests too if we added 3*HOUR to the list of # offset deltas in convert_between_tz_and_utc(). # # self.convert_between_tz_and_utc(Eastern, Central) # can't work # self.convert_between_tz_and_utc(Central, Eastern) # can't work def test_tricky(self): # 22:00 on day before daylight starts. 
fourback = self.dston - timedelta(hours=4) ninewest = FixedOffset(-9*60, "-0900", 0) fourback = fourback.replace(tzinfo=ninewest) # 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after # 2", we should get the 3 spelling. # If we plug 22:00 the day before into Eastern, it "looks like std # time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4 # to 22:00 lands on 2:00, which makes no sense in local time (the # local clock jumps from 1 to 3). The point here is to make sure we # get the 3 spelling. expected = self.dston.replace(hour=3) got = fourback.astimezone(Eastern).replace(tzinfo=None) self.assertEqual(expected, got) # Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that # case we want the 1:00 spelling. sixutc = self.dston.replace(hour=6, tzinfo=utc_real) # Now 6:00 "looks like daylight", so the offset wrt Eastern is -4, # and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST # spelling. expected = self.dston.replace(hour=1) got = sixutc.astimezone(Eastern).replace(tzinfo=None) self.assertEqual(expected, got) # Now on the day DST ends, we want "repeat an hour" behavior. # UTC 4:MM 5:MM 6:MM 7:MM checking these # EST 23:MM 0:MM 1:MM 2:MM # EDT 0:MM 1:MM 2:MM 3:MM # wall 0:MM 1:MM 1:MM 2:MM against these for utc in utc_real, utc_fake: for tz in Eastern, Pacific: first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM # Convert that to UTC. first_std_hour -= tz.utcoffset(None) # Adjust for possibly fake UTC. asutc = first_std_hour + utc.utcoffset(None) # First UTC hour to convert; this is 4:00 when utc=utc_real & # tz=Eastern. 
asutcbase = asutc.replace(tzinfo=utc) for tzhour in (0, 1, 1, 2): expectedbase = self.dstoff.replace(hour=tzhour) for minute in 0, 30, 59: expected = expectedbase.replace(minute=minute) asutc = asutcbase.replace(minute=minute) astz = asutc.astimezone(tz) self.assertEqual(astz.replace(tzinfo=None), expected) asutcbase += HOUR def test_bogus_dst(self): class ok(tzinfo): def utcoffset(self, dt): return HOUR def dst(self, dt): return HOUR now = self.theclass.now().replace(tzinfo=utc_real) # Doesn't blow up. now.astimezone(ok()) # Does blow up. class notok(ok): def dst(self, dt): return None self.assertRaises(ValueError, now.astimezone, notok()) def test_fromutc(self): self.assertRaises(TypeError, Eastern.fromutc) # not enough args now = datetime.utcnow().replace(tzinfo=utc_real) self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo now = now.replace(tzinfo=Eastern) # insert correct tzinfo enow = Eastern.fromutc(now) # doesn't blow up self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type # Always converts UTC to standard time. class FauxUSTimeZone(USTimeZone): def fromutc(self, dt): return dt + self.stdoffset FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT") # UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM # EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM # EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM # Check around DST start. start = self.dston.replace(hour=4, tzinfo=Eastern) fstart = start.replace(tzinfo=FEastern) for wall in 23, 0, 1, 3, 4, 5: expected = start.replace(hour=wall) if wall == 23: expected -= timedelta(days=1) got = Eastern.fromutc(start) self.assertEqual(expected, got) expected = fstart + FEastern.stdoffset got = FEastern.fromutc(fstart) self.assertEqual(expected, got) # Ensure astimezone() calls fromutc() too. 
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern) self.assertEqual(expected, got) start += HOUR fstart += HOUR # Check around DST end. start = self.dstoff.replace(hour=4, tzinfo=Eastern) fstart = start.replace(tzinfo=FEastern) for wall in 0, 1, 1, 2, 3, 4: expected = start.replace(hour=wall) got = Eastern.fromutc(start) self.assertEqual(expected, got) expected = fstart + FEastern.stdoffset got = FEastern.fromutc(fstart) self.assertEqual(expected, got) # Ensure astimezone() calls fromutc() too. got = fstart.replace(tzinfo=utc_real).astimezone(FEastern) self.assertEqual(expected, got) start += HOUR fstart += HOUR ############################################################################# # oddballs class Oddballs(unittest.TestCase): def test_bug_1028306(self): # Trying to compare a date to a datetime should act like a mixed- # type comparison, despite that datetime is a subclass of date. as_date = date.today() as_datetime = datetime.combine(as_date, time()) self.assert_(as_date != as_datetime) self.assert_(as_datetime != as_date) self.assert_(not as_date == as_datetime) self.assert_(not as_datetime == as_date) self.assertRaises(TypeError, lambda: as_date < as_datetime) self.assertRaises(TypeError, lambda: as_datetime < as_date) self.assertRaises(TypeError, lambda: as_date <= as_datetime) self.assertRaises(TypeError, lambda: as_datetime <= as_date) self.assertRaises(TypeError, lambda: as_date > as_datetime) self.assertRaises(TypeError, lambda: as_datetime > as_date) self.assertRaises(TypeError, lambda: as_date >= as_datetime) self.assertRaises(TypeError, lambda: as_datetime >= as_date) # Neverthelss, comparison should work with the base-class (date) # projection if use of a date method is forced. self.assert_(as_date.__eq__(as_datetime)) different_day = (as_date.day + 1) % 20 + 1 self.assert_(not as_date.__eq__(as_datetime.replace(day= different_day))) # And date should compare with other subclasses of date. 
If a
        # subclass wants to stop this, it's up to the subclass to do so.
        date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
        self.assertEqual(as_date, date_sc)
        self.assertEqual(date_sc, as_date)

        # Ditto for datetimes.
        datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
                                       as_date.day, 0, 0, 0)
        self.assertEqual(as_datetime, datetime_sc)
        self.assertEqual(datetime_sc, as_datetime)

def test_suite():
    """Return a TestSuite aggregating every TestCase class in this module."""
    allsuites = [unittest.makeSuite(klass, 'test')
                 for klass in (TestModule,
                               TestTZInfo,
                               TestTimeDelta,
                               TestDateOnly,
                               TestDate,
                               TestDateTime,
                               TestTime,
                               TestTimeTZ,
                               TestDateTimeTZ,
                               TestTimezoneConversions,
                               Oddballs,
                              )
                ]
    return unittest.TestSuite(allsuites)

def test_main():
    """Run the whole suite once; optionally loop for refcount-leak hunting.

    With the ``if 1`` below left as-is the loop runs exactly once.  Changing
    it to ``if 0`` under a debug (gettotalrefcount) build repeats the suite
    and prints the total-refcount delta between runs to spot leaks.
    """
    import gc
    import sys
    thesuite = test_suite()
    lastrc = None
    while True:
        test_support.run_suite(thesuite)
        if 1:    # change to 0, under a debug build, for some leak detection
            break
        gc.collect()
        if gc.garbage:
            raise SystemError("gc.garbage not empty after test run: %r" %
                              gc.garbage)
        if hasattr(sys, 'gettotalrefcount'):
            thisrc = sys.gettotalrefcount()
            # Trailing comma: keep 'delta:' (or nothing) on the same line.
            print >> sys.stderr, '*' * 10, 'total refs:', thisrc,
            if lastrc:
                print >> sys.stderr, 'delta:', thisrc - lastrc
            else:
                print >> sys.stderr
            lastrc = thisrc

if __name__ == "__main__":
    test_main()
epl-1.0
mkhutornenko/incubator-aurora
src/main/python/apache/aurora/client/binding_helper.py
1
4930
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from abc import abstractmethod, abstractproperty

from twitter.common.lang import Interface

__all__ = (
  'BindingHelper',
  'CachingBindingHelper',
  'apply_all',
  'clear_binding_caches',
  'unregister_all',
)


# The registry for binding helpers.  Module-level and mutated in place so
# that register()/unregister_all() affect every caller of apply_all().
_BINDING_HELPERS = []


# TODO(wickman) Update the pydocs to remove references to common_internal components.
class BindingHelper(Interface):
  """A component which resolves some set of pseudo-bindings in a config.

  Many bindings are too complex to resolve with the standard mechanisms,
  because they require some python computation to determine how to bind them.
  For example, for references like {{packer[role][pkg][version]}}, we need to
  talk to the packer to figure out the correct packer call for the desired
  cluster.

  A BindingHelper is responsible for resolving one of these types of
  pseudo-bindings.  PackerBindingHelper will resolve "packer" bindings;
  BuildBindingHelper will resolve "build" bindings, JenkinsBindingHelper will
  resolve "jenkins" bindings, etc.

  A BindingHelper can be registered by calling
  "BindingHelper.register(Helper)".  Instead of explicitly calling "inject"
  methods in populate_namespaces, it will compute the set of open bindings,
  and then call the appropriate helpers for each.

  The bindings can be computed either from scratch, or from a binding
  dictionary.  A binding dictionary can be computed from live data, and then
  passed over an RPC connection, so that the bindings can be recomputed on
  the server.

  Each helper is responsible for computing its own binding dict.  The data in
  the dict should meet two requirements: it should be enough data to allow it
  to produce exactly the same result as the scratch binding, and the data
  should provide information that makes the binding comprehensible for a
  human debugging a job.  For example, a packer helper's binding dict should
  provide enough information to identify the HDFS file that should be used,
  but also the version number of the binary in packer (because a human reader
  wants to know the version of the package, not the meaningless HDFS URL).
  """

  @classmethod
  def register(cls, helper):
    # Append to the shared module-level registry consulted by apply_all().
    _BINDING_HELPERS.append(helper)

  def apply(self, config, env=None, binding_dict=None):
    # Resolve every ref in the raw config that this helper's matcher
    # recognizes.  Falls back to the config's per-helper binding dict when
    # no explicit binding_dict is supplied.
    for match in self.matcher.match(config.raw()):
      self.bind(config, match, env, binding_dict or config.binding_dicts[self.name])

  @abstractproperty
  def name(self):
    """Returns the name of this BindingHelper.  Typically it is the first
    component of the matcher, e.g. if the matcher matches {{git[sha]}},
    return "git"."""

  @abstractproperty
  def matcher(self):
    """Returns the pystachio matcher for refs that this binding helper binds."""

  @abstractmethod
  def bind(self, config, match, env, binding_dict):
    """Resolves a ref, adding a binding to the config."""


class CachingBindingHelper(BindingHelper):
  """A binding helper implementation that caches binding results."""

  def __init__(self):
    # match -> resolved binding; shared across apply() calls until flushed.
    self.cache = {}

  def flush_cache(self):
    # Drop all memoized bindings (used by clear_binding_caches() in tests).
    self.cache = {}

  def bind(self, config, match, env, binding_dict):
    # Resolve each distinct match at most once, then bind the cached result.
    if match not in self.cache:
      self.cache[match] = self.uncached_bind(config, match, env, binding_dict)
    config.bind(self.cache[match])

  @abstractmethod
  def uncached_bind(self, config, match, env, binding_dict):
    """Compute the binding for a ref that hasn't been seen before."""


def unregister_all():
  # Empty the registry in place so existing references observe the change.
  _BINDING_HELPERS[:] = []


def apply_all(config, env=None, binding_dict=None):
  """Computes a set of bindings and applies them to the config.

  :param config: the config whose bindings need to be computed.
  :param env: the python environment where the configuration was evaluated.
  :param binding_dict: an optional dictionary containing data to be used to
      compute the bindings.  If this is provided, then data from the
      dictionary should be used in preference over live data.
  :return: a binding dictionary with data that can be used to recompute the
      bindings.  The config is updated in-place.
  """
  for helper in _BINDING_HELPERS:
    helper.apply(config, env, binding_dict or config.binding_dicts[helper.name])


def clear_binding_caches():
  """Clear the binding helper's caches for testing."""
  for helper in _BINDING_HELPERS:
    if isinstance(helper, CachingBindingHelper):
      helper.flush_cache()
apache-2.0
hogarthj/ansible
lib/ansible/modules/network/nxos/nxos_interface_ospf.py
29
17076
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = ''' --- module: nxos_interface_ospf extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages configuration of an OSPF interface instance. description: - Manages configuration of an OSPF interface instance. author: Gabriele Gerbino (@GGabriele) notes: - Tested against NXOSv 7.3.(0)D1(1) on VIRL - Default, where supported, restores params default value. - To remove an existing authentication configuration you should use C(message_digest_key_id=default) plus all other options matching their existing values. - C(state=absent) removes the whole OSPF interface configuration. options: interface: description: - Name of this cisco_interface resource. Valid value is a string. required: true ospf: description: - Name of the ospf instance. required: true area: description: - Ospf area associated with this cisco_interface_ospf instance. Valid values are a string, formatted as an IP address (i.e. "0.0.0.0") or as an integer. required: true cost: description: - The cost associated with this cisco_interface_ospf instance. hello_interval: description: - Time between sending successive hello packets. Valid values are an integer or the keyword 'default'. 
dead_interval: description: - Time interval an ospf neighbor waits for a hello packet before tearing down adjacencies. Valid values are an integer or the keyword 'default'. passive_interface: description: - Setting to true will prevent this interface from receiving HELLO packets. type: bool message_digest: description: - Enables or disables the usage of message digest authentication. type: bool message_digest_key_id: description: - Md5 authentication key-id associated with the ospf instance. If this is present, message_digest_encryption_type, message_digest_algorithm_type and message_digest_password are mandatory. Valid value is an integer and 'default'. message_digest_algorithm_type: description: - Algorithm used for authentication among neighboring routers within an area. Valid values are 'md5' and 'default'. choices: ['md5', 'default'] message_digest_encryption_type: description: - Specifies the scheme used for encrypting message_digest_password. Valid values are '3des' or 'cisco_type_7' encryption or 'default'. choices: ['cisco_type_7','3des', 'default'] message_digest_password: description: - Specifies the message_digest password. Valid value is a string. state: description: - Determines whether the config should be present or not on the device. 
default: present choices: ['present','absent'] ''' EXAMPLES = ''' - nxos_interface_ospf: interface: ethernet1/32 ospf: 1 area: 1 cost: default ''' RETURN = ''' commands: description: commands sent to the device returned: always type: list sample: ["interface Ethernet1/32", "ip router ospf 1 area 0.0.0.1"] ''' import re import struct import socket from ansible.module_utils.network.nxos.nxos import get_config, load_config from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.common.config import CustomNetworkConfig BOOL_PARAMS = [ 'passive_interface', 'message_digest' ] PARAM_TO_COMMAND_KEYMAP = { 'interface': '', 'cost': 'ip ospf cost', 'ospf': 'ip router ospf', 'area': 'ip router ospf', 'hello_interval': 'ip ospf hello-interval', 'dead_interval': 'ip ospf dead-interval', 'passive_interface': 'ip ospf passive-interface', 'message_digest': 'ip ospf authentication message-digest', 'message_digest_key_id': 'ip ospf message-digest-key', 'message_digest_algorithm_type': 'ip ospf message-digest-key', 'message_digest_encryption_type': 'ip ospf message-digest-key', 'message_digest_password': 'ip ospf message-digest-key', } def get_value(arg, config, module): command = PARAM_TO_COMMAND_KEYMAP[arg] has_command = re.search(r'\s+{0}\s*$'.format(command), config, re.M) has_command_val = re.search(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M) if command == 'ip router ospf': value = '' if has_command_val: value_list = has_command_val.group('value').split() if arg == 'ospf': value = value_list[0] elif arg == 'area': value = value_list[2] value = normalize_area(value, module) elif command == 'ip ospf message-digest-key': value = '' if has_command_val: value_list = has_command_val.group('value').split() if arg == 'message_digest_key_id': value = value_list[0] elif arg == 'message_digest_algorithm_type': value = value_list[1] elif arg == 
'message_digest_encryption_type': value = value_list[2] if value == '3': value = '3des' elif value == '7': value = 'cisco_type_7' elif arg == 'message_digest_password': value = value_list[3] elif arg == 'passive_interface': has_no_command = re.search(r'\s+no\s+{0}\s*$'.format(command), config, re.M) value = False if has_command and not has_no_command: value = True elif arg in BOOL_PARAMS: value = bool(has_command) else: value = '' if has_command_val: value = has_command_val.group('value') return value def get_existing(module, args): existing = {} netcfg = CustomNetworkConfig(indent=2, contents=get_config(module)) if module.params['interface'].startswith('loopback') or module.params['interface'].startswith('port-channel'): parents = ['interface {0}'.format(module.params['interface'])] else: parents = ['interface {0}'.format(module.params['interface'].capitalize())] config = netcfg.get_section(parents) if 'ospf' in config: for arg in args: if arg not in ['interface']: existing[arg] = get_value(arg, config, module) existing['interface'] = module.params['interface'] return existing def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: new_dict[new_key] = value return new_dict def get_default_commands(existing, proposed, existing_commands, key, module): commands = list() existing_value = existing_commands.get(key) if key.startswith('ip ospf message-digest-key'): check = False for param in ['message_digest_encryption_type', 'message_digest_algorithm_type', 'message_digest_password']: if existing[param] == proposed[param]: check = True if check: if existing['message_digest_encryption_type'] == '3des': encryption_type = '3' elif existing['message_digest_encryption_type'] == 'cisco_type_7': encryption_type = '7' command = 'no {0} {1} {2} {3} {4}'.format( key, existing['message_digest_key_id'], existing['message_digest_algorithm_type'], encryption_type, existing['message_digest_password']) 
commands.append(command) else: commands.append('no {0} {1}'.format(key, existing_value)) return commands def get_custom_command(existing_cmd, proposed, key, module): commands = list() if key == 'ip router ospf': command = '{0} {1} area {2}'.format(key, proposed['ospf'], proposed['area']) if command not in existing_cmd: commands.append(command) elif key.startswith('ip ospf message-digest-key'): if (proposed['message_digest_key_id'] != 'default' and 'options' not in key): if proposed['message_digest_encryption_type'] == '3des': encryption_type = '3' elif proposed['message_digest_encryption_type'] == 'cisco_type_7': encryption_type = '7' command = '{0} {1} {2} {3} {4}'.format( key, proposed['message_digest_key_id'], proposed['message_digest_algorithm_type'], encryption_type, proposed['message_digest_password']) commands.append(command) return commands def state_present(module, existing, proposed, candidate): commands = list() proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) for key, value in proposed_commands.items(): if existing_commands.get(key): if key == 'ip router ospf': if proposed['area'] == existing['area']: continue if existing_commands[key] == proposed_commands[key]: continue if key == 'ip ospf passive-interface' and module.params.get('interface').upper().startswith('LO'): module.fail_json(msg='loopback interface does not support passive_interface') if value is True: commands.append(key) elif value is False: commands.append('no {0}'.format(key)) elif value == 'default': if existing_commands.get(key): commands.extend(get_default_commands(existing, proposed, existing_commands, key, module)) else: if (key == 'ip router ospf' or key.startswith('ip ospf message-digest-key')): commands.extend(get_custom_command(commands, proposed, key, module)) else: command = '{0} {1}'.format(key, value.lower()) commands.append(command) if commands: parents = ['interface 
{0}'.format(module.params['interface'].capitalize())] candidate.add(commands, parents=parents) def state_absent(module, existing, proposed, candidate): commands = [] parents = ['interface {0}'.format(module.params['interface'].capitalize())] existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) for key, value in existing_commands.items(): if value: if key.startswith('ip ospf message-digest-key'): if 'options' not in key: if existing['message_digest_encryption_type'] == '3des': encryption_type = '3' elif existing['message_digest_encryption_type'] == 'cisco_type_7': encryption_type = '7' command = 'no {0} {1} {2} {3} {4}'.format( key, existing['message_digest_key_id'], existing['message_digest_algorithm_type'], encryption_type, existing['message_digest_password']) commands.append(command) elif key in ['ip ospf authentication message-digest', 'ip ospf passive-interface']: if value: commands.append('no {0}'.format(key)) elif key == 'ip router ospf': command = 'no {0} {1} area {2}'.format(key, proposed['ospf'], proposed['area']) if command not in commands: commands.append(command) else: existing_value = existing_commands.get(key) commands.append('no {0} {1}'.format(key, existing_value)) candidate.add(commands, parents=parents) def normalize_area(area, module): try: area = int(area) area = socket.inet_ntoa(struct.pack('!L', area)) except ValueError: splitted_area = area.split('.') if len(splitted_area) != 4: module.fail_json(msg='Incorrect Area ID format', area=area) return area def main(): argument_spec = dict( interface=dict(required=True, type='str'), ospf=dict(required=True, type='str'), area=dict(required=True, type='str'), cost=dict(required=False, type='str'), hello_interval=dict(required=False, type='str'), dead_interval=dict(required=False, type='str'), passive_interface=dict(required=False, type='bool'), message_digest=dict(required=False, type='bool'), message_digest_key_id=dict(required=False, type='str'), 
message_digest_algorithm_type=dict(required=False, type='str', choices=['md5', 'default']), message_digest_encryption_type=dict(required=False, type='str', choices=['cisco_type_7', '3des', 'default']), message_digest_password=dict(required=False, type='str', no_log=True), state=dict(choices=['present', 'absent'], default='present', required=False) ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, required_together=[['message_digest_key_id', 'message_digest_algorithm_type', 'message_digest_encryption_type', 'message_digest_password']], supports_check_mode=True) # Normalize interface input data. # # * For port-channel and loopback interfaces expection is all lower case names. # * All other interfaces the expectation is an uppercase leading character # followed by lower case characters. # if re.match(r'(port-channel|loopback)', module.params['interface'], re.I): module.params['interface'] = module.params['interface'].lower() else: module.params['interface'] = module.params['interface'].capitalize() warnings = list() check_args(module, warnings) result = {'changed': False, 'commands': [], 'warnings': warnings} for param in ['message_digest_encryption_type', 'message_digest_algorithm_type', 'message_digest_password']: if module.params[param] == 'default' and module.params['message_digest_key_id'] != 'default': module.exit_json(msg='Use message_digest_key_id=default to remove an existing authentication configuration') state = module.params['state'] args = PARAM_TO_COMMAND_KEYMAP.keys() existing = get_existing(module, args) proposed_args = dict((k, v) for k, v in module.params.items() if v is not None and k in args) proposed = {} for key, value in proposed_args.items(): if key != 'interface': if str(value).lower() == 'true': value = True elif str(value).lower() == 'false': value = False elif str(value).lower() == 'default': value = 'default' if existing.get(key) or (not existing.get(key) and value): proposed[key] = value 
proposed['area'] = normalize_area(proposed['area'], module) if 'hello_interval' in proposed and proposed['hello_interval'] == '10': proposed['hello_interval'] = 'default' candidate = CustomNetworkConfig(indent=3) if state == 'present': state_present(module, existing, proposed, candidate) elif state == 'absent' and existing.get('ospf') == proposed['ospf'] and existing.get('area') == proposed['area']: state_absent(module, existing, proposed, candidate) if candidate: candidate = candidate.items_text() load_config(module, candidate) result['changed'] = True result['commands'] = candidate module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
sublime1809/django
tests/generic_views/test_detail.py
19
5272
from __future__ import unicode_literals from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist from django.test import TestCase, override_settings from django.views.generic.base import View from .models import Artist, Author, Page @override_settings(ROOT_URLCONF='generic_views.urls') class DetailViewTest(TestCase): fixtures = ['generic-views-test-data.json'] def test_simple_object(self): res = self.client.get('/detail/obj/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], {'foo': 'bar'}) self.assertIsInstance(res.context['view'], View) self.assertTemplateUsed(res, 'generic_views/detail.html') def test_detail_by_pk(self): res = self.client.get('/detail/author/1/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=1)) self.assertEqual(res.context['author'], Author.objects.get(pk=1)) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_missing_object(self): res = self.client.get('/detail/author/500/') self.assertEqual(res.status_code, 404) def test_detail_object_does_not_exist(self): self.assertRaises(ObjectDoesNotExist, self.client.get, '/detail/doesnotexist/1/') def test_detail_by_custom_pk(self): res = self.client.get('/detail/author/bycustompk/1/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=1)) self.assertEqual(res.context['author'], Author.objects.get(pk=1)) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_slug(self): res = self.client.get('/detail/author/byslug/scott-rosenberg/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg')) self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg')) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_custom_slug(self): res = 
self.client.get('/detail/author/bycustomslug/scott-rosenberg/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg')) self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg')) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_verbose_name(self): res = self.client.get('/detail/artist/1/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Artist.objects.get(pk=1)) self.assertEqual(res.context['artist'], Artist.objects.get(pk=1)) self.assertTemplateUsed(res, 'generic_views/artist_detail.html') def test_template_name(self): res = self.client.get('/detail/author/1/template_name/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=1)) self.assertEqual(res.context['author'], Author.objects.get(pk=1)) self.assertTemplateUsed(res, 'generic_views/about.html') def test_template_name_suffix(self): res = self.client.get('/detail/author/1/template_name_suffix/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=1)) self.assertEqual(res.context['author'], Author.objects.get(pk=1)) self.assertTemplateUsed(res, 'generic_views/author_view.html') def test_template_name_field(self): res = self.client.get('/detail/page/1/field/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Page.objects.get(pk=1)) self.assertEqual(res.context['page'], Page.objects.get(pk=1)) self.assertTemplateUsed(res, 'generic_views/page_template.html') def test_context_object_name(self): res = self.client.get('/detail/author/1/context_object_name/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=1)) self.assertEqual(res.context['thingy'], Author.objects.get(pk=1)) self.assertFalse('author' in res.context) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def 
test_duplicated_context_object_name(self): res = self.client.get('/detail/author/1/dupe_context_object_name/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(pk=1)) self.assertFalse('author' in res.context) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_invalid_url(self): self.assertRaises(AttributeError, self.client.get, '/detail/author/invalid/url/') def test_invalid_queryset(self): self.assertRaises(ImproperlyConfigured, self.client.get, '/detail/author/invalid/qs/') def test_non_model_object_with_meta(self): res = self.client.get('/detail/nonmodel/1/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'].id, "non_model_1")
bsd-3-clause
pombredanne/drf-generators
drf_generators/templates/viewset.py
2
2003
__all__ = ['VIEW_SET_URL', 'VIEW_SET_VIEW'] VIEW_SET_URL = """from rest_framework.routers import SimpleRouter from {{ app }} import views router = SimpleRouter() {% for model in models %} router.register(r'{{ model | lower }}', views.{{ model }}ViewSet, '{{model}}'){% endfor %} urlpatterns = router.urls """ VIEW_SET_VIEW = """from django.shortcuts import get_object_or_404 from rest_framework.viewsets import ViewSet from rest_framework.response import Response from {{ app }}.serializers import {{ serializers|join:', ' }} from {{ app }}.models import {{ models|join:', ' }} {% for model in models %} class {{ model }}ViewSet(ViewSet): def list(self, request): queryset = {{ model }}.objects.all() serializer = {{ model }}Serializer(queryset, many=True) return Response(serializer.data) def create(self, request): serializer = {{ model }}Serializer(data=request.data) if serializer.is_valid(): serializer.save() return Response(serializer.data, status=201) return Response(serializer.errors, status=400) def retrieve(self, request, pk=None): queryset = {{ model }}.objects.all() item = get_object_or_404(queryset, pk=pk) serializer = {{ model }}Serializer(item) return Response(serializer.data) def update(self, request, pk=None): try: item = {{ model }}.objects.get(pk=pk) except {{ model }}.DoesNotExist: return Response(status=404) serializer = {{ model }}Serializer(item, data=request.data) if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=400) def destroy(self, request, pk=None): try: item = {{ model }}.objects.get(pk=pk) except {{ model }}.DoesNotExist: return Response(status=404) item.delete() return Response(status=204) {% endfor %}"""
mit
mioann47/mobile-app-privacy-analyzer
mypythonscripts/AndroBugs_Framework-master/tools/modified/androguard/core/analysis/ganalysis.py
7
112699
# This file is part of Androguard. # # Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr> # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from xml.sax.saxutils import escape from tools.modified.androguard.core import bytecode from tools.modified.androguard.core.bytecodes.dvm_permissions import DVM_PERMISSIONS from tools.modified.androguard.core.analysis.risk import PERMISSIONS_RISK, INTERNET_RISK, PRIVACY_RISK, PHONE_RISK, SMS_RISK, MONEY_RISK from tools.modified.androguard.core.analysis.analysis import PathVar, TAINTED_PACKAGE_CREATE """Base class for undirected graphs. The Graph class allows any hashable object as a node and can associate key/value attribute pairs with each undirected edge. Self-loops are allowed but multiple edges are not (see MultiGraph). For directed graphs see DiGraph and MultiDiGraph. """ # Copyright (C) 2004-2011 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. from copy import deepcopy __author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)', 'Pieter Swart (swart@lanl.gov)', 'Dan Schult(dschult@colgate.edu)']) class Graph(object): """ Base class for undirected graphs. A Graph stores nodes and edges with optional data, or attributes. Graphs hold undirected edges. Self loops are allowed but multiple (parallel) edges are not. Nodes can be arbitrary (hashable) Python objects with optional key/value attributes. 
Edges are represented as links between nodes with optional key/value attributes. Parameters ---------- data : input graph Data to initialize graph. If data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object. If the corresponding optional Python packages are installed the data can also be a NumPy matrix or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. attr : keyword arguments, optional (default= no attributes) Attributes to add to graph as key=value pairs. See Also -------- DiGraph MultiGraph MultiDiGraph Examples -------- Create an empty graph structure (a "null graph") with no nodes and no edges. >>> G = nx.Graph() G can be grown in several ways. **Nodes:** Add one node at a time: >>> G.add_node(1) Add the nodes from any container (a list, dict, set or even the lines from a file or the nodes from another graph). >>> G.add_nodes_from([2,3]) >>> G.add_nodes_from(range(100,110)) >>> H=nx.Graph() >>> H.add_path([0,1,2,3,4,5,6,7,8,9]) >>> G.add_nodes_from(H) In addition to strings and integers any hashable Python object (except None) can represent a node, e.g. a customized node object, or even another Graph. >>> G.add_node(H) **Edges:** G can also be grown by adding edges. Add one edge, >>> G.add_edge(1, 2) a list of edges, >>> G.add_edges_from([(1,2),(1,3)]) or a collection of edges, >>> G.add_edges_from(H.edges()) If some edges connect nodes not yet in the graph, the nodes are added automatically. There are no errors when adding nodes or edges that already exist. **Attributes:** Each graph, node, and edge can hold key/value attribute pairs in an associated attribute dictionary (the keys must be hashable). By default these are empty, but can be added or changed using add_edge, add_node or direct manipulation of the attribute dictionaries named graph, node and edge respectively. 
>>> G = nx.Graph(day="Friday") >>> G.graph {'day': 'Friday'} Add node attributes using add_node(), add_nodes_from() or G.node >>> G.add_node(1, time='5pm') >>> G.add_nodes_from([3], time='2pm') >>> G.node[1] {'time': '5pm'} >>> G.node[1]['room'] = 714 >>> del G.node[1]['room'] # remove attribute >>> G.nodes(data=True) [(1, {'time': '5pm'}), (3, {'time': '2pm'})] Warning: adding a node to G.node does not add it to the graph. Add edge attributes using add_edge(), add_edges_from(), subscript notation, or G.edge. >>> G.add_edge(1, 2, weight=4.7 ) >>> G.add_edges_from([(3,4),(4,5)], color='red') >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})]) >>> G[1][2]['weight'] = 4.7 >>> G.edge[1][2]['weight'] = 4 **Shortcuts:** Many common graph features allow python syntax to speed reporting. >>> 1 in G # check if node in graph True >>> [n for n in G if n<3] # iterate through nodes [1, 2] >>> len(G) # number of nodes in graph 5 >>> G[1] # adjacency dict keyed by neighbor to edge attributes ... # Note: you should not change this dict manually! {2: {'color': 'blue', 'weight': 4}} The fastest way to traverse all edges of a graph is via adjacency_iter(), but the edges() method is often more convenient. >>> for n,nbrsdict in G.adjacency_iter(): ... for nbr,eattr in nbrsdict.items(): ... if 'weight' in eattr: ... (n,nbr,eattr['weight']) (1, 2, 4) (2, 1, 4) (2, 3, 8) (3, 2, 8) >>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ] [(1, 2, 4), (2, 3, 8)] **Reporting:** Simple graph information is obtained using methods. Iterator versions of many reporting methods exist for efficiency. Methods exist for reporting nodes(), edges(), neighbors() and degree() as well as the number of nodes and edges. For details on these and other miscellaneous methods, see below. """ def __init__(self, data=None, **attr): """Initialize a graph with edges, name, graph attributes. Parameters ---------- data : input graph Data to initialize graph. 
If data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object. If the corresponding optional Python packages are installed the data can also be a NumPy matrix or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. name : string, optional (default='') An optional name for the graph. attr : keyword arguments, optional (default= no attributes) Attributes to add to graph as key=value pairs. See Also -------- convert Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G = nx.Graph(name='my graph') >>> e = [(1,2),(2,3),(3,4)] # list of edges >>> G = nx.Graph(e) Arbitrary graph attribute pairs (key=value) may be assigned >>> G=nx.Graph(e, day="Friday") >>> G.graph {'day': 'Friday'} """ self.graph = {} # dictionary for graph attributes self.node = {} # empty node dict (created before convert) self.adj = {} # empty adjacency dict # attempt to load graph with data if data is not None: convert.to_networkx_graph(data,create_using=self) # load graph attributes (must be after convert) self.graph.update(attr) self.edge = self.adj @property def name(self): return self.graph.get('name','') @name.setter def name(self, s): self.graph['name']=s def __str__(self): """Return the graph name. Returns ------- name : string The name of the graph. Examples -------- >>> G = nx.Graph(name='foo') >>> str(G) 'foo' """ return self.name def __iter__(self): """Iterate over the nodes. Use the expression 'for n in G'. Returns ------- niter : iterator An iterator over all nodes in the graph. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) """ return iter(self.node) def __contains__(self,n): """Return True if n is a node, False otherwise. Use the expression 'n in G'. 
Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> 1 in G True """ try: return n in self.node except TypeError: return False def __len__(self): """Return the number of nodes. Use the expression 'len(G)'. Returns ------- nnodes : int The number of nodes in the graph. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> len(G) 4 """ return len(self.node) def __getitem__(self, n): """Return a dict of neighbors of node n. Use the expression 'G[n]'. Parameters ---------- n : node A node in the graph. Returns ------- adj_dict : dictionary The adjacency dictionary for nodes connected to n. Notes ----- G[n] is similar to G.neighbors(n) but the internal data dictionary is returned instead of a list. Assigning G[n] will corrupt the internal graph data structure. Use G[n] for reading data only. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G[0] {1: {}} """ return self.adj[n] def add_node(self, n, attr_dict=None, **attr): """Add a single node n and update node attributes. Parameters ---------- n : node A node can be any hashable Python object except None. attr_dict : dictionary, optional (default= no attributes) Dictionary of node attributes. Key/value pairs will update existing data associated with the node. attr : keyword arguments, optional Set or change attributes using key=value. See Also -------- add_nodes_from Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_node(1) >>> G.add_node('Hello') >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) >>> G.add_node(K3) >>> G.number_of_nodes() 3 Use keywords set/change node attributes: >>> G.add_node(1,size=10) >>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649)) Notes ----- A hashable object is one that can be used as a key in a Python dictionary. This includes strings, numbers, tuples of strings and numbers, etc. 
On many platforms hashable items also include mutables such as NetworkX Graphs, though one should be careful that the hash doesn't change on mutables. """ # set up attribute dict if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") if n not in self.node: self.adj[n] = {} self.node[n] = attr_dict else: # update attr even if node already exists self.node[n].update(attr_dict) def add_nodes_from(self, nodes, **attr): """Add multiple nodes. Parameters ---------- nodes : iterable container A container of nodes (list, dict, set, etc.). OR A container of (node, attribute dict) tuples. Node attributes are updated using the attribute dict. attr : keyword arguments, optional (default= no attributes) Update attributes for all nodes in nodes. Node attributes specified in nodes as a tuple take precedence over attributes specified generally. See Also -------- add_node Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_nodes_from('Hello') >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) >>> G.add_nodes_from(K3) >>> sorted(G.nodes(),key=str) [0, 1, 2, 'H', 'e', 'l', 'o'] Use keywords to update specific node attributes for every node. >>> G.add_nodes_from([1,2], size=10) >>> G.add_nodes_from([3,4], weight=0.4) Use (node, attrdict) tuples to update attributes for specific nodes. 
>>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})]) >>> G.node[1]['size'] 11 >>> H = nx.Graph() >>> H.add_nodes_from(G.nodes(data=True)) >>> H.node[1]['size'] 11 """ for n in nodes: try: newnode=n not in self.node except TypeError: nn,ndict = n if nn not in self.node: self.adj[nn] = {} newdict = attr.copy() newdict.update(ndict) self.node[nn] = newdict else: olddict = self.node[nn] olddict.update(attr) olddict.update(ndict) continue if newnode: self.adj[n] = {} self.node[n] = attr.copy() else: self.node[n].update(attr) def remove_node(self,n): """Remove node n. Removes the node n and all adjacent edges. Attempting to remove a non-existent node will raise an exception. Parameters ---------- n : node A node in the graph Raises ------- NetworkXError If n is not in the graph. See Also -------- remove_nodes_from Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> G.edges() [(0, 1), (1, 2)] >>> G.remove_node(1) >>> G.edges() [] """ adj = self.adj try: nbrs = list(adj[n].keys()) # keys handles self-loops (allow mutation later) del self.node[n] except KeyError: # NetworkXError if n not in self raise NetworkXError("The node %s is not in the graph."%(n,)) for u in nbrs: del adj[u][n] # remove all edges n-u in graph del adj[n] # now remove node def remove_nodes_from(self, nodes): """Remove multiple nodes. Parameters ---------- nodes : iterable container A container of nodes (list, dict, set, etc.). If a node in the container is not in the graph it is silently ignored. 
See Also -------- remove_node Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> e = G.nodes() >>> e [0, 1, 2] >>> G.remove_nodes_from(e) >>> G.nodes() [] """ adj = self.adj for n in nodes: try: del self.node[n] for u in list(adj[n].keys()): # keys() handles self-loops del adj[u][n] #(allows mutation of dict in loop) del adj[n] except KeyError: pass def nodes_iter(self, data=False): """Return an iterator over the nodes. Parameters ---------- data : boolean, optional (default=False) If False the iterator returns nodes. If True return a two-tuple of node and node data dictionary Returns ------- niter : iterator An iterator over nodes. If data=True the iterator gives two-tuples containing (node, node data, dictionary) Notes ----- If the node data is not required it is simpler and equivalent to use the expression 'for n in G'. >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> [d for n,d in G.nodes_iter(data=True)] [{}, {}, {}] """ if data: return iter(self.node.items()) return iter(self.node) def nodes(self, data=False): """Return a list of the nodes in the graph. Parameters ---------- data : boolean, optional (default=False) If False return a list of nodes. If True return a two-tuple of node and node data dictionary Returns ------- nlist : list A list of nodes. If data=True a list of two-tuples containing (node, node data dictionary). Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> G.nodes() [0, 1, 2] >>> G.add_node(1, time='5pm') >>> G.nodes(data=True) [(0, {}), (1, {'time': '5pm'}), (2, {})] """ return list(self.nodes_iter(data=data)) def number_of_nodes(self): """Return the number of nodes in the graph. Returns ------- nnodes : int The number of nodes in the graph. 
See Also -------- order, __len__ which are identical Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> len(G) 3 """ return len(self.node) def order(self): """Return the number of nodes in the graph. Returns ------- nnodes : int The number of nodes in the graph. See Also -------- number_of_nodes, __len__ which are identical """ return len(self.node) def has_node(self, n): """Return True if the graph contains the node n. Parameters ---------- n : node Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2]) >>> G.has_node(0) True It is more readable and simpler to use >>> 0 in G True """ try: return n in self.node except TypeError: return False def add_edge(self, u, v, attr_dict=None, **attr): """Add an edge between u and v. The nodes u and v will be automatically added if they are not already in the graph. Edge attributes can be specified with keywords or by providing a dictionary with key/value pairs. See examples below. Parameters ---------- u,v : nodes Nodes can be, for example, strings or numbers. Nodes must be hashable (and not None) Python objects. attr_dict : dictionary, optional (default= no attributes) Dictionary of edge attributes. Key/value pairs will update existing data associated with the edge. attr : keyword arguments, optional Edge data (or labels or objects) can be assigned using keyword arguments. See Also -------- add_edges_from : add a collection of edges Notes ----- Adding an edge that already exists updates the edge data. Many NetworkX algorithms designed for weighted graphs use as the edge weight a numerical value assigned to a keyword which by default is 'weight'. 
Examples -------- The following all add the edge e=(1,2) to graph G: >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> e = (1,2) >>> G.add_edge(1, 2) # explicit two-node form >>> G.add_edge(*e) # single edge as tuple of two nodes >>> G.add_edges_from( [(1,2)] ) # add edges from iterable container Associate data to edges using keywords: >>> G.add_edge(1, 2, weight=3) >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) """ # set up attribute dictionary if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") # add nodes if u not in self.node: self.adj[u] = {} self.node[u] = {} if v not in self.node: self.adj[v] = {} self.node[v] = {} # add the edge datadict=self.adj[u].get(v,{}) datadict.update(attr_dict) self.adj[u][v] = datadict self.adj[v][u] = datadict def add_edges_from(self, ebunch, attr_dict=None, **attr): """Add all the edges in ebunch. Parameters ---------- ebunch : container of edges Each edge given in the container will be added to the graph. The edges must be given as as 2-tuples (u,v) or 3-tuples (u,v,d) where d is a dictionary containing edge data. attr_dict : dictionary, optional (default= no attributes) Dictionary of edge attributes. Key/value pairs will update existing data associated with each edge. attr : keyword arguments, optional Edge data (or labels or objects) can be assigned using keyword arguments. See Also -------- add_edge : add a single edge add_weighted_edges_from : convenient way to add weighted edges Notes ----- Adding the same edge twice has no effect but any edge data will be updated when each duplicate edge is added. 
Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples >>> e = zip(range(0,3),range(1,4)) >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 Associate data to edges >>> G.add_edges_from([(1,2),(2,3)], weight=3) >>> G.add_edges_from([(3,4),(1,4)], label='WN2898') """ # set up attribute dict if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") # process ebunch for e in ebunch: ne=len(e) if ne==3: u,v,dd = e elif ne==2: u,v = e dd = {} else: raise NetworkXError(\ "Edge tuple %s must be a 2-tuple or 3-tuple."%(e,)) if u not in self.node: self.adj[u] = {} self.node[u] = {} if v not in self.node: self.adj[v] = {} self.node[v] = {} datadict=self.adj[u].get(v,{}) datadict.update(attr_dict) datadict.update(dd) self.adj[u][v] = datadict self.adj[v][u] = datadict def add_weighted_edges_from(self, ebunch, weight='weight', **attr): """Add all the edges in ebunch as weighted edges with specified weights. Parameters ---------- ebunch : container of edges Each edge given in the list or container will be added to the graph. The edges must be given as 3-tuples (u,v,w) where w is a number. weight : string, optional (default= 'weight') The attribute name for the edge weights to be added. attr : keyword arguments, optional (default= no attributes) Edge attributes to add/update for all edges. See Also -------- add_edge : add a single edge add_edges_from : add multiple edges Notes ----- Adding the same edge twice for Graph/DiGraph simply updates the edge data. For MultiGraph/MultiDiGraph, duplicate edges are stored. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)]) """ self.add_edges_from(((u,v,{weight:d}) for u,v,d in ebunch),**attr) def remove_edge(self, u, v): """Remove the edge between u and v. 
Parameters ---------- u,v: nodes Remove the edge between nodes u and v. Raises ------ NetworkXError If there is not an edge between u and v. See Also -------- remove_edges_from : remove a collection of edges Examples -------- >>> G = nx.Graph() # or DiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.remove_edge(0,1) >>> e = (1,2) >>> G.remove_edge(*e) # unpacks e from an edge tuple >>> e = (2,3,{'weight':7}) # an edge with attribute data >>> G.remove_edge(*e[:2]) # select first part of edge tuple """ try: del self.adj[u][v] if u != v: # self-loop needs only one entry removed del self.adj[v][u] except KeyError: raise NetworkXError("The edge %s-%s is not in the graph"%(u,v)) def remove_edges_from(self, ebunch): """Remove all edges specified in ebunch. Parameters ---------- ebunch: list or container of edge tuples Each edge given in the list or container will be removed from the graph. The edges can be: - 2-tuples (u,v) edge between u and v. - 3-tuples (u,v,k) where k is ignored. See Also -------- remove_edge : remove a single edge Notes ----- Will fail silently if an edge in ebunch is not in the graph. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> ebunch=[(1,2),(2,3)] >>> G.remove_edges_from(ebunch) """ adj=self.adj for e in ebunch: u,v = e[:2] # ignore edge data if present if u in adj and v in adj[u]: del adj[u][v] if u != v: # self loop needs only one entry removed del adj[v][u] def has_edge(self, u, v): """Return True if the edge (u,v) is in the graph. Parameters ---------- u,v : nodes Nodes can be, for example, strings or numbers. Nodes must be hashable (and not None) Python objects. Returns ------- edge_ind : bool True if edge is in the graph, False otherwise. 
Examples -------- Can be called either using two nodes u,v or edge tuple (u,v) >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.has_edge(0,1) # using two nodes True >>> e = (0,1) >>> G.has_edge(*e) # e is a 2-tuple (u,v) True >>> e = (0,1,{'weight':7}) >>> G.has_edge(*e[:2]) # e is a 3-tuple (u,v,data_dictionary) True The following syntax are all equivalent: >>> G.has_edge(0,1) True >>> 1 in G[0] # though this gives KeyError if 0 not in G True """ try: return v in self.adj[u] except KeyError: return False def neighbors(self, n): """Return a list of the nodes connected to the node n. Parameters ---------- n : node A node in the graph Returns ------- nlist : list A list of nodes that are adjacent to n. Raises ------ NetworkXError If the node n is not in the graph. Notes ----- It is usually more convenient (and faster) to access the adjacency dictionary as G[n]: >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edge('a','b',weight=7) >>> G['a'] {'b': {'weight': 7}} Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.neighbors(0) [1] """ try: return list(self.adj[n]) except KeyError: raise NetworkXError("The node %s is not in the graph."%(n,)) def neighbors_iter(self, n): """Return an iterator over all neighbors of node n. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> [n for n in G.neighbors_iter(0)] [1] Notes ----- It is faster to use the idiom "in G[0]", e.g. >>> G = nx.path_graph(4) >>> [n for n in G[0]] [1] """ try: return iter(self.adj[n]) except KeyError: raise NetworkXError("The node %s is not in the graph."%(n,)) def edges(self, nbunch=None, data=False): """Return a list of edges. Edges are returned as tuples with optional data in the order (node, neighbor, data). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. 
The container will be iterated through once. data : bool, optional (default=False) Return two tuples (u,v) (False) or three-tuples (u,v,data) (True). Returns -------- edge_list: list of edge tuples Edges that are adjacent to any node in nbunch, or a list of all edges if nbunch is not specified. See Also -------- edges_iter : return an iterator over the edges Notes ----- Nodes in nbunch that are not in the graph will be (quietly) ignored. For directed graphs this returns the out-edges. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.edges() [(0, 1), (1, 2), (2, 3)] >>> G.edges(data=True) # default edge data is {} (empty dictionary) [(0, 1, {}), (1, 2, {}), (2, 3, {})] >>> G.edges([0,3]) [(0, 1), (3, 2)] >>> G.edges(0) [(0, 1)] """ return list(self.edges_iter(nbunch, data)) def edges_iter(self, nbunch=None, data=False): """Return an iterator over the edges. Edges are returned as tuples with optional data in the order (node, neighbor, data). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. data : bool, optional (default=False) If True, return edge attribute dict in 3-tuple (u,v,data). Returns ------- edge_iter : iterator An iterator of (u,v) or (u,v,d) tuples of edges. See Also -------- edges : return a list of edges Notes ----- Nodes in nbunch that are not in the graph will be (quietly) ignored. For directed graphs this returns the out-edges. 
Examples -------- >>> G = nx.Graph() # or MultiGraph, etc >>> G.add_path([0,1,2,3]) >>> [e for e in G.edges_iter()] [(0, 1), (1, 2), (2, 3)] >>> list(G.edges_iter(data=True)) # default data is {} (empty dict) [(0, 1, {}), (1, 2, {}), (2, 3, {})] >>> list(G.edges_iter([0,3])) [(0, 1), (3, 2)] >>> list(G.edges_iter(0)) [(0, 1)] """ seen={} # helper dict to keep track of multiply stored edges if nbunch is None: nodes_nbrs = self.adj.items() else: nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) if data: for n,nbrs in nodes_nbrs: for nbr,data in nbrs.items(): if nbr not in seen: yield (n,nbr,data) seen[n]=1 else: for n,nbrs in nodes_nbrs: for nbr in nbrs: if nbr not in seen: yield (n,nbr) seen[n] = 1 del seen def get_edge_data(self, u, v, default=None): """Return the attribute dictionary associated with edge (u,v). Parameters ---------- u,v : nodes default: any Python object (default=None) Value to return if the edge (u,v) is not found. Returns ------- edge_dict : dictionary The edge attribute dictionary. Notes ----- It is faster to use G[u][v]. >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G[0][1] {} Warning: Assigning G[u][v] corrupts the graph data structure. But it is safe to assign attributes to that dictionary, >>> G[0][1]['weight'] = 7 >>> G[0][1]['weight'] 7 >>> G[1][0]['weight'] 7 Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.get_edge_data(0,1) # default edge data is {} {} >>> e = (0,1) >>> G.get_edge_data(*e) # tuple form {} >>> G.get_edge_data('a','b',default=0) # edge not in graph, return 0 0 """ try: return self.adj[u][v] except KeyError: return default def adjacency_list(self): """Return an adjacency list representation of the graph. The output adjacency list is in the order of G.nodes(). For directed graphs, only outgoing adjacencies are included. 
Returns ------- adj_list : lists of lists The adjacency structure of the graph as a list of lists. See Also -------- adjacency_iter Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.adjacency_list() # in order given by G.nodes() [[1], [0, 2], [1, 3], [2]] """ return list(map(list,iter(self.adj.values()))) def adjacency_iter(self): """Return an iterator of (node, adjacency dict) tuples for all nodes. This is the fastest way to look at every edge. For directed graphs, only outgoing adjacencies are included. Returns ------- adj_iter : iterator An iterator of (node, adjacency dictionary) for all nodes in the graph. See Also -------- adjacency_list Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> [(n,nbrdict) for n,nbrdict in G.adjacency_iter()] [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})] """ return iter(self.adj.items()) def degree(self, nbunch=None, weight=None): """Return the degree of a node or nodes. The node degree is the number of edges adjacent to that node. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- nd : dictionary, or number A dictionary with nodes as keys and degree as values or a number if a single node is specified. 
Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.degree(0) 1 >>> G.degree([0,1]) {0: 1, 1: 2} >>> list(G.degree([0,1]).values()) [1, 2] """ if nbunch in self: # return a single node return next(self.degree_iter(nbunch,weight))[1] else: # return a dict return dict(self.degree_iter(nbunch,weight)) def degree_iter(self, nbunch=None, weight=None): """Return an iterator for (node, degree). The node degree is the number of edges adjacent to the node. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. Returns ------- nd_iter : an iterator The iterator returns two-tuples of (node, degree). See Also -------- degree Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> list(G.degree_iter(0)) # node 0 with degree 1 [(0, 1)] >>> list(G.degree_iter([0,1])) [(0, 1), (1, 2)] """ if nbunch is None: nodes_nbrs = self.adj.items() else: nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) if weight is None: for n,nbrs in nodes_nbrs: yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree) else: # edge weighted graph - degree is sum of nbr edge weights for n,nbrs in nodes_nbrs: yield (n, sum((nbrs[nbr].get(weight,1) for nbr in nbrs)) + (n in nbrs and nbrs[n].get(weight,1))) def clear(self): """Remove all nodes and edges from the graph. This also removes the name, and all graph, node, and edge attributes. 
Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.clear() >>> G.nodes() [] >>> G.edges() [] """ self.name = '' self.adj.clear() self.node.clear() self.graph.clear() def copy(self): """Return a copy of the graph. Returns ------- G : Graph A copy of the graph. See Also -------- to_directed: return a directed copy of the graph. Notes ----- This makes a complete copy of the graph including all of the node or edge attributes. Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> H = G.copy() """ return deepcopy(self) def is_multigraph(self): """Return True if graph is a multigraph, False otherwise.""" return False def is_directed(self): """Return True if graph is directed, False otherwise.""" return False def to_directed(self): """Return a directed representation of the graph. Returns ------- G : DiGraph A directed graph with the same name, same nodes, and with each edge (u,v,data) replaced by two directed edges (u,v,data) and (v,u,data). Notes ----- This returns a "deepcopy" of the edge, node, and graph attributes which attempts to completely copy all of the data and references. This is in contrast to the similar D=DiGraph(G) which returns a shallow copy of the data. See the Python copy module for more information on shallow and deep copies, http://docs.python.org/library/copy.html. 
    def subgraph(self, nbunch):
        """Return the subgraph induced on nodes in nbunch.

        The induced subgraph of the graph contains the nodes in nbunch
        and the edges between those nodes.

        Parameters
        ----------
        nbunch : list, iterable
            A container of nodes which will be iterated through once.

        Returns
        -------
        G : Graph
            A subgraph of the graph with the same edge attributes.

        Notes
        -----
        The graph, edge or node attributes just point to the original graph.
        So changes to the node or edge structure will not be reflected in
        the original graph while changes to the attributes will.

        To create a subgraph with its own copy of the edge/node attributes use:
        nx.Graph(G.subgraph(nbunch))

        If edge attributes are containers, a deep copy can be obtained using:
        G.subgraph(nbunch).copy()

        For an inplace reduction of a graph to a subgraph you can remove nodes:
        G.remove_nodes_from([n for n in G if n not in set(nbunch)])

        Examples
        --------
        >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
        >>> G.add_path([0,1,2,3])
        >>> H = G.subgraph([0,1,2])
        >>> H.edges()
        [(0, 1), (1, 2)]
        """
        bunch =self.nbunch_iter(nbunch)
        # create new graph and copy subgraph into it
        H = self.__class__()
        # copy node and attribute dictionaries
        # (attribute dicts are shared with self, not copied -- see Notes)
        for n in bunch:
            H.node[n]=self.node[n]
        # namespace shortcuts for speed
        H_adj=H.adj
        self_adj=self.adj
        # add nodes and edges (undirected method)
        # an edge is kept only when both endpoints made it into H
        for n in H.node:
            Hnbrs={}
            H_adj[n]=Hnbrs
            for nbr,d in self_adj[n].items():
                if nbr in H_adj:
                    # add both representations of edge: n-nbr and nbr-n
                    # (the attribute dict d is shared with the original)
                    Hnbrs[nbr]=d
                    H_adj[nbr][n]=d
        H.graph=self.graph
        return H
See Also -------- nodes_with_selfloops, number_of_selfloops Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edge(1,1) >>> G.add_edge(1,2) >>> G.selfloop_edges() [(1, 1)] >>> G.selfloop_edges(data=True) [(1, 1, {})] """ if data: return [ (n,n,nbrs[n]) for n,nbrs in self.adj.items() if n in nbrs ] else: return [ (n,n) for n,nbrs in self.adj.items() if n in nbrs ] def number_of_selfloops(self): """Return the number of selfloop edges. A selfloop edge has the same node at both ends. Returns ------- nloops : int The number of selfloops. See Also -------- nodes_with_selfloops, selfloop_edges Examples -------- >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edge(1,1) >>> G.add_edge(1,2) >>> G.number_of_selfloops() 1 """ return len(self.selfloop_edges()) def size(self, weight=None): """Return the number of edges. Parameters ---------- weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. Returns ------- nedges : int The number of edges of sum of edge weights in the graph. See Also -------- number_of_edges Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.size() 3 >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_edge('a','b',weight=2) >>> G.add_edge('b','c',weight=4) >>> G.size() 2 >>> G.size(weight='weight') 6.0 """ s=sum(self.degree(weight=weight).values())/2 if weight is None: return int(s) else: return float(s) def number_of_edges(self, u=None, v=None): """Return the number of edges between two nodes. Parameters ---------- u,v : nodes, optional (default=all edges) If u and v are specified, return the number of edges between u and v. Otherwise return the total number of all edges. Returns ------- nedges : int The number of edges in the graph. If nodes u and v are specified return the number of edges between those nodes. 
See Also -------- size Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.number_of_edges() 3 >>> G.number_of_edges(0,1) 1 >>> e = (0,1) >>> G.number_of_edges(*e) 1 """ if u is None: return int(self.size()) if v in self.adj[u]: return 1 else: return 0 def add_star(self, nodes, **attr): """Add a star. The first node in nodes is the middle of the star. It is connected to all other nodes. Parameters ---------- nodes : iterable container A container of nodes. attr : keyword arguments, optional (default= no attributes) Attributes to add to every edge in star. See Also -------- add_path, add_cycle Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_star([0,1,2,3]) >>> G.add_star([10,11,12],weight=2) """ nlist = list(nodes) v=nlist[0] edges=((v,n) for n in nlist[1:]) self.add_edges_from(edges, **attr) def add_path(self, nodes, **attr): """Add a path. Parameters ---------- nodes : iterable container A container of nodes. A path will be constructed from the nodes (in order) and added to the graph. attr : keyword arguments, optional (default= no attributes) Attributes to add to every edge in path. See Also -------- add_star, add_cycle Examples -------- >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> G.add_path([10,11,12],weight=7) """ nlist = list(nodes) edges=zip(nlist[:-1],nlist[1:]) self.add_edges_from(edges, **attr) def add_cycle(self, nodes, **attr): """Add a cycle. Parameters ---------- nodes: iterable container A container of nodes. A cycle will be constructed from the nodes (in order) and added to the graph. attr : keyword arguments, optional (default= no attributes) Attributes to add to every edge in cycle. 
See Also -------- add_path, add_star Examples -------- >>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_cycle([0,1,2,3]) >>> G.add_cycle([10,11,12],weight=7) """ nlist = list(nodes) edges=zip(nlist,nlist[1:]+[nlist[0]]) self.add_edges_from(edges, **attr) def nbunch_iter(self, nbunch=None): """Return an iterator of nodes contained in nbunch that are also in the graph. The nodes in nbunch are checked for membership in the graph and if not are silently ignored. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. Returns ------- niter : iterator An iterator over nodes in nbunch that are also in the graph. If nbunch is None, iterate over all nodes in the graph. Raises ------ NetworkXError If nbunch is not a node or or sequence of nodes. If a node in nbunch is not hashable. See Also -------- Graph.__iter__ Notes ----- When nbunch is an iterator, the returned iterator yields values directly from nbunch, becoming exhausted when nbunch is exhausted. To test whether nbunch is a single node, one can use "if nbunch in self:", even after processing with this routine. If nbunch is not a node or a (possibly empty) sequence/iterator or None, a NetworkXError is raised. Also, if any object in nbunch is not hashable, a NetworkXError is raised. """ if nbunch is None: # include all nodes via iterator bunch=iter(self.adj.keys()) elif nbunch in self: # if nbunch is a single node bunch=iter([nbunch]) else: # if nbunch is a sequence of nodes def bunch_iter(nlist,adj): try: for n in nlist: if n in adj: yield n except TypeError as e: message=e.args[0] import sys sys.stdout.write(message) # capture error for non-sequence/iterator nbunch. if 'iter' in message: raise NetworkXError(\ "nbunch is not a node or a sequence of nodes.") # capture error for unhashable node. 
elif 'hashable' in message: raise NetworkXError(\ "Node %s in the sequence nbunch is not a valid node."%n) else: raise bunch=bunch_iter(nbunch,self.adj) return bunch """Base class for directed graphs.""" # Copyright (C) 2004-2011 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. from copy import deepcopy class DiGraph(Graph): """ Base class for directed graphs. A DiGraph stores nodes and edges with optional data, or attributes. DiGraphs hold directed edges. Self loops are allowed but multiple (parallel) edges are not. Nodes can be arbitrary (hashable) Python objects with optional key/value attributes. Edges are represented as links between nodes with optional key/value attributes. Parameters ---------- data : input graph Data to initialize graph. If data=None (default) an empty graph is created. The data can be an edge list, or any NetworkX graph object. If the corresponding optional Python packages are installed the data can also be a NumPy matrix or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph. attr : keyword arguments, optional (default= no attributes) Attributes to add to graph as key=value pairs. See Also -------- Graph MultiGraph MultiDiGraph Examples -------- Create an empty graph structure (a "null graph") with no nodes and no edges. >>> G = nx.DiGraph() G can be grown in several ways. **Nodes:** Add one node at a time: >>> G.add_node(1) Add the nodes from any container (a list, dict, set or even the lines from a file or the nodes from another graph). >>> G.add_nodes_from([2,3]) >>> G.add_nodes_from(range(100,110)) >>> H=nx.Graph() >>> H.add_path([0,1,2,3,4,5,6,7,8,9]) >>> G.add_nodes_from(H) In addition to strings and integers any hashable Python object (except None) can represent a node, e.g. a customized node object, or even another Graph. >>> G.add_node(H) **Edges:** G can also be grown by adding edges. 
Add one edge, >>> G.add_edge(1, 2) a list of edges, >>> G.add_edges_from([(1,2),(1,3)]) or a collection of edges, >>> G.add_edges_from(H.edges()) If some edges connect nodes not yet in the graph, the nodes are added automatically. There are no errors when adding nodes or edges that already exist. **Attributes:** Each graph, node, and edge can hold key/value attribute pairs in an associated attribute dictionary (the keys must be hashable). By default these are empty, but can be added or changed using add_edge, add_node or direct manipulation of the attribute dictionaries named graph, node and edge respectively. >>> G = nx.DiGraph(day="Friday") >>> G.graph {'day': 'Friday'} Add node attributes using add_node(), add_nodes_from() or G.node >>> G.add_node(1, time='5pm') >>> G.add_nodes_from([3], time='2pm') >>> G.node[1] {'time': '5pm'} >>> G.node[1]['room'] = 714 >>> del G.node[1]['room'] # remove attribute >>> G.nodes(data=True) [(1, {'time': '5pm'}), (3, {'time': '2pm'})] Warning: adding a node to G.node does not add it to the graph. Add edge attributes using add_edge(), add_edges_from(), subscript notation, or G.edge. >>> G.add_edge(1, 2, weight=4.7 ) >>> G.add_edges_from([(3,4),(4,5)], color='red') >>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})]) >>> G[1][2]['weight'] = 4.7 >>> G.edge[1][2]['weight'] = 4 **Shortcuts:** Many common graph features allow python syntax to speed reporting. >>> 1 in G # check if node in graph True >>> [n for n in G if n<3] # iterate through nodes [1, 2] >>> len(G) # number of nodes in graph 5 >>> G[1] # adjacency dict keyed by neighbor to edge attributes ... # Note: you should not change this dict manually! {2: {'color': 'blue', 'weight': 4}} The fastest way to traverse all edges of a graph is via adjacency_iter(), but the edges() method is often more convenient. >>> for n,nbrsdict in G.adjacency_iter(): ... for nbr,eattr in nbrsdict.items(): ... if 'weight' in eattr: ... 
def __init__(self, data=None, **attr):
    """Initialize a graph with edges, name, graph attributes.

    Parameters
    ----------
    data : input graph
        Data to initialize graph.  If data=None (default) an empty
        graph is created.  The data can be an edge list, or any
        NetworkX graph object.  If the corresponding optional Python
        packages are installed the data can also be a NumPy matrix
        or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
    name : string, optional (default='')
        An optional name for the graph.
    attr : keyword arguments, optional (default= no attributes)
        Attributes to add to graph as key=value pairs.

    See Also
    --------
    convert

    Examples
    --------
    >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G = nx.Graph(name='my graph')
    >>> e = [(1,2),(2,3),(3,4)] # list of edges
    >>> G = nx.Graph(e)

    Arbitrary graph attribute pairs (key=value) may be assigned

    >>> G=nx.Graph(e, day="Friday")
    >>> G.graph
    {'day': 'Friday'}

    """
    self.graph = {}   # dictionary for graph-level attributes
    self.node = {}    # dictionary for node attributes
    # Two adjacency maps are stored:
    #   self.succ[n] -> successors of n (out-edges)
    #   self.pred[n] -> predecessors of n (in-edges)
    # self.adj is an ALIAS of self.succ (same dict object), so generic
    # Graph code that walks self.adj sees the out-edges of this digraph.
    self.adj = {}  # empty adjacency dictionary
    self.pred = {}  # predecessor
    self.succ = self.adj  # successor (aliases self.adj -- do not rebind)
    # attempt to load graph with data
    if data is not None:
        convert.to_networkx_graph(data,create_using=self)
    # load graph attributes (must be after convert, which may set some)
    self.graph.update(attr)
    # self.edge is another alias of the adjacency dict, kept so the
    # Graph API G.edge[u][v] reaches the edge-attribute dicts.
    self.edge=self.adj
""" # set up attribute dict if attr_dict is None: attr_dict=attr else: try: attr_dict.update(attr) except AttributeError: raise NetworkXError(\ "The attr_dict argument must be a dictionary.") if n not in self.succ: self.succ[n] = {} self.pred[n] = {} self.node[n] = attr_dict else: # update attr even if node already exists self.node[n].update(attr_dict) def add_nodes_from(self, nodes, **attr): """Add multiple nodes. Parameters ---------- nodes : iterable container A container of nodes (list, dict, set, etc.). OR A container of (node, attribute dict) tuples. Node attributes are updated using the attribute dict. attr : keyword arguments, optional (default= no attributes) Update attributes for all nodes in nodes. Node attributes specified in nodes as a tuple take precedence over attributes specified generally. See Also -------- add_node Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_nodes_from('Hello') >>> K3 = nx.Graph([(0,1),(1,2),(2,0)]) >>> G.add_nodes_from(K3) >>> sorted(G.nodes(),key=str) [0, 1, 2, 'H', 'e', 'l', 'o'] Use keywords to update specific node attributes for every node. >>> G.add_nodes_from([1,2], size=10) >>> G.add_nodes_from([3,4], weight=0.4) Use (node, attrdict) tuples to update attributes for specific nodes. >>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})]) >>> G.node[1]['size'] 11 >>> H = nx.Graph() >>> H.add_nodes_from(G.nodes(data=True)) >>> H.node[1]['size'] 11 """ for n in nodes: try: newnode=n not in self.succ except TypeError: nn,ndict = n if nn not in self.succ: self.succ[nn] = {} self.pred[nn] = {} newdict = attr.copy() newdict.update(ndict) self.node[nn] = newdict else: olddict = self.node[nn] olddict.update(attr) olddict.update(ndict) continue if newnode: self.succ[n] = {} self.pred[n] = {} self.node[n] = attr.copy() else: self.node[n].update(attr) def remove_node(self, n): """Remove node n. Removes the node n and all adjacent edges. 
def remove_nodes_from(self, nbunch):
    """Remove multiple nodes.

    Parameters
    ----------
    nodes : iterable container
        A container of nodes (list, dict, set, etc.).  If a node
        in the container is not in the graph it is silently ignored.

    See Also
    --------
    remove_node

    Examples
    --------
    >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_path([0,1,2])
    >>> e = G.nodes()
    >>> e
    [0, 1, 2]
    >>> G.remove_nodes_from(e)
    >>> G.nodes()
    []

    """
    for node in nbunch:
        try:
            out_nbrs = self.succ[node]
            del self.node[node]
            # drop every out-edge node->nbr from the predecessor maps
            for nbr in out_nbrs:
                del self.pred[nbr][node]
            del self.succ[node]
            # drop every in-edge nbr->node from the successor maps
            for nbr in self.pred[node]:
                del self.succ[nbr][node]
            del self.pred[node]
        except KeyError:
            pass  # silent failure on remove of a missing node
def add_edges_from(self, ebunch, attr_dict=None, **attr):
    """Add all the edges in ebunch.

    Parameters
    ----------
    ebunch : container of edges
        Each edge given in the container will be added to the
        graph. The edges must be given as as 2-tuples (u,v) or
        3-tuples (u,v,d) where d is a dictionary containing edge
        data.
    attr_dict : dictionary, optional (default= no attributes)
        Dictionary of edge attributes.  Key/value pairs will
        update existing data associated with each edge.
    attr : keyword arguments, optional
        Edge data (or labels or objects) can be assigned using
        keyword arguments.

    Raises
    ------
    NetworkXError
        If attr_dict is not a dictionary, if an edge tuple has the
        wrong length, or if the data entry of a 3-tuple is not
        dict-like.

    See Also
    --------
    add_edge : add a single edge
    add_weighted_edges_from : convenient way to add weighted edges

    Notes
    -----
    Adding the same edge twice has no effect but any edge data
    will be updated when each duplicate edge is added.

    Examples
    --------
    >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
    >>> e = zip(range(0,3),range(1,4))
    >>> G.add_edges_from(e) # Add the path graph 0-1-2-3

    Associate data to edges

    >>> G.add_edges_from([(1,2),(2,3)], weight=3)
    >>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
    """
    # set up attribute dict
    if attr_dict is None:
        attr_dict = attr
    else:
        try:
            attr_dict.update(attr)
        except AttributeError:
            raise NetworkXError(
                "The attr_dict argument must be a dict.")
    # process ebunch
    for e in ebunch:
        ne = len(e)
        if ne == 3:
            u, v, dd = e
            # Fix: was `assert hasattr(dd,"update")`.  Input validation
            # must not rely on assert -- it is stripped under `python -O`,
            # which would let bad edge data through silently.  Raise the
            # same exception type the other invalid-input paths use.
            if not hasattr(dd, "update"):
                raise NetworkXError(
                    "The edge data in %s must be a dict-like object." % (e,))
        elif ne == 2:
            u, v = e
            dd = {}
        else:
            raise NetworkXError(
                "Edge tuple %s must be a 2-tuple or 3-tuple." % (e,))
        # auto-create missing endpoint nodes
        if u not in self.succ:
            self.succ[u] = {}
            self.pred[u] = {}
            self.node[u] = {}
        if v not in self.succ:
            self.succ[v] = {}
            self.pred[v] = {}
            self.node[v] = {}
        # merge attributes into any existing edge data; keyword/attr_dict
        # values first, per-edge dict dd takes precedence
        datadict = self.adj[u].get(v, {})
        datadict.update(attr_dict)
        datadict.update(dd)
        # succ and pred share the SAME data dict for the edge u->v
        self.succ[u][v] = datadict
        self.pred[v][u] = datadict
def remove_edges_from(self, ebunch):
    """Remove all edges specified in ebunch.

    Parameters
    ----------
    ebunch: list or container of edge tuples
        Each edge given in the list or container will be removed
        from the graph. The edges can be:

            - 2-tuples (u,v) edge between u and v.
            - 3-tuples (u,v,k) where k is ignored.

    See Also
    --------
    remove_edge : remove a single edge

    Notes
    -----
    Will fail silently if an edge in ebunch is not in the graph.

    Examples
    --------
    >>> G = nx.Graph()   # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G.add_path([0,1,2,3])
    >>> ebunch=[(1,2),(2,3)]
    >>> G.remove_edges_from(ebunch)
    """
    for edge in ebunch:
        u, v = edge[:2]  # any extra entries in the tuple are ignored
        nbrs = self.succ.get(u)
        if nbrs is not None and v in nbrs:
            # remove both directional records of the edge u->v
            del nbrs[v]
            del self.pred[v][u]
""" try: return iter(self.succ[n]) except KeyError: raise NetworkXError("The node %s is not in the digraph."%(n,)) def predecessors_iter(self,n): """Return an iterator over predecessor nodes of n.""" try: return iter(self.pred[n]) except KeyError: raise NetworkXError("The node %s is not in the digraph."%(n,)) def successors(self, n): """Return a list of successor nodes of n. neighbors() and successors() are the same function. """ return list(self.successors_iter(n)) def predecessors(self, n): """Return a list of predecessor nodes of n.""" return list(self.predecessors_iter(n)) # digraph definitions neighbors = successors neighbors_iter = successors_iter def edges_iter(self, nbunch=None, data=False): """Return an iterator over the edges. Edges are returned as tuples with optional data in the order (node, neighbor, data). Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. data : bool, optional (default=False) If True, return edge attribute dict in 3-tuple (u,v,data). Returns ------- edge_iter : iterator An iterator of (u,v) or (u,v,d) tuples of edges. See Also -------- edges : return a list of edges Notes ----- Nodes in nbunch that are not in the graph will be (quietly) ignored. For directed graphs this returns the out-edges. 
Examples -------- >>> G = nx.DiGraph() # or MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> [e for e in G.edges_iter()] [(0, 1), (1, 2), (2, 3)] >>> list(G.edges_iter(data=True)) # default data is {} (empty dict) [(0, 1, {}), (1, 2, {}), (2, 3, {})] >>> list(G.edges_iter([0,2])) [(0, 1), (2, 3)] >>> list(G.edges_iter(0)) [(0, 1)] """ if nbunch is None: nodes_nbrs=self.adj.items() else: nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch)) if data: for n,nbrs in nodes_nbrs: for nbr,data in nbrs.items(): yield (n,nbr,data) else: for n,nbrs in nodes_nbrs: for nbr in nbrs: yield (n,nbr) # alias out_edges to edges out_edges_iter=edges_iter out_edges=Graph.edges def in_edges_iter(self, nbunch=None, data=False): """Return an iterator over the incoming edges. Parameters ---------- nbunch : iterable container, optional (default= all nodes) A container of nodes. The container will be iterated through once. data : bool, optional (default=False) If True, return edge attribute dict in 3-tuple (u,v,data). Returns ------- in_edge_iter : iterator An iterator of (u,v) or (u,v,d) tuples of incoming edges. See Also -------- edges_iter : return an iterator of edges """ if nbunch is None: nodes_nbrs=self.pred.items() else: nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch)) if data: for n,nbrs in nodes_nbrs: for nbr,data in nbrs.items(): yield (nbr,n,data) else: for n,nbrs in nodes_nbrs: for nbr in nbrs: yield (nbr,n) def in_edges(self, nbunch=None, data=False): """Return a list of the incoming edges. See Also -------- edges : return a list of edges """ return list(self.in_edges_iter(nbunch, data)) def degree_iter(self, nbunch=None, weight=None): """Return an iterator for (node, degree). The node degree is the number of edges adjacent to the node. Parameters ---------- nbunch : iterable container, optional (default=all nodes) A container of nodes. The container will be iterated through once. 
def in_degree_iter(self, nbunch=None, weight=None):
    """Return an iterator for (node, in-degree).

    The node in-degree is the number of edges pointing in to the node.

    Parameters
    ----------
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes.  The container will be iterated
        through once.
    weight : string or None, optional (default=None)
       The edge attribute that holds the numerical value used
       as a weight.  If None, then each edge has weight 1.
       The degree is the sum of the edge weights adjacent to the node.

    Returns
    -------
    nd_iter : an iterator
        The iterator returns two-tuples of (node, in-degree).

    See Also
    --------
    degree, in_degree, out_degree, out_degree_iter

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_path([0,1,2,3])
    >>> list(G.in_degree_iter(0)) # node 0 with degree 0
    [(0, 0)]
    >>> list(G.in_degree_iter([0,1]))
    [(0, 0), (1, 1)]

    """
    # pair each requested node with its predecessor dict
    if nbunch is None:
        pairs = self.pred.items()
    else:
        pairs = ((n, self.pred[n]) for n in self.nbunch_iter(nbunch))
    for n, preds in pairs:
        if weight is None:
            yield (n, len(preds))
        else:
            # weighted: sum the weight attribute over incoming edges,
            # counting 1 for any edge that lacks the attribute
            total = 0
            for edata in preds.values():
                total += edata.get(weight, 1)
            yield (n, total)
def out_degree(self, nbunch=None, weight=None):
    """Return the out-degree of a node or nodes.

    The node out-degree is the number of edges pointing out of the node.

    Parameters
    ----------
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes.  The container will be iterated
        through once.
    weight : string or None, optional (default=None)
       The edge attribute that holds the numerical value used
       as a weight.  If None, then each edge has weight 1.
       The degree is the sum of the edge weights adjacent to the node.

    Returns
    -------
    nd : dictionary, or number
        A dictionary with nodes as keys and out-degree as values or
        a number if a single node is specified.

    Examples
    --------
    >>> G = nx.DiGraph()   # or MultiDiGraph
    >>> G.add_path([0,1,2,3])
    >>> G.out_degree(0)
    1
    >>> G.out_degree([0,1])
    {0: 1, 1: 1}
    >>> list(G.out_degree([0,1]).values())
    [1, 1]
    """
    degree_pairs = self.out_degree_iter(nbunch, weight)
    if nbunch in self:
        # a single node was given -> unwrap the lone (node, degree) pair
        _, deg = next(degree_pairs)
        return deg
    # otherwise build and return a node -> degree mapping
    return dict(degree_pairs)
Returns ------- G : Graph An undirected graph with the same name and nodes and with edge (u,v,data) if either (u,v,data) or (v,u,data) is in the digraph. If both edges exist in digraph and their edge data is different, only one edge is created with an arbitrary choice of which edge data to use. You must check and correct for this manually if desired. Notes ----- If edges in both directions (u,v) and (v,u) exist in the graph, attributes for the new undirected edge will be a combination of the attributes of the directed edges. The edge data is updated in the (arbitrary) order that the edges are encountered. For more customized control of the edge attributes use add_edge(). This returns a "deepcopy" of the edge, node, and graph attributes which attempts to completely copy all of the data and references. This is in contrast to the similar G=DiGraph(D) which returns a shallow copy of the data. See the Python copy module for more information on shallow and deep copies, http://docs.python.org/library/copy.html. """ H=Graph() H.name=self.name H.add_nodes_from(self) if reciprocal is True: H.add_edges_from( (u,v,deepcopy(d)) for u,nbrs in self.adjacency_iter() for v,d in nbrs.items() if v in self.pred[u]) else: H.add_edges_from( (u,v,deepcopy(d)) for u,nbrs in self.adjacency_iter() for v,d in nbrs.items() ) H.graph=deepcopy(self.graph) H.node=deepcopy(self.node) return H def reverse(self, copy=True): """Return the reverse of the graph. The reverse is a graph with the same nodes and edges but with the directions of the edges reversed. Parameters ---------- copy : bool optional (default=True) If True, return a new DiGraph holding the reversed edges. If False, reverse the reverse graph is created using the original graph (this changes the original graph). 
""" if copy: H = self.__class__(name="Reverse of (%s)"%self.name) H.add_nodes_from(self) H.add_edges_from( (v,u,deepcopy(d)) for u,v,d in self.edges(data=True) ) H.graph=deepcopy(self.graph) H.node=deepcopy(self.node) else: self.pred,self.succ=self.succ,self.pred self.adj=self.succ H=self return H def subgraph(self, nbunch): """Return the subgraph induced on nodes in nbunch. The induced subgraph of the graph contains the nodes in nbunch and the edges between those nodes. Parameters ---------- nbunch : list, iterable A container of nodes which will be iterated through once. Returns ------- G : Graph A subgraph of the graph with the same edge attributes. Notes ----- The graph, edge or node attributes just point to the original graph. So changes to the node or edge structure will not be reflected in the original graph while changes to the attributes will. To create a subgraph with its own copy of the edge/node attributes use: nx.Graph(G.subgraph(nbunch)) If edge attributes are containers, a deep copy can be obtained using: G.subgraph(nbunch).copy() For an inplace reduction of a graph to a subgraph you can remove nodes: G.remove_nodes_from([ n in G if n not in set(nbunch)]) Examples -------- >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc >>> G.add_path([0,1,2,3]) >>> H = G.subgraph([0,1,2]) >>> H.edges() [(0, 1), (1, 2)] """ bunch = self.nbunch_iter(nbunch) # create new graph and copy subgraph into it H = self.__class__() # copy node and attribute dictionaries for n in bunch: H.node[n]=self.node[n] # namespace shortcuts for speed H_succ=H.succ H_pred=H.pred self_succ=self.succ # add nodes for n in H: H_succ[n]={} H_pred[n]={} # add edges for u in H_succ: Hnbrs=H_succ[u] for v,datadict in self_succ[u].items(): if v in H_succ: # add both representations of edge: u-v and v-u Hnbrs[v]=datadict H_pred[v][u]=datadict H.graph=self.graph return H DEFAULT_RISKS = { INTERNET_RISK : ( "INTERNET_RISK", (195, 255, 0) ), PRIVACY_RISK : ( "PRIVACY_RISK", (255, 255, 
51) ), PHONE_RISK : ( "PHONE_RISK", ( 255, 216, 0 ) ), SMS_RISK : ( "SMS_RISK", ( 255, 93, 0 ) ), MONEY_RISK : ( "MONEY_RISK", ( 255, 0, 0 ) ), } DEXCLASSLOADER_COLOR = (0, 0, 0) ACTIVITY_COLOR = (51, 255, 51) SERVICE_COLOR = (0, 204, 204) RECEIVER_COLOR = (204, 51, 204) ID_ATTRIBUTES = { "type" : 0, "class_name" : 1, "method_name" : 2, "descriptor" : 3, "permissions" : 4, "permissions_level" : 5, "dynamic_code" : 6, } class GVMAnalysis(object): def __init__(self, vmx, apk): self.vmx = vmx self.vm = self.vmx.get_vm() self.nodes = {} self.nodes_id = {} self.entry_nodes = [] self.G = DiGraph() self.GI = DiGraph() for j in self.vmx.get_tainted_packages().get_internal_packages(): src_class_name, src_method_name, src_descriptor = j.get_src(self.vm.get_class_manager()) dst_class_name, dst_method_name, dst_descriptor = j.get_dst(self.vm.get_class_manager()) n1 = self._get_node(src_class_name, src_method_name, src_descriptor) n2 = self._get_node(dst_class_name, dst_method_name, dst_descriptor) self.G.add_edge(n1.id, n2.id) n1.add_edge(n2, j) internal_new_packages = self.vmx.tainted_packages.get_internal_new_packages() for j in internal_new_packages: for path in internal_new_packages[j]: src_class_name, src_method_name, src_descriptor = path.get_src(self.vm.get_class_manager()) n1 = self._get_node(src_class_name, src_method_name, src_descriptor) n2 = self._get_node(j, "", "") self.GI.add_edge(n2.id, n1.id) n1.add_edge(n2, path) if apk != None: for i in apk.get_activities(): j = bytecode.FormatClassToJava(i) n1 = self._get_exist_node( j, "onCreate", "(Landroid/os/Bundle;)V" ) if n1 != None: n1.set_attributes( { "type" : "activity" } ) n1.set_attributes( { "color" : ACTIVITY_COLOR } ) n2 = self._get_new_node_from( n1, "ACTIVITY" ) n2.set_attributes( { "color" : ACTIVITY_COLOR } ) self.G.add_edge( n2.id, n1.id ) self.entry_nodes.append( n1.id ) for i in apk.get_services(): j = bytecode.FormatClassToJava(i) n1 = self._get_exist_node( j, "onCreate", "()V" ) if n1 != None: 
n1.set_attributes( { "type" : "service" } ) n1.set_attributes( { "color" : SERVICE_COLOR } ) n2 = self._get_new_node_from( n1, "SERVICE" ) n2.set_attributes( { "color" : SERVICE_COLOR } ) self.G.add_edge( n2.id, n1.id ) self.entry_nodes.append( n1.id ) for i in apk.get_receivers(): j = bytecode.FormatClassToJava(i) n1 = self._get_exist_node( j, "onReceive", "(Landroid/content/Context; Landroid/content/Intent;)V" ) if n1 != None: n1.set_attributes( { "type" : "receiver" } ) n1.set_attributes( { "color" : RECEIVER_COLOR } ) n2 = self._get_new_node_from( n1, "RECEIVER" ) n2.set_attributes( { "color" : RECEIVER_COLOR } ) self.G.add_edge( n2.id, n1.id ) self.entry_nodes.append( n1.id ) # Specific Java/Android library for c in self.vm.get_classes(): #if c.get_superclassname() == "Landroid/app/Service;": # n1 = self._get_node( c.get_name(), "<init>", "()V" ) # n2 = self._get_node( c.get_name(), "onCreate", "()V" ) # self.G.add_edge( n1.id, n2.id ) if c.get_superclassname() == "Ljava/lang/Thread;" or c.get_superclassname() == "Ljava/util/TimerTask;": for i in self.vm.get_method("run"): if i.get_class_name() == c.get_name(): n1 = self._get_node( i.get_class_name(), i.get_name(), i.get_descriptor() ) n2 = self._get_node( i.get_class_name(), "start", i.get_descriptor() ) # link from start to run self.G.add_edge( n2.id, n1.id ) n2.add_edge( n1, {} ) # link from init to start for init in self.vm.get_method("<init>"): if init.get_class_name() == c.get_name(): n3 = self._get_node( init.get_class_name(), "<init>", init.get_descriptor() ) #n3 = self._get_node( i.get_class_name(), "<init>", i.get_descriptor() ) self.G.add_edge( n3.id, n2.id ) n3.add_edge( n2, {} ) #elif c.get_superclassname() == "Landroid/os/AsyncTask;": # for i in self.vm.get_method("doInBackground"): # if i.get_class_name() == c.get_name(): # n1 = self._get_node( i.get_class_name(), i.get_name(), i.get_descriptor() ) # n2 = self._get_exist_node( i.get_class_name(), "execute", i.get_descriptor() ) # print n1, n2, 
i.get_descriptor() #for j in self.vm.get_method("doInBackground"): # n2 = self._get_exist_node( i.get_class_name(), j.get_name(), j.get_descriptor() ) # print n1, n2 # n2 = self._get_node( i.get_class_name(), " # raise("ooo") #for j in self.vmx.tainted_packages.get_internal_new_packages(): # print "\t %s %s %s %x ---> %s %s %s" % (j.get_method().get_class_name(), j.get_method().get_name(), j.get_method().get_descriptor(), \ # j.get_bb().start + j.get_idx(), \ # j.get_class_name(), j.get_name(), j.get_descriptor()) list_permissions = self.vmx.get_permissions([]) for x in list_permissions: for j in list_permissions[x]: if isinstance(j, PathVar): continue src_class_name, src_method_name, src_descriptor = j.get_src( self.vm.get_class_manager() ) dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.vm.get_class_manager() ) n1 = self._get_exist_node( dst_class_name, dst_method_name, dst_descriptor ) if n1 == None: continue n1.set_attributes( { "permissions" : 1 } ) n1.set_attributes( { "permissions_level" : DVM_PERMISSIONS[ "MANIFEST_PERMISSION" ][ x ][0] } ) n1.set_attributes( { "permissions_details" : x } ) try: for tmp_perm in PERMISSIONS_RISK[ x ]: if tmp_perm in DEFAULT_RISKS: n2 = self._get_new_node( dst_class_name, dst_method_name, dst_descriptor + " " + DEFAULT_RISKS[ tmp_perm ][0], DEFAULT_RISKS[ tmp_perm ][0] ) n2.set_attributes( { "color" : DEFAULT_RISKS[ tmp_perm ][1] } ) self.G.add_edge( n2.id, n1.id ) n1.add_risk( DEFAULT_RISKS[ tmp_perm ][0] ) n1.add_api( x, src_class_name + "-" + src_method_name + "-" + src_descriptor ) except KeyError: pass # Tag DexClassLoader for m, _ in self.vmx.get_tainted_packages().get_packages(): if m.get_name() == "Ldalvik/system/DexClassLoader;": for path in m.get_paths(): if path.get_access_flag() == TAINTED_PACKAGE_CREATE: src_class_name, src_method_name, src_descriptor = path.get_src( self.vm.get_class_manager() ) n1 = self._get_exist_node( src_class_name, src_method_name, src_descriptor ) n2 = 
self._get_new_node( dst_class_name, dst_method_name, dst_descriptor + " " + "DEXCLASSLOADER", "DEXCLASSLOADER" ) n1.set_attributes( { "dynamic_code" : "true" } ) n2.set_attributes( { "color" : DEXCLASSLOADER_COLOR } ) self.G.add_edge( n2.id, n1.id ) n1.add_risk( "DEXCLASSLOADER" ) def _get_exist_node(self, class_name, method_name, descriptor): key = "%s %s %s" % (class_name, method_name, descriptor) try: return self.nodes[ key ] except KeyError: return None def _get_node(self, class_name, method_name, descriptor): if method_name == "" and descriptor == "": key = class_name else: key = "%s %s %s" % (class_name, method_name, descriptor) if key not in self.nodes: self.nodes[key] = NodeF(len(self.nodes), class_name, method_name, descriptor) self.nodes_id[self.nodes[key].id] = self.nodes[key] return self.nodes[key] def _get_new_node_from(self, n, label): return self._get_new_node( n.class_name, n.method_name, n.descriptor + label, label ) def _get_new_node(self, class_name, method_name, descriptor, label): key = "%s %s %s" % (class_name, method_name, descriptor) if key not in self.nodes: self.nodes[ key ] = NodeF( len(self.nodes), class_name, method_name, descriptor, label, False ) self.nodes_id[ self.nodes[ key ].id ] = self.nodes[ key ] return self.nodes[ key ] def set_new_attributes(self, cm): for i in self.G.nodes(): n1 = self.nodes_id[ i ] m1 = self.vm.get_method_descriptor( n1.class_name, n1.method_name, n1.descriptor ) H = cm( self.vmx, m1 ) n1.set_attributes( H ) def export_to_gexf(self): buff = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" buff += "<gexf xmlns=\"http://www.gephi.org/gexf\" xmlns:viz=\"http://www.gephi.org/gexf/viz\">\n" buff += "<graph type=\"static\">\n" buff += "<attributes class=\"node\" type=\"static\">\n" buff += "<attribute default=\"normal\" id=\"%d\" title=\"type\" type=\"string\"/>\n" % ID_ATTRIBUTES[ "type"] buff += "<attribute id=\"%d\" title=\"class_name\" type=\"string\"/>\n" % ID_ATTRIBUTES[ "class_name"] buff += "<attribute 
id=\"%d\" title=\"method_name\" type=\"string\"/>\n" % ID_ATTRIBUTES[ "method_name"] buff += "<attribute id=\"%d\" title=\"descriptor\" type=\"string\"/>\n" % ID_ATTRIBUTES[ "descriptor"] buff += "<attribute default=\"0\" id=\"%d\" title=\"permissions\" type=\"integer\"/>\n" % ID_ATTRIBUTES[ "permissions"] buff += "<attribute default=\"normal\" id=\"%d\" title=\"permissions_level\" type=\"string\"/>\n" % ID_ATTRIBUTES[ "permissions_level"] buff += "<attribute default=\"false\" id=\"%d\" title=\"dynamic_code\" type=\"boolean\"/>\n" % ID_ATTRIBUTES[ "dynamic_code"] buff += "</attributes>\n" buff += "<nodes>\n" for node in self.G.nodes(): buff += "<node id=\"%d\" label=\"%s\">\n" % (node, escape(self.nodes_id[ node ].label)) buff += self.nodes_id[ node ].get_attributes_gexf() buff += "</node>\n" buff += "</nodes>\n" buff += "<edges>\n" nb = 0 for edge in self.G.edges(): buff += "<edge id=\"%d\" source=\"%d\" target=\"%d\"/>\n" % (nb, edge[0], edge[1]) nb += 1 buff += "</edges>\n" buff += "</graph>\n" buff += "</gexf>\n" return buff def export_to_gml(self): buff = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n" buff += "<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xmlns:yed=\"http://www.yworks.com/xml/yed/3\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd\">\n" buff += "<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"d5\"/>\n" buff += "<key for=\"node\" id=\"d6\" yfiles.type=\"nodegraphics\"/>\n" buff += "<graph edgedefault=\"directed\" id=\"G\">\n" for node in self.G.nodes(): buff += "<node id=\"%d\">\n" % (node) #fd.write( "<node id=\"%d\" label=\"%s\">\n" % (node, escape(self.nodes_id[ node ].label)) ) buff += self.nodes_id[ node ].get_attributes_gml() buff += "</node>\n" nb = 0 for edge in self.G.edges(): buff += "<edge id=\"%d\" source=\"%d\" 
target=\"%d\"/>\n" % (nb, edge[0], edge[1]) nb += 1 buff += "</graph>\n" buff += "</graphml>\n" return buff DEFAULT_NODE_TYPE = "normal" DEFAULT_NODE_PERM = 0 DEFAULT_NODE_PERM_LEVEL = -1 PERMISSIONS_LEVEL = { "dangerous" : 3, "signatureOrSystem" : 2, "signature" : 1, "normal" : 0, } COLOR_PERMISSIONS_LEVEL = { "dangerous" : (255, 0, 0), "signatureOrSystem" : (255, 63, 63), "signature" : (255, 132, 132), "normal" : (255, 181, 181), } class NodeF(object): def __init__(self, id, class_name, method_name, descriptor, label=None, real=True): self.class_name = class_name self.method_name = method_name self.descriptor = descriptor self.id = id self.real = real self.risks = [] self.api = {} self.edges = {} if label == None: self.label = "%s %s %s" % (class_name, method_name, descriptor) else: self.label = label self.attributes = {"type": DEFAULT_NODE_TYPE, "color": None, "permissions": DEFAULT_NODE_PERM, "permissions_level": DEFAULT_NODE_PERM_LEVEL, "permissions_details": set(), "dynamic_code": "false", } def add_edge(self, n, idx): try: self.edges[n].append(idx) except KeyError: self.edges[n] = [] self.edges[n].append(idx) def get_attributes_gexf(self): buff = "" if self.attributes[ "color" ] != None: buff += "<viz:color r=\"%d\" g=\"%d\" b=\"%d\"/>\n" % (self.attributes[ "color" ][0], self.attributes[ "color" ][1], self.attributes[ "color" ][2]) buff += "<attvalues>\n" buff += "<attvalue id=\"%d\" value=\"%s\"/>\n" % (ID_ATTRIBUTES["class_name"], escape(self.class_name)) buff += "<attvalue id=\"%d\" value=\"%s\"/>\n" % (ID_ATTRIBUTES["method_name"], escape(self.method_name)) buff += "<attvalue id=\"%d\" value=\"%s\"/>\n" % (ID_ATTRIBUTES["descriptor"], escape(self.descriptor)) if self.attributes[ "type" ] != DEFAULT_NODE_TYPE: buff += "<attvalue id=\"%d\" value=\"%s\"/>\n" % (ID_ATTRIBUTES["type"], self.attributes[ "type" ]) if self.attributes[ "permissions" ] != DEFAULT_NODE_PERM: buff += "<attvalue id=\"%d\" value=\"%s\"/>\n" % (ID_ATTRIBUTES["permissions"], 
self.attributes[ "permissions" ]) buff += "<attvalue id=\"%d\" value=\"%s\"/>\n" % (ID_ATTRIBUTES["permissions_level"], self.attributes[ "permissions_level_name" ]) buff += "<attvalue id=\"%d\" value=\"%s\"/>\n" % (ID_ATTRIBUTES["dynamic_code"], self.attributes[ "dynamic_code" ]) buff += "</attvalues>\n" return buff def get_attributes_gml(self): buff = "" buff += "<data key=\"d6\">\n" buff += "<y:ShapeNode>\n" height = 10 width = max(len(self.class_name), len(self.method_name)) width = max(width, len(self.descriptor)) buff += "<y:Geometry height=\"%f\" width=\"%f\"/>\n" % (16 * height, 8 * width) if self.attributes[ "color" ] != None: buff += "<y:Fill color=\"#%02x%02x%02x\" transparent=\"false\"/>\n" % (self.attributes[ "color" ][0], self.attributes[ "color" ][1], self.attributes[ "color" ][2]) buff += "<y:NodeLabel alignment=\"left\" autoSizePolicy=\"content\" fontFamily=\"Dialog\" fontSize=\"13\" fontStyle=\"plain\" hasBackgroundColor=\"false\" hasLineColor=\"false\" modelName=\"internal\" modelPosition=\"c\" textColor=\"#000000\" visible=\"true\">\n" label = self.class_name + "\n" + self.method_name + "\n" + self.descriptor buff += escape(label) buff += "</y:NodeLabel>\n" buff += "</y:ShapeNode>\n" buff += "</data>\n" return buff def get_attributes(self): return self.attributes def get_attribute(self, name): return self.attributes[ name ] def set_attributes(self, values): for i in values: if i == "permissions": self.attributes[ "permissions" ] += values[i] elif i == "permissions_level": if values[i] > self.attributes[ "permissions_level" ]: self.attributes[ "permissions_level" ] = PERMISSIONS_LEVEL[ values[i] ] self.attributes[ "permissions_level_name" ] = values[i] self.attributes[ "color" ] = COLOR_PERMISSIONS_LEVEL[ values[i] ] elif i == "permissions_details": self.attributes[ i ].add( values[i] ) else: self.attributes[ i ] = values[i] def add_risk(self, risk): if risk not in self.risks: self.risks.append( risk ) def add_api(self, perm, api): if perm not in 
self.api: self.api[ perm ] = [] if api not in self.api[ perm ]: self.api[ perm ].append( api )
gpl-3.0
eggplantbren/DNest4
python/dnest4/builder.py
1
7221
from __future__ import print_function import numpy as np from .distributions import * __all__ = ["Model", "Node", "data_declaration", "data_definition",\ "generate_h", "generate_cpp"] class Model: def __init__(self): self.nodes = [] self.indices = {} self.num_params = 0 def __getitem__(self, item): return self.nodes[self.indices[item]] def add_node(self, node): self.nodes.append(node) self.indices[node.name] = len(self.nodes)-1 if node.observed == False and type(node.distribution) != Delta: self.num_params += 1 def declaration(self): s = "" for node in self.nodes: if not node.observed: if type(node.distribution) is Delta: s += node.cpp_type +\ " {x};\n".format(x=node.name) else: s += node.cpp_type +\ " _{x}, {x};\n".format(x=node.name) return s def from_prior(self): s = "" for node in self.nodes: if node.observed == False and type(node.distribution) is not Delta: s += "_{x} = rng.rand();\n".format(x=node.name) s += "\n" for node in self.nodes: if node.observed == False: s += "" + node.distribution.from_uniform().format(x=node.name) return s def perturb(self): s = "" s += "double logH = 0.0;\n\n" s += "int which;\n" s += "int reps = 1;\n" s += "if(rng.rand() <= 0.5)\n" s += " reps = (int)pow(10.0, 2*rng.rand());\n" s += "for(int i=0; i<reps; ++i)\n" s += "{\n" s += "which = rng.rand_int(" + str(self.num_params) + ");\n" k = 0 for node in self.nodes: if node.observed == False and type(node.distribution) != Delta: s += "if(which == {k})\n{{\n".format(k=k) s += "_{x}".format(x=node.name) + " += rng.randh();\n" s += "DNest4::wrap(_{x}, 0.0, 1.0);\n}}\n"\ .format(x=node.name); k += 1 s += "}\n\n" for node in self.nodes: if node.observed == False: s += "" + node.distribution.from_uniform()\ .format(x=node.name) s += "\nreturn logH;\n\n" return s def log_likelihood(self): s = "" s += "double logp = 0.0;\n\n" for node in self.nodes: if node.observed and type(node.distribution) != Delta: s += node.distribution.log_prob().format(x=node.name) s += "if(std::isnan(logp) || 
std::isinf(logp))\n" s += " logp = -1E300;\n" s += "\nreturn logp;\n" return s def print(self): s = "" for node in self.nodes: if not node.observed: s += "out<<" + node.name + "<<\' \';\n" return s def description(self): s = "" s += "return string(\"" for node in self.nodes: if not node.observed: s += node.name + ", " s = s[0:-2] s += "\");" return s class Node: """ Represents a node in the graph. """ def __init__(self, name, distribution, observed=False): self.cpp_type = distribution.cpp_type self.name = name self.distribution = distribution self.observed = observed def data_declaration(data): # Static variables for anything which is data or prior info s = "" for name in data: if type(data[name]) == int: s += "static constexpr int " + name + ";\n" elif type(data[name]) == float: s += "static constexpr double " + name + ";\n" elif type(data[name] == np.array) and\ data[name].dtype.name == 'int64': for i in range(0, len(data[name])): s += "static constexpr int " + name + str(i) + ";\n" elif type(data[name] == np.array) and\ data[name].dtype.name == 'float64': for i in range(0, len(data[name])): s += "static constexpr double " + name + str(i) + ";\n" return s def data_declaration(data): # Static variables for anything which is data or prior info s = "" for name in data: if type(data[name]) == int: s += "static constexpr int " + name + " = "\ + str(data[name]) + ";\n" elif type(data[name]) == float: s += "static constexpr double " + name + " = "\ + str(data[name]) + ";\n" elif type(data[name] == np.array) and\ data[name].dtype.name == 'int64': for i in range(0, len(data[name])): s += "static constexpr int " + name + str(i) + " = " + str(data[name][i]) + ";\n" elif type(data[name] == np.array) and\ data[name].dtype.name == 'float64': for i in range(0, len(data[name])): s += "static constexpr double " + name + str(i) + " = " + str(data[name][i]) + ";\n" return s def data_definition(data): s = "" # # Static variables for anything which is data or prior info # for name in 
data: # if type(data[name]) == int: # s += "constexpr int MyModel::" + name + " = "\ # + str(data[name]) + ";\n" # elif type(data[name]) == float: # s += "constexpr double MyModel::" + name + " = "\ # + str(data[name]) + ";\n" # elif type(data[name] == np.array) and\ # data[name].dtype.name == 'int64': # for i in range(0, len(data[name])): # s += "constexpr int MyModel::" + name + str(i)\ # + " = " + str(data[name][i]) + ";\n" # elif type(data[name] == np.array) and\ # data[name].dtype.name == 'float64': # for i in range(0, len(data[name])): # s += "constexpr double MyModel::" + name + str(i)\ # + " = " + str(data[name][i]) + ";\n" return s def generate_h(model, data): f = open("MyModel.h.template") s = "".join(f.readlines()) f.close() s = s.replace("{DECLARATIONS}", model.declaration() + data_declaration(data)) f = open("MyModel.h", "w") f.write(s) f.close() def generate_cpp(model, data): f = open("MyModel.cpp.template") s = "".join(f.readlines()) f.close() s = s.replace("{STATICS}", data_definition(data)) s = s.replace("{FROM_PRIOR}", model.from_prior()) s = s.replace("{PERTURB}", model.perturb()) s = s.replace("{LOG_LIKELIHOOD}", model.log_likelihood()) s = s.replace("{PRINT}", model.print()) s = s.replace("{DESCRIPTION}", model.description()) f = open("MyModel.cpp", "w") f.write(s) f.close()
mit
drpaneas/linuxed.gr
lib/python2.7/site-packages/paramiko/sftp_file.py
34
19081
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ SFTP file object """ from __future__ import with_statement from binascii import hexlify from collections import deque import socket import threading import time from paramiko.common import DEBUG from paramiko.file import BufferedFile from paramiko.py3compat import long from paramiko.sftp import CMD_CLOSE, CMD_READ, CMD_DATA, SFTPError, CMD_WRITE, \ CMD_STATUS, CMD_FSTAT, CMD_ATTRS, CMD_FSETSTAT, CMD_EXTENDED from paramiko.sftp_attr import SFTPAttributes class SFTPFile (BufferedFile): """ Proxy object for a file on the remote server, in client mode SFTP. Instances of this class may be used as context managers in the same way that built-in Python file objects are. """ # Some sftp servers will choke if you send read/write requests larger than # this size. 
MAX_REQUEST_SIZE = 32768 def __init__(self, sftp, handle, mode='r', bufsize=-1): BufferedFile.__init__(self) self.sftp = sftp self.handle = handle BufferedFile._set_mode(self, mode, bufsize) self.pipelined = False self._prefetching = False self._prefetch_done = False self._prefetch_data = {} self._prefetch_extents = {} self._prefetch_lock = threading.Lock() self._saved_exception = None self._reqs = deque() def __del__(self): self._close(async=True) def close(self): """ Close the file. """ self._close(async=False) def _close(self, async=False): # We allow double-close without signaling an error, because real # Python file objects do. However, we must protect against actually # sending multiple CMD_CLOSE packets, because after we close our # handle, the same handle may be re-allocated by the server, and we # may end up mysteriously closing some random other file. (This is # especially important because we unconditionally call close() from # __del__.) if self._closed: return self.sftp._log(DEBUG, 'close(%s)' % hexlify(self.handle)) if self.pipelined: self.sftp._finish_responses(self) BufferedFile.close(self) try: if async: # GC'd file handle could be called from an arbitrary thread -- don't wait for a response self.sftp._async_request(type(None), CMD_CLOSE, self.handle) else: self.sftp._request(CMD_CLOSE, self.handle) except EOFError: # may have outlived the Transport connection pass except (IOError, socket.error): # may have outlived the Transport connection pass def _data_in_prefetch_requests(self, offset, size): k = [x for x in list(self._prefetch_extents.values()) if x[0] <= offset] if len(k) == 0: return False k.sort(key=lambda x: x[0]) buf_offset, buf_size = k[-1] if buf_offset + buf_size <= offset: # prefetch request ends before this one begins return False if buf_offset + buf_size >= offset + size: # inclusive return True # well, we have part of the request. see if another chunk has the rest. 
return self._data_in_prefetch_requests(buf_offset + buf_size, offset + size - buf_offset - buf_size) def _data_in_prefetch_buffers(self, offset): """ if a block of data is present in the prefetch buffers, at the given offset, return the offset of the relevant prefetch buffer. otherwise, return None. this guarantees nothing about the number of bytes collected in the prefetch buffer so far. """ k = [i for i in self._prefetch_data.keys() if i <= offset] if len(k) == 0: return None index = max(k) buf_offset = offset - index if buf_offset >= len(self._prefetch_data[index]): # it's not here return None return index def _read_prefetch(self, size): """ read data out of the prefetch buffer, if possible. if the data isn't in the buffer, return None. otherwise, behaves like a normal read. """ # while not closed, and haven't fetched past the current position, and haven't reached EOF... while True: offset = self._data_in_prefetch_buffers(self._realpos) if offset is not None: break if self._prefetch_done or self._closed: break self.sftp._read_response() self._check_exception() if offset is None: self._prefetching = False return None prefetch = self._prefetch_data[offset] del self._prefetch_data[offset] buf_offset = self._realpos - offset if buf_offset > 0: self._prefetch_data[offset] = prefetch[:buf_offset] prefetch = prefetch[buf_offset:] if size < len(prefetch): self._prefetch_data[self._realpos + size] = prefetch[size:] prefetch = prefetch[:size] return prefetch def _read(self, size): size = min(size, self.MAX_REQUEST_SIZE) if self._prefetching: data = self._read_prefetch(size) if data is not None: return data t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size)) if t != CMD_DATA: raise SFTPError('Expected data') return msg.get_string() def _write(self, data): # may write less than requested if it would exceed max packet size chunk = min(len(data), self.MAX_REQUEST_SIZE) self._reqs.append(self.sftp._async_request(type(None), CMD_WRITE, 
self.handle, long(self._realpos), data[:chunk])) if not self.pipelined or (len(self._reqs) > 100 and self.sftp.sock.recv_ready()): while len(self._reqs): req = self._reqs.popleft() t, msg = self.sftp._read_response(req) if t != CMD_STATUS: raise SFTPError('Expected status') # convert_status already called return chunk def settimeout(self, timeout): """ Set a timeout on read/write operations on the underlying socket or ssh `.Channel`. :param float timeout: seconds to wait for a pending read/write operation before raising ``socket.timeout``, or ``None`` for no timeout .. seealso:: `.Channel.settimeout` """ self.sftp.sock.settimeout(timeout) def gettimeout(self): """ Returns the timeout in seconds (as a `float`) associated with the socket or ssh `.Channel` used for this file. .. seealso:: `.Channel.gettimeout` """ return self.sftp.sock.gettimeout() def setblocking(self, blocking): """ Set blocking or non-blocking mode on the underiying socket or ssh `.Channel`. :param int blocking: 0 to set non-blocking mode; non-0 to set blocking mode. .. seealso:: `.Channel.setblocking` """ self.sftp.sock.setblocking(blocking) def seek(self, offset, whence=0): self.flush() if whence == self.SEEK_SET: self._realpos = self._pos = offset elif whence == self.SEEK_CUR: self._pos += offset self._realpos = self._pos else: self._realpos = self._pos = self._get_size() + offset self._rbuffer = bytes() def stat(self): """ Retrieve information about this file from the remote system. This is exactly like `.SFTPClient.stat`, except that it operates on an already-open file. :return: an `.SFTPAttributes` object containing attributes about this file. """ t, msg = self.sftp._request(CMD_FSTAT, self.handle) if t != CMD_ATTRS: raise SFTPError('Expected attributes') return SFTPAttributes._from_msg(msg) def chmod(self, mode): """ Change the mode (permissions) of this file. The permissions are unix-style and identical to those used by Python's `os.chmod` function. 
:param int mode: new permissions """ self.sftp._log(DEBUG, 'chmod(%s, %r)' % (hexlify(self.handle), mode)) attr = SFTPAttributes() attr.st_mode = mode self.sftp._request(CMD_FSETSTAT, self.handle, attr) def chown(self, uid, gid): """ Change the owner (``uid``) and group (``gid``) of this file. As with Python's `os.chown` function, you must pass both arguments, so if you only want to change one, use `stat` first to retrieve the current owner and group. :param int uid: new owner's uid :param int gid: new group id """ self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid)) attr = SFTPAttributes() attr.st_uid, attr.st_gid = uid, gid self.sftp._request(CMD_FSETSTAT, self.handle, attr) def utime(self, times): """ Set the access and modified times of this file. If ``times`` is ``None``, then the file's access and modified times are set to the current time. Otherwise, ``times`` must be a 2-tuple of numbers, of the form ``(atime, mtime)``, which is used to set the access and modified times, respectively. This bizarre API is mimicked from Python for the sake of consistency -- I apologize. :param tuple times: ``None`` or a tuple of (access time, modified time) in standard internet epoch time (seconds since 01 January 1970 GMT) """ if times is None: times = (time.time(), time.time()) self.sftp._log(DEBUG, 'utime(%s, %r)' % (hexlify(self.handle), times)) attr = SFTPAttributes() attr.st_atime, attr.st_mtime = times self.sftp._request(CMD_FSETSTAT, self.handle, attr) def truncate(self, size): """ Change the size of this file. This usually extends or shrinks the size of the file, just like the ``truncate()`` method on Python file objects. 
:param size: the new size of the file :type size: int or long """ self.sftp._log(DEBUG, 'truncate(%s, %r)' % (hexlify(self.handle), size)) attr = SFTPAttributes() attr.st_size = size self.sftp._request(CMD_FSETSTAT, self.handle, attr) def check(self, hash_algorithm, offset=0, length=0, block_size=0): """ Ask the server for a hash of a section of this file. This can be used to verify a successful upload or download, or for various rsync-like operations. The file is hashed from ``offset``, for ``length`` bytes. If ``length`` is 0, the remainder of the file is hashed. Thus, if both ``offset`` and ``length`` are zero, the entire file is hashed. Normally, ``block_size`` will be 0 (the default), and this method will return a byte string representing the requested hash (for example, a string of length 16 for MD5, or 20 for SHA-1). If a non-zero ``block_size`` is given, each chunk of the file (from ``offset`` to ``offset + length``) of ``block_size`` bytes is computed as a separate hash. The hash results are all concatenated and returned as a single string. For example, ``check('sha1', 0, 1024, 512)`` will return a string of length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes of the file, and the last 20 bytes will be the SHA-1 of the next 512 bytes. :param str hash_algorithm: the name of the hash algorithm to use (normally ``"sha1"`` or ``"md5"``) :param offset: offset into the file to begin hashing (0 means to start from the beginning) :type offset: int or long :param length: number of bytes to hash (0 means continue to the end of the file) :type length: int or long :param int block_size: number of bytes to hash per result (must not be less than 256; 0 means to compute only one hash of the entire segment) :type block_size: int :return: `str` of bytes representing the hash of each block, concatenated together :raises IOError: if the server doesn't support the "check-file" extension, or possibly doesn't support the hash algorithm requested .. 
note:: Many (most?) servers don't support this extension yet. .. versionadded:: 1.4 """ t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle, hash_algorithm, long(offset), long(length), block_size) ext = msg.get_text() alg = msg.get_text() data = msg.get_remainder() return data def set_pipelined(self, pipelined=True): """ Turn on/off the pipelining of write operations to this file. When pipelining is on, paramiko won't wait for the server response after each write operation. Instead, they're collected as they come in. At the first non-write operation (including `.close`), all remaining server responses are collected. This means that if there was an error with one of your later writes, an exception might be thrown from within `.close` instead of `.write`. By default, files are not pipelined. :param bool pipelined: ``True`` if pipelining should be turned on for this file; ``False`` otherwise .. versionadded:: 1.5 """ self.pipelined = pipelined def prefetch(self): """ Pre-fetch the remaining contents of this file in anticipation of future `.read` calls. If reading the entire file, pre-fetching can dramatically improve the download speed by avoiding roundtrip latency. The file's contents are incrementally buffered in a background thread. The prefetched data is stored in a buffer until read via the `.read` method. Once data has been read, it's removed from the buffer. The data may be read in a random order (using `.seek`); chunks of the buffer that haven't been read will continue to be buffered. .. versionadded:: 1.5.1 """ size = self.stat().st_size # queue up async reads for the rest of the file chunks = [] n = self._realpos while n < size: chunk = min(self.MAX_REQUEST_SIZE, size - n) chunks.append((n, chunk)) n += chunk if len(chunks) > 0: self._start_prefetch(chunks) def readv(self, chunks): """ Read a set of blocks from the file by (offset, length). 
This is more efficient than doing a series of `.seek` and `.read` calls, since the prefetch machinery is used to retrieve all the requested blocks at once. :param chunks: a list of (offset, length) tuples indicating which sections of the file to read :type chunks: list(tuple(long, int)) :return: a list of blocks read, in the same order as in ``chunks`` .. versionadded:: 1.5.4 """ self.sftp._log(DEBUG, 'readv(%s, %r)' % (hexlify(self.handle), chunks)) read_chunks = [] for offset, size in chunks: # don't fetch data that's already in the prefetch buffer if self._data_in_prefetch_buffers(offset) or self._data_in_prefetch_requests(offset, size): continue # break up anything larger than the max read size while size > 0: chunk_size = min(size, self.MAX_REQUEST_SIZE) read_chunks.append((offset, chunk_size)) offset += chunk_size size -= chunk_size self._start_prefetch(read_chunks) # now we can just devolve to a bunch of read()s :) for x in chunks: self.seek(x[0]) yield self.read(x[1]) ### internals... def _get_size(self): try: return self.stat().st_size except: return 0 def _start_prefetch(self, chunks): self._prefetching = True self._prefetch_done = False t = threading.Thread(target=self._prefetch_thread, args=(chunks,)) t.setDaemon(True) t.start() def _prefetch_thread(self, chunks): # do these read requests in a temporary thread because there may be # a lot of them, so it may block. 
for offset, length in chunks: with self._prefetch_lock: num = self.sftp._async_request(self, CMD_READ, self.handle, long(offset), int(length)) self._prefetch_extents[num] = (offset, length) def _async_response(self, t, msg, num): if t == CMD_STATUS: # save exception and re-raise it on next file operation try: self.sftp._convert_status(msg) except Exception as e: self._saved_exception = e return if t != CMD_DATA: raise SFTPError('Expected data') data = msg.get_string() with self._prefetch_lock: offset, length = self._prefetch_extents[num] self._prefetch_data[offset] = data del self._prefetch_extents[num] if len(self._prefetch_extents) == 0: self._prefetch_done = True def _check_exception(self): """if there's a saved exception, raise & clear it""" if self._saved_exception is not None: x = self._saved_exception self._saved_exception = None raise x
mit
akash1808/nova_test_latest
nova/tests/unit/objects/test_bandwidth_usage.py
20
4910
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from oslo_utils import timeutils from nova import context from nova import db from nova.objects import bandwidth_usage from nova import test from nova.tests.unit.objects import test_objects class _TestBandwidthUsage(test.TestCase): def setUp(self): super(_TestBandwidthUsage, self).setUp() self.user_id = 'fake_user' self.project_id = 'fake_project' self.context = context.RequestContext(self.user_id, self.project_id) now, start_period = self._time_now_and_start_period() self.expected_bw_usage = self._fake_bw_usage( time=now, start_period=start_period) @staticmethod def _compare(test, db, obj): for field, value in db.items(): obj_field = field if obj_field == 'uuid': obj_field = 'instance_uuid' test.assertEqual(db[field], obj[obj_field]) @staticmethod def _fake_bw_usage(time=None, start_period=None, bw_in=100, bw_out=200, last_ctr_in=12345, last_ctr_out=67890): fake_bw_usage = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'uuid': 'fake_uuid1', 'mac': 'fake_mac1', 'start_period': start_period, 'bw_in': bw_in, 'bw_out': bw_out, 'last_ctr_in': last_ctr_in, 'last_ctr_out': last_ctr_out, 'last_refreshed': time } return fake_bw_usage @staticmethod def _time_now_and_start_period(): now = timeutils.utcnow().replace(tzinfo=iso8601.iso8601.Utc(), microsecond=0) start_period = now - datetime.timedelta(seconds=10) return now, start_period @mock.patch.object(db, 'bw_usage_get') 
def test_get_by_instance_uuid_and_mac(self, mock_get): mock_get.return_value = self.expected_bw_usage bw_usage = bandwidth_usage.BandwidthUsage.get_by_instance_uuid_and_mac( self.context, 'fake_uuid', 'fake_mac', start_period=self.expected_bw_usage['start_period']) self._compare(self, self.expected_bw_usage, bw_usage) @mock.patch.object(db, 'bw_usage_get_by_uuids') def test_get_by_uuids(self, mock_get_by_uuids): mock_get_by_uuids.return_value = [self.expected_bw_usage] bw_usages = bandwidth_usage.BandwidthUsageList.get_by_uuids( self.context, ['fake_uuid'], start_period=self.expected_bw_usage['start_period']) self.assertEqual(len(bw_usages), 1) self._compare(self, self.expected_bw_usage, bw_usages[0]) @mock.patch.object(db, 'bw_usage_update') def test_create(self, mock_create): mock_create.return_value = self.expected_bw_usage bw_usage = bandwidth_usage.BandwidthUsage(context=self.context) bw_usage.create('fake_uuid', 'fake_mac', 100, 200, 12345, 67890, start_period=self.expected_bw_usage['start_period']) self._compare(self, self.expected_bw_usage, bw_usage) @mock.patch.object(db, 'bw_usage_update') def test_update(self, mock_update): expected_bw_usage1 = self._fake_bw_usage( time=self.expected_bw_usage['last_refreshed'], start_period=self.expected_bw_usage['start_period'], last_ctr_in=42, last_ctr_out=42) mock_update.side_effect = [expected_bw_usage1, self.expected_bw_usage] bw_usage = bandwidth_usage.BandwidthUsage(context=self.context) bw_usage.create('fake_uuid1', 'fake_mac1', 100, 200, 42, 42, start_period=self.expected_bw_usage['start_period']) self._compare(self, expected_bw_usage1, bw_usage) bw_usage.create('fake_uuid1', 'fake_mac1', 100, 200, 12345, 67890, start_period=self.expected_bw_usage['start_period']) self._compare(self, self.expected_bw_usage, bw_usage) class TestBandwidthUsageObject(test_objects._LocalTest, _TestBandwidthUsage): pass class TestRemoteBandwidthUsageObject(test_objects._RemoteTest, _TestBandwidthUsage): pass
apache-2.0
alinbalutoiu/tempest
tempest/services/compute/json/services_client.py
9
2135
# Copyright 2013 NEC Corporation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils as json from six.moves.urllib import parse as urllib from tempest.api_schema.response.compute.v2_1 import services as schema from tempest.common import service_client class ServicesClient(service_client.ServiceClient): def list_services(self, **params): url = 'os-services' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) self.validate_response(schema.list_services, resp, body) return service_client.ResponseBodyList(resp, body['services']) def enable_service(self, host_name, binary): """ Enable service on a host host_name: Name of host binary: Service binary """ post_body = json.dumps({'binary': binary, 'host': host_name}) resp, body = self.put('os-services/enable', post_body) body = json.loads(body) self.validate_response(schema.enable_service, resp, body) return service_client.ResponseBody(resp, body['service']) def disable_service(self, host_name, binary): """ Disable service on a host host_name: Name of host binary: Service binary """ post_body = json.dumps({'binary': binary, 'host': host_name}) resp, body = self.put('os-services/disable', post_body) body = json.loads(body) return service_client.ResponseBody(resp, body['service'])
apache-2.0
acsone/sale-workflow
sale_service_fleet/report/project_report.py
9
1101
# -*- coding: utf-8 -*- # (c) 2015 Antiun Ingeniería S.L. - Sergio Teruel # (c) 2015 Antiun Ingeniería S.L. - Carlos Dauden # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html from openerp import models, fields from openerp import tools class ReportProjectTaskUser(models.Model): _inherit = 'report.project.task.user' vehicle_id = fields.Many2one( comodel_name='fleet.vehicle', string='Vehicle', readonly=True) def init(self, cr): super(ReportProjectTaskUser, self).init(cr) cr.execute("SELECT pg_get_viewdef('report_project_task_user', true)") view_def = cr.fetchone()[0] # Inject the new field in the expected SQL sql = "FROM " index = view_def.find(sql) if index >= 0: sql = ", t.vehicle_id" view_def = (view_def[:index] + sql + "\n " + view_def[index:-1] + sql) tools.drop_view_if_exists(cr, 'report_project_task_user') cr.execute("CREATE OR REPLACE VIEW report_project_task_user " "AS (%s)" % view_def)
agpl-3.0
fusionpig/ansible
v1/ansible/module_utils/gce.py
305
4179
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import pprint USER_AGENT_PRODUCT="Ansible-gce" USER_AGENT_VERSION="v1" def gce_connect(module, provider=None): """Return a Google Cloud Engine connection.""" service_account_email = module.params.get('service_account_email', None) pem_file = module.params.get('pem_file', None) project_id = module.params.get('project_id', None) # If any of the values are not given as parameters, check the appropriate # environment variables. if not service_account_email: service_account_email = os.environ.get('GCE_EMAIL', None) if not project_id: project_id = os.environ.get('GCE_PROJECT', None) if not pem_file: pem_file = os.environ.get('GCE_PEM_FILE_PATH', None) # If we still don't have one or more of our credentials, attempt to # get the remaining values from the libcloud secrets file. if service_account_email is None or pem_file is None: try: import secrets except ImportError: secrets = None if hasattr(secrets, 'GCE_PARAMS'): if not service_account_email: service_account_email = secrets.GCE_PARAMS[0] if not pem_file: pem_file = secrets.GCE_PARAMS[1] keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) if not project_id: project_id = keyword_params.get('project', None) # If we *still* don't have the credentials we need, then it's time to # just fail out. 
if service_account_email is None or pem_file is None or project_id is None: module.fail_json(msg='Missing GCE connection parameters in libcloud ' 'secrets file.') return None # Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE) if provider is None: provider = Provider.GCE try: gce = get_driver(provider)(service_account_email, pem_file, datacenter=module.params.get('zone', None), project=project_id) gce.connection.user_agent_append("%s/%s" % ( USER_AGENT_PRODUCT, USER_AGENT_VERSION)) except (RuntimeError, ValueError), e: module.fail_json(msg=str(e), changed=False) except Exception, e: module.fail_json(msg=unexpected_error_msg(e), changed=False) return gce def unexpected_error_msg(error): """Create an error string based on passed in error.""" return 'Unexpected response: ' + pprint.pformat(vars(error))
gpl-3.0
tecnologiaenegocios/tn.plonemailing
src/tn/plonemailing/converter.py
1
2960
from five import grok from htmlentitydefs import entitydefs from plone.intelligenttext.transforms import \ convertHtmlToWebIntelligentPlainText from tn.plonemailing import interfaces import lxml.html import re links_with_href_re = re.compile( r'(?m)<a([^<]+)href="([^<"]+)"([^<]*)>([^<]+)<\/a>', re.IGNORECASE ) class NullContentConversion(object): grok.implements(interfaces.IContentConversion) def __init__(self, content_type): self.content_type = content_type def apply(self, original_content): return original_content @grok.adapter(None, None, interfaces.INewsletter, name=u'html') @grok.implementer(interfaces.IContentConversion) def get_html_content_conversion(context, request, newsletter): return NullContentConversion('text/html') @grok.adapter(None, None, interfaces.INewsletter, name=u'text') @grok.implementer(interfaces.IContentConversion) def get_text_content_conversion(context, request, newsletter): return text_content_conversion class TextContentConversion(object): grok.implements(interfaces.IContentConversion) content_type = 'text/plain' def apply(self, html): html = lxml.html.document_fromstring(html) body = lxml.html.tostring(html.cssselect('body')[0], encoding=unicode) return self._to_text(body) def _to_text(self, body): # Because plone.intelligenttext uses htmlentitydefs to convert entities # to text and because htmlentitydefs returns entities in latin-1, we # take care of entity conversion by ourselves. body = self._expand_entities(body) # Expand links to reveal their hrefs. When link tags are stripped out # the user still will see the target URL. body = self._expand_links(body) return convertHtmlToWebIntelligentPlainText(body.encode('utf-8')).\ decode('utf-8') def _expand_entities(self, body): body = body.replace('&nbsp;', ' ') for entity, letter in entitydefs.items(): # Let plone.intelligenttext handle &lt; and &gt;, or else we may be # creating what looks like tags. 
if entity != 'lt' and entity != 'gt': body = body.replace('&' + entity + ';', letter.decode('latin-1')) return body def _expand_links(self, body): def replace(match): if match.group(2).strip() != match.group(4).strip(): return '<a%shref="%s"%s>%s [%s]<\/a>' % (match.group(1), match.group(2), match.group(3), match.group(4), match.group(2)) return match.group() return links_with_href_re.sub(replace, body) text_content_conversion = TextContentConversion()
bsd-3-clause
reedessick/lvalertTest
bin/sanityCheck_FakeDb.py
1
10948
#!/usr/bin/python usage = "sanityCheck_FakeDb.py [--options]" description = "provides unit tests and basic checks of FakeDb functionality" author = "reed.essick@ligo.org" #------------------------------------------------- import os import random from ligoTest.gracedb.rest import FakeDb import simUtils as utils import pipelines import schedule from lal.gpstime import tconvert from optparse import OptionParser #------------------------------------------------- parser = OptionParser(usage=usage, description=description) parser.add_option('-v', '--verbose', default=False, action='store_true') parser.add_option('-V', '--Verbose', default=False, action='store_true') parser.add_option('-N', '--Nevents', default=5, type='int', help='the number of events we create while testing') parser.add_option('', '--group', default='cbc', type='string') parser.add_option('', '--pipeline', default='gstlal', type='string') parser.add_option('', '--search', default=None, type='string') parser.add_option('-f', '--fakeDB-dir', default='./fakeDB', type='string') parser.add_option('-o', '--output-dir', default='.', type='string') opts, args = parser.parse_args() opts.verbose = opts.verbose or opts.Verbose if not os.path.exists(opts.fakeDB_dir): os.makedirs(opts.fakeDB_dir) if not os.path.exists(opts.output_dir): os.makedirs(opts.output_dir) #------------------------------------------------- labels = "EM_READY PE_READY EM_Throttled EM_Selected EM_Superseded ADVREQ ADVOK ADVNO H1OPS H1OK H1NO L1OPS L1OK L1NO".split() #------------------------------------------------- if opts.verbose: print "instantiating FakeDb" gdb = FakeDb(opts.fakeDB_dir) if opts.verbose: print "creating events" graceids = {} for x in xrange(opts.Nevents): if opts.verbose: print " %d / %d : group, pipeline, search = %s, %s, %s"%(x+1, opts.Nevents, opts.group, opts.pipeline, opts.search) ### create the event randStr = utils.genRandStr() gDBevent = schedule.GraceDBEvent(randStr) pipeObj = pipelines.initPipeline( 
float(tconvert('now')), 1e-9, ['H1','L1'], opts.group, opts.pipeline, gDBevent, search=opts.search, gdb_url=opts.fakeDB_dir ) agenda = pipeObj.genSchedule(directory=opts.output_dir) ### put some stuff into the event ### writeLabel for label in set( [random.choice(labels) for _ in xrange(5)] ): agenda.insert( schedule.WriteLabel( 100, gDBevent, label, gdb_url=opts.fakeDB_dir ) ) ### removeLabel # agenda.insert( schedule.RemoveLabel( 150, gDBevent, label, gdb_url=opts.fakeDB_dir ) ) # ^this should be defined in the previous loop! ### writeLog for num in xrange(5): message = 'message number : %d'%(num+1) agenda.insert( schedule.WriteLog( 200, gDBevent, message, gdb_url=opts.fakeDB_dir ) ) for num in xrange(5): message = "message with file number : %d"%(num+1) filename = os.path.join( opts.output_dir, "%s-%d.txt"%(randStr, num+1) ) open(filename, 'w').close() agenda.insert( schedule.WriteLog( 300, gDBevent, message, filename=filename, gdb_url=opts.fakeDB_dir ) ) ### writeFile for num in xrange(num, num+5): filename = os.path.join( opts.output_dir, "%s_%d.txt"%(randStr, num+1) ) open(filename, 'w').close() agenda.insert( schedule.WriteFile( 400, gDBevent, filename, gdb_url=opts.fakeDB_dir ) ) ### iterate and do things for action in agenda: if opts.Verbose: print " ", action response = action.execute() ### do the thing if isinstance(action, schedule.CreateEvent): ### this should be the first action so we should be safe defining things herein... 
graceid = gDBevent.get_graceid() graceids[graceid] = {'event' : {'gpstime' : pipeObj.gps, 'far' : pipeObj.far, 'group' : pipeObj.group.lower(), 'pipeline' : pipeObj.pipeline.lower(), 'search' : pipeObj.search.lower() if pipeObj.search else pipeObj.search, 'gDBevent' : pipeObj.graceDBevent, }, 'logs' : [], 'labels' : [], 'files' : [action.filename], } elif isinstance(action, schedule.WriteLabel): graceids[graceid]['labels'].append( action.label ) elif isinstance(action, schedule.RemoveLabel): raise NotImplementedError('we do not currently support RemoveLabel actions...') elif isinstance(action, schedule.WriteLog): graceids[graceid]['logs'].append( response.json() ) if action.filename: graceids[graceid]['files'].append( action.filename ) elif isinstance(action, schedule.WriteFile): graceids[graceid]['files'].append( action.filename ) else: print "action not understood!\n%s"%action #------------------------------------------------- if opts.verbose: print "checking FakeDb via queries" for graceid in sorted(graceids.keys()): if opts.verbose: print "investigating : %s"%graceid metadata = graceids[graceid] ### query information about this event #-------------------- ### event if opts.verbose: print "\nFakeDb.event()" response = gdb.event( graceid ).json() if opts.Verbose: for key, value in response.items(): print " ", key, "\t", value metadatum = metadata['event'] assert response['graceid'] == graceid, 'graceid is wrong : %s vs %s'%(response['graceid'], graceid) assert graceid == metadatum['gDBevent'].get_graceid(), 'graceDBevent->graceid is wrong : %s vs %s'%(graceid, metadatum['gDBevent'].get_graceid()) assert response['gpstime'] == metadatum['gpstime'], 'gps is wrong : %.6f vs %.6f'%(response['gpstime'], metadatum['gpstime']) assert response['far'] == metadatum['far'], 'far is wrong : %.6e vs %.6e'%(response['far'], metadatum['far']) assert response['group'] == metadatum['group'], 'group is wrong : %s vs %s'%(response['group'], metadatum['group']) assert 
response['pipeline'] == metadatum['pipeline'], 'pipeline is wrong : %s vs %s'%(response['pipeline'], metadatum['pipeline']) assert response['search'] == metadatum['search'], 'search is wrong : %s vs %s'%(response['search'], metadatum['search']) if opts.verbose: print "passed all checks!" #-------------------- ### labels if opts.verbose: print "\nFakeDb.labels()" response = gdb.labels( graceid ).json() if opts.Verbose: for key, value in response.items(): print " ", key for val in value: print " \t", value metadatum = metadata['labels'] labels = [label['name'] for label in response['labels']] if opts.Verbose: print " ensuring all labels associated with %s were actually applied"%graceid for label in labels: assert label in metadatum, 'label=%s present but not uploaded'%(label) if opts.Verbose: print " passed all checks!" print " ensuring all applied labels are actually present" for label in metadatum: assert label in labels, 'label=%s uploaded but not present'%(label) if opts.Verbose: print " passed all checks!" if opts.verbose: print "passed all checks!" #-------------------- ### files if opts.verbose: print "\nFakeDb.files()" response = gdb.files( graceid ).json() if opts.Verbose: for key, value in response.items(): print " ", key, "\t", value metadatum = [os.path.basename(filename) for filename in metadata['files']] files = response.keys() if opts.Verbose: print " ensuring all files associated with %s were actually uploaded"%graceid for filename in files: assert filename in metadatum, "file=%s present but not uploaded"%(filename) if opts.Verbose: print " passed all checks!" print " ensuring all uploaded files are atually present" for filename in metadatum: assert filename in files, "file=%s uploaded but not present"%(filename) if opts.Verbose: print " passed all checks!" if opts.verbose: print "passed all checks!" 
#-------------------- ### logs if opts.verbose: print "\nFakeDb.logs()" response = gdb.logs( graceid ).json() if opts.Verbose: for key, value in response.items(): if key == 'log': print " ", key for val in value: print " \t", val else: print " ", key, "\t", value logs = response['log'] # check that labelling produces logs if opts.Verbose: print " ensuring labels generated logs" for label in metadata['labels']: for log in logs: if log['comment'] == 'applying label : %s'%label: break else: assert False, 'label=%s did not produce a log message'%label if opts.Verbose: print " passed all checks!" # check that files produced logs if opts.Verbose: print " ensuring files are attached to logs" for filename in [os.path.basename(filename) for filename in metadata['files']]: for log in logs: if log['filename'] == filename: break else: assert False, 'file=%s was uploaded but produced no log'%filename if opts.Verbose: print " passed all checks!" # check that logging produced logs if opts.Verbose: print " ensuring all log messages were recorded" for log in metadata['logs']: comment = log['comment'] for log in logs: if log['comment'] == comment: break else: assert False, 'log with comment=%s was uploaded but not recorded'%comment if opts.Verbose: print " passed all checks!" if opts.verbose: print "passed all checks!" #------------------------------------------------- ### query things about groups of events ### events #raise NotImplementedError('need to set up queries over multiple events?')
mit
mikebenfield/scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_E.py
35
9862
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import from numpy import abs, asarray, cos, exp, arange, pi, sin, sqrt, sum from .go_benchmark import Benchmark class Easom(Benchmark): r""" Easom objective function. This class defines the Easom [1]_ global optimization problem. This is a a multimodal minimization problem defined as follows: .. math:: f_{\text{Easom}}({x}) = a - \frac{a}{e^{b \sqrt{\frac{\sum_{i=1}^{n} x_i^{2}}{n}}}} + e - e^{\frac{\sum_{i=1}^{n} \cos\left(c x_i\right)} {n}} Where, in this exercise, :math:`a = 20, b = 0.2` and :math:`c = 2 \pi`. Here, :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Gavana website disagrees with Jamil, etc. Gavana equation in docstring is totally wrong. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.global_optimum = [[pi for _ in range(self.N)]] self.fglob = -1.0 def fun(self, x, *args): self.nfev += 1 a = (x[0] - pi)**2 + (x[1] - pi)**2 return -cos(x[0]) * cos(x[1]) * exp(-a) class Eckerle4(Benchmark): r""" Eckerle4 objective function. Eckerle, K., NIST (1979). Circular Interference Transmittance Study. 
..[1] http://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml #TODO, this is a NIST regression standard dataset, docstring needs improving """ def __init__(self, dimensions=3): Benchmark.__init__(self, dimensions) self._bounds = list(zip([0., 1., 10.], [20, 20., 600.])) self.global_optimum = [[1.5543827178, 4.0888321754, 4.5154121844e2]] self.fglob = 1.4635887487E-03 self.a = asarray([1.5750000E-04, 1.6990000E-04, 2.3500000E-04, 3.1020000E-04, 4.9170000E-04, 8.7100000E-04, 1.7418000E-03, 4.6400000E-03, 6.5895000E-03, 9.7302000E-03, 1.4900200E-02, 2.3731000E-02, 4.0168300E-02, 7.1255900E-02, 1.2644580E-01, 2.0734130E-01, 2.9023660E-01, 3.4456230E-01, 3.6980490E-01, 3.6685340E-01, 3.1067270E-01, 2.0781540E-01, 1.1643540E-01, 6.1676400E-02, 3.3720000E-02, 1.9402300E-02, 1.1783100E-02, 7.4357000E-03, 2.2732000E-03, 8.8000000E-04, 4.5790000E-04, 2.3450000E-04, 1.5860000E-04, 1.1430000E-04, 7.1000000E-05]) self.b = asarray([4.0000000E+02, 4.0500000E+02, 4.1000000E+02, 4.1500000E+02, 4.2000000E+02, 4.2500000E+02, 4.3000000E+02, 4.3500000E+02, 4.3650000E+02, 4.3800000E+02, 4.3950000E+02, 4.4100000E+02, 4.4250000E+02, 4.4400000E+02, 4.4550000E+02, 4.4700000E+02, 4.4850000E+02, 4.5000000E+02, 4.5150000E+02, 4.5300000E+02, 4.5450000E+02, 4.5600000E+02, 4.5750000E+02, 4.5900000E+02, 4.6050000E+02, 4.6200000E+02, 4.6350000E+02, 4.6500000E+02, 4.7000000E+02, 4.7500000E+02, 4.8000000E+02, 4.8500000E+02, 4.9000000E+02, 4.9500000E+02, 5.0000000E+02]) def fun(self, x, *args): self.nfev += 1 vec = x[0] / x[1] * exp(-(self.b - x[2]) ** 2 / (2 * x[1] ** 2)) return sum((self.a - vec) ** 2) class EggCrate(Benchmark): r""" Egg Crate objective function. This class defines the Egg Crate [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{EggCrate}}(x) = x_1^2 + x_2^2 + 25 \left[ \sin^2(x_1) + \sin^2(x_2) \right] with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]` .. 
[1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N)) self.global_optimum = [[0.0, 0.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 return x[0] ** 2 + x[1] ** 2 + 25 * (sin(x[0]) ** 2 + sin(x[1]) ** 2) class EggHolder(Benchmark): r""" Egg Holder [1]_ objective function. This class defines the Egg Holder global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{EggHolder}}=\sum_{1}^{n - 1}\left[-\left(x_{i + 1} + 47 \right ) \sin\sqrt{\lvert x_{i+1} + x_i/2 + 47 \rvert} - x_i \sin\sqrt{\lvert x_i - (x_{i + 1} + 47)\rvert}\right ] Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-512, 512]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = -959.640662711` for :math:`{x} = [512, 404.2319]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Jamil is missing a minus sign on the fglob value """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-512.1] * self.N, [512.0] * self.N)) self.global_optimum = [[512.0, 404.2319]] self.fglob = -959.640662711 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 vec = (-(x[1:] + 47) * sin(sqrt(abs(x[1:] + x[:-1] / 2. + 47))) - x[:-1] * sin(sqrt(abs(x[:-1] - (x[1:] + 47))))) return sum(vec) class ElAttarVidyasagarDutta(Benchmark): r""" El-Attar-Vidyasagar-Dutta [1]_ objective function. This class defines the El-Attar-Vidyasagar-Dutta function global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\text{ElAttarVidyasagarDutta}}(x) = (x_1^2 + x_2 - 10)^2 + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2 with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = 1.712780354` for :math:`x= [3.40918683, -2.17143304]` .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.custom_bounds = [(-4, 4), (-4, 4)] self.global_optimum = [[3.40918683, -2.17143304]] self.fglob = 1.712780354 def fun(self, x, *args): self.nfev += 1 return ((x[0] ** 2 + x[1] - 10) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2 + (x[0] ** 2 + x[1] ** 3 - 1) ** 2) class Exp2(Benchmark): r""" Exp2 objective function. This class defines the Exp2 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Exp2}}(x) = \sum_{i=0}^9 \left ( e^{-ix_1/10} - 5e^{-ix_2/10} - e^{-i/10} + 5e^{-i} \right )^2 with :math:`x_i \in [0, 20]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10.]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([0.0] * self.N, [20.0] * self.N)) self.custom_bounds = [(0, 2), (0, 20)] self.global_optimum = [[1.0, 10.]] self.fglob = 0. def fun(self, x, *args): self.nfev += 1 i = arange(10.) vec = (exp(-i * x[0] / 10.) - 5 * exp(-i * x[1] / 10.) - exp(-i / 10.) + 5 * exp(-i)) ** 2 return sum(vec) class Exponential(Benchmark): r""" Exponential [1] objective function. This class defines the Exponential global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\text{Exponential}}(x) = -e^{-0.5 \sum_{i=1}^n x_i^2} Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil are missing a minus sign on fglob """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N)) self.global_optimum = [[0.0 for _ in range(self.N)]] self.fglob = -1.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 return -exp(-0.5 * sum(x ** 2.0))
bsd-3-clause
eeshangarg/oh-mainline
vendor/packages/Django/django/contrib/gis/db/models/query.py
93
36167
from django.db import connections from django.db.models.query import QuerySet, ValuesQuerySet, ValuesListQuerySet from django.contrib.gis import memoryview from django.contrib.gis.db.models import aggregates from django.contrib.gis.db.models.fields import get_srid_info, PointField, LineStringField from django.contrib.gis.db.models.sql import AreaField, DistanceField, GeomField, GeoQuery from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.measure import Area, Distance from django.utils import six class GeoQuerySet(QuerySet): "The Geographic QuerySet." ### Methods overloaded from QuerySet ### def __init__(self, model=None, query=None, using=None): super(GeoQuerySet, self).__init__(model=model, query=query, using=using) self.query = query or GeoQuery(self.model) def values(self, *fields): return self._clone(klass=GeoValuesQuerySet, setup=True, _fields=fields) def values_list(self, *fields, **kwargs): flat = kwargs.pop('flat', False) if kwargs: raise TypeError('Unexpected keyword arguments to values_list: %s' % (list(kwargs),)) if flat and len(fields) > 1: raise TypeError("'flat' is not valid when values_list is called with more than one field.") return self._clone(klass=GeoValuesListQuerySet, setup=True, flat=flat, _fields=fields) ### GeoQuerySet Methods ### def area(self, tolerance=0.05, **kwargs): """ Returns the area of the geographic field in an `area` attribute on each element of this GeoQuerySet. """ # Peforming setup here rather than in `_spatial_attribute` so that # we can get the units for `AreaField`. procedure_args, geo_field = self._spatial_setup('area', field_name=kwargs.get('field_name', None)) s = {'procedure_args' : procedure_args, 'geo_field' : geo_field, 'setup' : False, } connection = connections[self.db] backend = connection.ops if backend.oracle: s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s' s['procedure_args']['tolerance'] = tolerance s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters. 
elif backend.postgis or backend.spatialite: if backend.geography: # Geography fields support area calculation, returns square meters. s['select_field'] = AreaField('sq_m') elif not geo_field.geodetic(connection): # Getting the area units of the geographic field. s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection))) else: # TODO: Do we want to support raw number areas for geodetic fields? raise Exception('Area on geodetic coordinate systems not supported.') return self._spatial_attribute('area', s, **kwargs) def centroid(self, **kwargs): """ Returns the centroid of the geographic field in a `centroid` attribute on each element of this GeoQuerySet. """ return self._geom_attribute('centroid', **kwargs) def collect(self, **kwargs): """ Performs an aggregate collect operation on the given geometry field. This is analagous to a union operation, but much faster because boundaries are not dissolved. """ return self._spatial_aggregate(aggregates.Collect, **kwargs) def difference(self, geom, **kwargs): """ Returns the spatial difference of the geographic field in a `difference` attribute on each element of this GeoQuerySet. """ return self._geomset_attribute('difference', geom, **kwargs) def distance(self, geom, **kwargs): """ Returns the distance from the given geographic field name to the given geometry in a `distance` attribute on each element of the GeoQuerySet. Keyword Arguments: `spheroid` => If the geometry field is geodetic and PostGIS is the spatial database, then the more accurate spheroid calculation will be used instead of the quicker sphere calculation. `tolerance` => Used only for Oracle. The tolerance is in meters -- a default of 5 centimeters (0.05) is used. """ return self._distance_attribute('distance', geom, **kwargs) def envelope(self, **kwargs): """ Returns a Geometry representing the bounding box of the Geometry field in an `envelope` attribute on each element of the GeoQuerySet. 
""" return self._geom_attribute('envelope', **kwargs) def extent(self, **kwargs): """ Returns the extent (aggregate) of the features in the GeoQuerySet. The extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax). """ return self._spatial_aggregate(aggregates.Extent, **kwargs) def extent3d(self, **kwargs): """ Returns the aggregate extent, in 3D, of the features in the GeoQuerySet. It is returned as a 6-tuple, comprising: (xmin, ymin, zmin, xmax, ymax, zmax). """ return self._spatial_aggregate(aggregates.Extent3D, **kwargs) def force_rhr(self, **kwargs): """ Returns a modified version of the Polygon/MultiPolygon in which all of the vertices follow the Right-Hand-Rule. By default, this is attached as the `force_rhr` attribute on each element of the GeoQuerySet. """ return self._geom_attribute('force_rhr', **kwargs) def geojson(self, precision=8, crs=False, bbox=False, **kwargs): """ Returns a GeoJSON representation of the geomtry field in a `geojson` attribute on each element of the GeoQuerySet. The `crs` and `bbox` keywords may be set to True if the users wants the coordinate reference system and the bounding box to be included in the GeoJSON representation of the geometry. """ backend = connections[self.db].ops if not backend.geojson: raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ ' 'support GeoJSON serialization.') if not isinstance(precision, six.integer_types): raise TypeError('Precision keyword must be set with an integer.') # Setting the options flag -- which depends on which version of # PostGIS we're using. SpatiaLite only uses the first group of options. 
if backend.spatial_version >= (1, 4, 0): options = 0 if crs and bbox: options = 3 elif bbox: options = 1 elif crs: options = 2 else: options = 0 if crs and bbox: options = 3 elif crs: options = 1 elif bbox: options = 2 s = {'desc' : 'GeoJSON', 'procedure_args' : {'precision' : precision, 'options' : options}, 'procedure_fmt' : '%(geo_col)s,%(precision)s,%(options)s', } return self._spatial_attribute('geojson', s, **kwargs) def geohash(self, precision=20, **kwargs): """ Returns a GeoHash representation of the given field in a `geohash` attribute on each element of the GeoQuerySet. The `precision` keyword may be used to custom the number of _characters_ used in the output GeoHash, the default is 20. """ s = {'desc' : 'GeoHash', 'procedure_args': {'precision': precision}, 'procedure_fmt': '%(geo_col)s,%(precision)s', } return self._spatial_attribute('geohash', s, **kwargs) def gml(self, precision=8, version=2, **kwargs): """ Returns GML representation of the given field in a `gml` attribute on each element of the GeoQuerySet. """ backend = connections[self.db].ops s = {'desc' : 'GML', 'procedure_args' : {'precision' : precision}} if backend.postgis: # PostGIS AsGML() aggregate function parameter order depends on the # version -- uggh. if backend.spatial_version > (1, 3, 1): s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s' else: s['procedure_fmt'] = '%(geo_col)s,%(precision)s,%(version)s' s['procedure_args'] = {'precision' : precision, 'version' : version} return self._spatial_attribute('gml', s, **kwargs) def intersection(self, geom, **kwargs): """ Returns the spatial intersection of the Geometry field in an `intersection` attribute on each element of this GeoQuerySet. """ return self._geomset_attribute('intersection', geom, **kwargs) def kml(self, **kwargs): """ Returns KML representation of the geometry field in a `kml` attribute on each element of this GeoQuerySet. 
""" s = {'desc' : 'KML', 'procedure_fmt' : '%(geo_col)s,%(precision)s', 'procedure_args' : {'precision' : kwargs.pop('precision', 8)}, } return self._spatial_attribute('kml', s, **kwargs) def length(self, **kwargs): """ Returns the length of the geometry field as a `Distance` object stored in a `length` attribute on each element of this GeoQuerySet. """ return self._distance_attribute('length', None, **kwargs) def make_line(self, **kwargs): """ Creates a linestring from all of the PointField geometries in the this GeoQuerySet and returns it. This is a spatial aggregate method, and thus returns a geometry rather than a GeoQuerySet. """ return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs) def mem_size(self, **kwargs): """ Returns the memory size (number of bytes) that the geometry field takes in a `mem_size` attribute on each element of this GeoQuerySet. """ return self._spatial_attribute('mem_size', {}, **kwargs) def num_geom(self, **kwargs): """ Returns the number of geometries if the field is a GeometryCollection or Multi* Field in a `num_geom` attribute on each element of this GeoQuerySet; otherwise the sets with None. """ return self._spatial_attribute('num_geom', {}, **kwargs) def num_points(self, **kwargs): """ Returns the number of points in the first linestring in the Geometry field in a `num_points` attribute on each element of this GeoQuerySet; otherwise sets with None. """ return self._spatial_attribute('num_points', {}, **kwargs) def perimeter(self, **kwargs): """ Returns the perimeter of the geometry field as a `Distance` object stored in a `perimeter` attribute on each element of this GeoQuerySet. """ return self._distance_attribute('perimeter', None, **kwargs) def point_on_surface(self, **kwargs): """ Returns a Point geometry guaranteed to lie on the surface of the Geometry field in a `point_on_surface` attribute on each element of this GeoQuerySet; otherwise sets with None. 
""" return self._geom_attribute('point_on_surface', **kwargs) def reverse_geom(self, **kwargs): """ Reverses the coordinate order of the geometry, and attaches as a `reverse` attribute on each element of this GeoQuerySet. """ s = {'select_field' : GeomField(),} kwargs.setdefault('model_att', 'reverse_geom') if connections[self.db].ops.oracle: s['geo_field_type'] = LineStringField return self._spatial_attribute('reverse', s, **kwargs) def scale(self, x, y, z=0.0, **kwargs): """ Scales the geometry to a new size by multiplying the ordinates with the given x,y,z scale factors. """ if connections[self.db].ops.spatialite: if z != 0.0: raise NotImplementedError('SpatiaLite does not support 3D scaling.') s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s', 'procedure_args' : {'x' : x, 'y' : y}, 'select_field' : GeomField(), } else: s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s', 'procedure_args' : {'x' : x, 'y' : y, 'z' : z}, 'select_field' : GeomField(), } return self._spatial_attribute('scale', s, **kwargs) def snap_to_grid(self, *args, **kwargs): """ Snap all points of the input geometry to the grid. How the geometry is snapped to the grid depends on how many arguments were given: - 1 argument : A single size to snap both the X and Y grids to. - 2 arguments: X and Y sizes to snap the grid to. - 4 arguments: X, Y sizes and the X, Y origins. 
""" if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]: raise TypeError('Size argument(s) for the grid must be a float or integer values.') nargs = len(args) if nargs == 1: size = args[0] procedure_fmt = '%(geo_col)s,%(size)s' procedure_args = {'size' : size} elif nargs == 2: xsize, ysize = args procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s' procedure_args = {'xsize' : xsize, 'ysize' : ysize} elif nargs == 4: xsize, ysize, xorigin, yorigin = args procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s' procedure_args = {'xsize' : xsize, 'ysize' : ysize, 'xorigin' : xorigin, 'yorigin' : yorigin} else: raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.') s = {'procedure_fmt' : procedure_fmt, 'procedure_args' : procedure_args, 'select_field' : GeomField(), } return self._spatial_attribute('snap_to_grid', s, **kwargs) def svg(self, relative=False, precision=8, **kwargs): """ Returns SVG representation of the geographic field in a `svg` attribute on each element of this GeoQuerySet. Keyword Arguments: `relative` => If set to True, this will evaluate the path in terms of relative moves (rather than absolute). `precision` => May be used to set the maximum number of decimal digits used in output (defaults to 8). """ relative = int(bool(relative)) if not isinstance(precision, six.integer_types): raise TypeError('SVG precision keyword argument must be an integer.') s = {'desc' : 'SVG', 'procedure_fmt' : '%(geo_col)s,%(rel)s,%(precision)s', 'procedure_args' : {'rel' : relative, 'precision' : precision, } } return self._spatial_attribute('svg', s, **kwargs) def sym_difference(self, geom, **kwargs): """ Returns the symmetric difference of the geographic field in a `sym_difference` attribute on each element of this GeoQuerySet. 
""" return self._geomset_attribute('sym_difference', geom, **kwargs) def translate(self, x, y, z=0.0, **kwargs): """ Translates the geometry to a new location using the given numeric parameters as offsets. """ if connections[self.db].ops.spatialite: if z != 0.0: raise NotImplementedError('SpatiaLite does not support 3D translation.') s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s', 'procedure_args' : {'x' : x, 'y' : y}, 'select_field' : GeomField(), } else: s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s', 'procedure_args' : {'x' : x, 'y' : y, 'z' : z}, 'select_field' : GeomField(), } return self._spatial_attribute('translate', s, **kwargs) def transform(self, srid=4326, **kwargs): """ Transforms the given geometry field to the given SRID. If no SRID is provided, the transformation will default to using 4326 (WGS84). """ if not isinstance(srid, six.integer_types): raise TypeError('An integer SRID must be provided.') field_name = kwargs.get('field_name', None) tmp, geo_field = self._spatial_setup('transform', field_name=field_name) # Getting the selection SQL for the given geographic field. field_col = self._geocol_select(geo_field, field_name) # Why cascading substitutions? Because spatial backends like # Oracle and MySQL already require a function call to convert to text, thus # when there's also a transformation we need to cascade the substitutions. # For example, 'SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM( ... )' geo_col = self.query.custom_select.get(geo_field, field_col) # Setting the key for the field's column with the custom SELECT SQL to # override the geometry column returned from the database. custom_sel = '%s(%s, %s)' % (connections[self.db].ops.transform, geo_col, srid) # TODO: Should we have this as an alias? 
# custom_sel = '(%s(%s, %s)) AS %s' % (SpatialBackend.transform, geo_col, srid, qn(geo_field.name)) self.query.transformed_srid = srid # So other GeoQuerySet methods self.query.custom_select[geo_field] = custom_sel return self._clone() def union(self, geom, **kwargs): """ Returns the union of the geographic field with the given Geometry in a `union` attribute on each element of this GeoQuerySet. """ return self._geomset_attribute('union', geom, **kwargs) def unionagg(self, **kwargs): """ Performs an aggregate union on the given geometry field. Returns None if the GeoQuerySet is empty. The `tolerance` keyword is for Oracle backends only. """ return self._spatial_aggregate(aggregates.Union, **kwargs) ### Private API -- Abstracted DRY routines. ### def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None): """ Performs set up for executing the spatial function. """ # Does the spatial backend support this? connection = connections[self.db] func = getattr(connection.ops, att, False) if desc is None: desc = att if not func: raise NotImplementedError('%s stored procedure not available on ' 'the %s backend.' % (desc, connection.ops.name)) # Initializing the procedure arguments. procedure_args = {'function' : func} # Is there a geographic field in the model to perform this # operation on? geo_field = self.query._geo_field(field_name) if not geo_field: raise TypeError('%s output only available on GeometryFields.' % func) # If the `geo_field_type` keyword was used, then enforce that # type limitation. if not geo_field_type is None and not isinstance(geo_field, geo_field_type): raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__)) # Setting the procedure args. 
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name) return procedure_args, geo_field def _spatial_aggregate(self, aggregate, field_name=None, geo_field_type=None, tolerance=0.05): """ DRY routine for calling aggregate spatial stored procedures and returning their result to the caller of the function. """ # Getting the field the geographic aggregate will be called on. geo_field = self.query._geo_field(field_name) if not geo_field: raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name) # Checking if there are any geo field type limitations on this # aggregate (e.g. ST_Makeline only operates on PointFields). if not geo_field_type is None and not isinstance(geo_field, geo_field_type): raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__)) # Getting the string expression of the field name, as this is the # argument taken by `Aggregate` objects. agg_col = field_name or geo_field.name # Adding any keyword parameters for the Aggregate object. Oracle backends # in particular need an additional `tolerance` parameter. agg_kwargs = {} if connections[self.db].ops.oracle: agg_kwargs['tolerance'] = tolerance # Calling the QuerySet.aggregate, and returning only the value of the aggregate. return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg'] def _spatial_attribute(self, att, settings, field_name=None, model_att=None): """ DRY routine for calling a spatial stored procedure on a geometry column and attaching its output as an attribute of the model. Arguments: att: The name of the spatial attribute that holds the spatial SQL function to call. settings: Dictonary of internal settings to customize for the spatial procedure. Public Keyword Arguments: field_name: The name of the geographic field to call the spatial function on. May also be a lookup to a geometry field as part of a foreign key relation. 
model_att: The name of the model attribute to attach the output of the spatial function to. """ # Default settings. settings.setdefault('desc', None) settings.setdefault('geom_args', ()) settings.setdefault('geom_field', None) settings.setdefault('procedure_args', {}) settings.setdefault('procedure_fmt', '%(geo_col)s') settings.setdefault('select_params', []) connection = connections[self.db] backend = connection.ops # Performing setup for the spatial column, unless told not to. if settings.get('setup', True): default_args, geo_field = self._spatial_setup(att, desc=settings['desc'], field_name=field_name, geo_field_type=settings.get('geo_field_type', None)) for k, v in six.iteritems(default_args): settings['procedure_args'].setdefault(k, v) else: geo_field = settings['geo_field'] # The attribute to attach to the model. if not isinstance(model_att, six.string_types): model_att = att # Special handling for any argument that is a geometry. for name in settings['geom_args']: # Using the field's get_placeholder() routine to get any needed # transformation SQL. geom = geo_field.get_prep_value(settings['procedure_args'][name]) params = geo_field.get_db_prep_lookup('contains', geom, connection=connection) geom_placeholder = geo_field.get_placeholder(geom, connection) # Replacing the procedure format with that of any needed # transformation SQL. old_fmt = '%%(%s)s' % name new_fmt = geom_placeholder % '%%s' settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt) settings['select_params'].extend(params) # Getting the format for the stored procedure. fmt = '%%(function)s(%s)' % settings['procedure_fmt'] # If the result of this function needs to be converted. 
if settings.get('select_field', False): sel_fld = settings['select_field'] if isinstance(sel_fld, GeomField) and backend.select: self.query.custom_select[model_att] = backend.select if connection.ops.oracle: sel_fld.empty_strings_allowed = False self.query.extra_select_fields[model_att] = sel_fld # Finally, setting the extra selection attribute with # the format string expanded with the stored procedure # arguments. return self.extra(select={model_att : fmt % settings['procedure_args']}, select_params=settings['select_params']) def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs): """ DRY routine for GeoQuerySet distance attribute routines. """ # Setting up the distance procedure arguments. procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None)) # If geodetic defaulting distance attribute to meters (Oracle and # PostGIS spherical distances return meters). Otherwise, use the # units of the geometry field. connection = connections[self.db] geodetic = geo_field.geodetic(connection) geography = geo_field.geography if geodetic: dist_att = 'm' else: dist_att = Distance.unit_attname(geo_field.units_name(connection)) # Shortcut booleans for what distance function we're using and # whether the geometry field is 3D. distance = func == 'distance' length = func == 'length' perimeter = func == 'perimeter' if not (distance or length or perimeter): raise ValueError('Unknown distance function: %s' % func) geom_3d = geo_field.dim == 3 # The field's get_db_prep_lookup() is used to get any # extra distance parameters. Here we set up the # parameters that will be passed in to field's function. lookup_params = [geom or 'POINT (0 0)', 0] # Getting the spatial backend operations. 
backend = connection.ops # If the spheroid calculation is desired, either by the `spheroid` # keyword or when calculating the length of geodetic field, make # sure the 'spheroid' distance setting string is passed in so we # get the correct spatial stored procedure. if spheroid or (backend.postgis and geodetic and (not geography) and length): lookup_params.append('spheroid') lookup_params = geo_field.get_prep_value(lookup_params) params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection) # The `geom_args` flag is set to true if a geometry parameter was # passed in. geom_args = bool(geom) if backend.oracle: if distance: procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s' elif length or perimeter: procedure_fmt = '%(geo_col)s,%(tolerance)s' procedure_args['tolerance'] = tolerance else: # Getting whether this field is in units of degrees since the field may have # been transformed via the `transform` GeoQuerySet method. if self.query.transformed_srid: u, unit_name, s = get_srid_info(self.query.transformed_srid, connection) geodetic = unit_name in geo_field.geodetic_units if backend.spatialite and geodetic: raise ValueError('SQLite does not support linear distance calculations on geodetic coordinate systems.') if distance: if self.query.transformed_srid: # Setting the `geom_args` flag to false because we want to handle # transformation SQL here, rather than the way done by default # (which will transform to the original SRID of the field rather # than to what was transformed to). geom_args = False procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, self.query.transformed_srid) if geom.srid is None or geom.srid == self.query.transformed_srid: # If the geom parameter srid is None, it is assumed the coordinates # are in the transformed units. A placeholder is used for the # geometry parameter. `GeomFromText` constructor is also needed # to wrap geom placeholder for SpatiaLite. 
if backend.spatialite: procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, self.query.transformed_srid) else: procedure_fmt += ', %%s' else: # We need to transform the geom to the srid specified in `transform()`, # so wrapping the geometry placeholder in transformation SQL. # SpatiaLite also needs geometry placeholder wrapped in `GeomFromText` # constructor. if backend.spatialite: procedure_fmt += ', %s(%s(%%%%s, %s), %s)' % (backend.transform, backend.from_text, geom.srid, self.query.transformed_srid) else: procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, self.query.transformed_srid) else: # `transform()` was not used on this GeoQuerySet. procedure_fmt = '%(geo_col)s,%(geom)s' if not geography and geodetic: # Spherical distance calculation is needed (because the geographic # field is geodetic). However, the PostGIS ST_distance_sphere/spheroid() # procedures may only do queries from point columns to point geometries # some error checking is required. if not backend.geography: if not isinstance(geo_field, PointField): raise ValueError('Spherical distance calculation only supported on PointFields.') if not str(Geometry(memoryview(params[0].ewkb)).geom_type) == 'Point': raise ValueError('Spherical distance calculation only supported with Point Geometry parameters') # The `function` procedure argument needs to be set differently for # geodetic distance calculations. if spheroid: # Call to distance_spheroid() requires spheroid param as well. procedure_fmt += ",'%(spheroid)s'" procedure_args.update({'function' : backend.distance_spheroid, 'spheroid' : params[1]}) else: procedure_args.update({'function' : backend.distance_sphere}) elif length or perimeter: procedure_fmt = '%(geo_col)s' if not geography and geodetic and length: # There's no `length_sphere`, and `length_spheroid` also # works on 3D geometries. 
procedure_fmt += ",'%(spheroid)s'" procedure_args.update({'function' : backend.length_spheroid, 'spheroid' : params[1]}) elif geom_3d and backend.postgis: # Use 3D variants of perimeter and length routines on PostGIS. if perimeter: procedure_args.update({'function' : backend.perimeter3d}) elif length: procedure_args.update({'function' : backend.length3d}) # Setting up the settings for `_spatial_attribute`. s = {'select_field' : DistanceField(dist_att), 'setup' : False, 'geo_field' : geo_field, 'procedure_args' : procedure_args, 'procedure_fmt' : procedure_fmt, } if geom_args: s['geom_args'] = ('geom',) s['procedure_args']['geom'] = geom elif geom: # The geometry is passed in as a parameter because we handled # transformation conditions in this routine. s['select_params'] = [backend.Adapter(geom)] return self._spatial_attribute(func, s, **kwargs) def _geom_attribute(self, func, tolerance=0.05, **kwargs): """ DRY routine for setting up a GeoQuerySet method that attaches a Geometry attribute (e.g., `centroid`, `point_on_surface`). """ s = {'select_field' : GeomField(),} if connections[self.db].ops.oracle: s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s' s['procedure_args'] = {'tolerance' : tolerance} return self._spatial_attribute(func, s, **kwargs) def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs): """ DRY routine for setting up a GeoQuerySet method that attaches a Geometry attribute and takes a Geoemtry parameter. This is used for geometry set-like operations (e.g., intersection, difference, union, sym_difference). """ s = {'geom_args' : ('geom',), 'select_field' : GeomField(), 'procedure_fmt' : '%(geo_col)s,%(geom)s', 'procedure_args' : {'geom' : geom}, } if connections[self.db].ops.oracle: s['procedure_fmt'] += ',%(tolerance)s' s['procedure_args']['tolerance'] = tolerance return self._spatial_attribute(func, s, **kwargs) def _geocol_select(self, geo_field, field_name): """ Helper routine for constructing the SQL to select the geographic column. 
Takes into account if the geographic field is in a ForeignKey relation to the current model. """ opts = self.model._meta if not geo_field in opts.fields: # Is this operation going to be on a related geographic field? # If so, it'll have to be added to the select related information # (e.g., if 'location__point' was given as the field name). self.query.add_select_related([field_name]) compiler = self.query.get_compiler(self.db) compiler.pre_sql_setup() rel_table, rel_col = self.query.related_select_cols[self.query.related_select_fields.index(geo_field)] return compiler._field_column(geo_field, rel_table) elif not geo_field in opts.local_fields: # This geographic field is inherited from another model, so we have to # use the db table for the _parent_ model instead. tmp_fld, parent_model, direct, m2m = opts.get_field_by_name(geo_field.name) return self.query.get_compiler(self.db)._field_column(geo_field, parent_model._meta.db_table) else: return self.query.get_compiler(self.db)._field_column(geo_field) class GeoValuesQuerySet(ValuesQuerySet): def __init__(self, *args, **kwargs): super(GeoValuesQuerySet, self).__init__(*args, **kwargs) # This flag tells `resolve_columns` to run the values through # `convert_values`. This ensures that Geometry objects instead # of string values are returned with `values()` or `values_list()`. self.query.geo_values = True class GeoValuesListQuerySet(GeoValuesQuerySet, ValuesListQuerySet): pass
agpl-3.0
lupyuen/RaspberryPiImage
home/pi/GrovePi/Software/Python/others/temboo/Library/YouTube/Channels/ListChannelsByID.py
5
6409
# -*- coding: utf-8 -*- ############################################################################### # # ListChannelsByID # Returns a list of channels that match the list of IDs provided. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class ListChannelsByID(Choreography): def __init__(self, temboo_session): """ Create a new instance of the ListChannelsByID Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(ListChannelsByID, self).__init__(temboo_session, '/Library/YouTube/Channels/ListChannelsByID') def new_input_set(self): return ListChannelsByIDInputSet() def _make_result_set(self, result, path): return ListChannelsByIDResultSet(result, path) def _make_execution(self, session, exec_id, path): return ListChannelsByIDChoreographyExecution(session, exec_id, path) class ListChannelsByIDInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the ListChannelsByID Choreo. The InputSet object is used to specify input parameters when executing this Choreo. 
""" def set_APIKey(self, value): """ Set the value of the APIKey input for this Choreo. ((optional, string) The API Key provided by Google for simple API access when you do not need to access user data.) """ super(ListChannelsByIDInputSet, self)._set_input('APIKey', value) def set_AccessToken(self, value): """ Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required for OAuth authentication unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.) """ super(ListChannelsByIDInputSet, self)._set_input('AccessToken', value) def set_ChannelID(self, value): """ Set the value of the ChannelID input for this Choreo. ((required, string) A comma-separated list of the YouTube channel ID(s) for the resource(s) that are being retrieved.) """ super(ListChannelsByIDInputSet, self)._set_input('ChannelID', value) def set_ClientID(self, value): """ Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required for OAuth authentication unless providing a valid AccessToken.) """ super(ListChannelsByIDInputSet, self)._set_input('ClientID', value) def set_ClientSecret(self, value): """ Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required for OAuth authentication unless providing a valid AccessToken.) """ super(ListChannelsByIDInputSet, self)._set_input('ClientSecret', value) def set_Fields(self, value): """ Set the value of the Fields input for this Choreo. ((optional, string) Allows you to specify a subset of fields to include in the response using an xpath-like syntax (i.e. items/snippet/title).) """ super(ListChannelsByIDInputSet, self)._set_input('Fields', value) def set_MaxResults(self, value): """ Set the value of the MaxResults input for this Choreo. ((optional, integer) The maximum number of results to return.) 
""" super(ListChannelsByIDInputSet, self)._set_input('MaxResults', value) def set_PageToken(self, value): """ Set the value of the PageToken input for this Choreo. ((optional, string) The "nextPageToken" found in the response which is used to page through results.) """ super(ListChannelsByIDInputSet, self)._set_input('PageToken', value) def set_Part(self, value): """ Set the value of the Part input for this Choreo. ((optional, string) Specifies a comma-separated list of channels resource properties that the API response will include. Part names that you can pass are: id, snippet, contentDetails, statistics, and topicDetails.) """ super(ListChannelsByIDInputSet, self)._set_input('Part', value) def set_RefreshToken(self, value): """ Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required for OAuth authentication unless providing a valid AccessToken.) """ super(ListChannelsByIDInputSet, self)._set_input('RefreshToken', value) class ListChannelsByIDResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the ListChannelsByID Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from YouTube.) """ return self._output.get('Response', None) def get_NewAccessToken(self): """ Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.) """ return self._output.get('NewAccessToken', None) class ListChannelsByIDChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return ListChannelsByIDResultSet(response, path)
apache-2.0
YongseopKim/crosswalk-test-suite
wrt/wrt-packertool-android-tests/metacomm/combinatorics/combinatorics.py
33
1259
#=============================================================================== # code from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/190465 # all (c) etc. are of the authors of this procedures, see link above #=============================================================================== def xcombinations(items, n): if n==0: yield [] else: for i in xrange(len(items)): for cc in xcombinations(items[:i]+items[i+1:],n-1): yield [items[i]]+cc def xuniqueCombinations(items, n): if n==0: yield [] else: for i in xrange(len(items)): for cc in xuniqueCombinations( items[i+1:], n-1) : yield [items[i]]+cc def xselections(items, n): if n==0: yield [] else: for i in xrange(len(items)): for ss in xselections(items, n-1): yield [items[i]]+ss def xpermutations(items): return xcombinations(items, len(items)) #supposedly it is faster def permutations2(L): if len(L) <= 1: yield L else: a = [L.pop(0)] for p in permutations(L): for i in range(len(p)+1): yield p[:i] + a + p[i:]
bsd-3-clause
ezarko/cfn-init
comtypes/test/test_server.py
1
10411
import atexit, os, unittest ##import comtypes import comtypes.typeinfo, comtypes.client class TypeLib(object): """This class collects IDL code fragments and eventually writes them into a .IDL file. The compile() method compiles the IDL file into a typelibrary and registers it. A function is also registered with atexit that will unregister the typelib at program exit. """ def __init__(self, lib): self.lib = lib self.interfaces = [] self.coclasses = [] def interface(self, header): itf = Interface(header) self.interfaces.append(itf) return itf def coclass(self, definition): self.coclasses.append(definition) def __str__(self): header = '''import "oaidl.idl"; import "ocidl.idl"; %s {''' % self.lib body = "\n".join([str(itf) for itf in self.interfaces]) footer = "\n".join(self.coclasses) + "}" return "\n".join((header, body, footer)) def compile(self): """Compile and register the typelib""" code = str(self) curdir = os.path.dirname(__file__) idl_path = os.path.join(curdir, "mylib.idl") tlb_path = os.path.join(curdir, "mylib.tlb") if not os.path.isfile(idl_path) or open(idl_path, "r").read() != code: open(idl_path, "w").write(code) os.system(r'call "%%VS71COMNTOOLS%%vsvars32.bat" && ' r'midl /nologo %s /tlb %s' % (idl_path, tlb_path)) # Register the typelib... tlib = comtypes.typeinfo.LoadTypeLib(tlb_path) # create the wrapper module... comtypes.client.GetModule(tlb_path) # Unregister the typelib at interpreter exit... 
attr = tlib.GetLibAttr() guid, major, minor = attr.guid, attr.wMajorVerNum, attr.wMinorVerNum ## atexit.register(comtypes.typeinfo.UnRegisterTypeLib, ## guid, major, minor) return tlb_path class Interface(object): def __init__(self, header): self.header = header self.code = "" def add(self, text): self.code += text + "\n" return self def __str__(self): return self.header + " {\n" + self.code + "}\n" ################################################################ import comtypes from comtypes.client import wrap tlb = TypeLib("[uuid(f4f74946-4546-44bd-a073-9ea6f9fe78cb)] library TestLib") itf = tlb.interface("""[object, oleautomation, dual, uuid(ed978f5f-cc45-4fcc-a7a6-751ffa8dfedd)] interface IMyInterface : IDispatch""") outgoing = tlb.interface("""[object, oleautomation, dual, uuid(f7c48a90-64ea-4bb8-abf1-b3a3aa996848)] interface IMyEventInterface : IDispatch""") tlb.coclass(""" [uuid(fa9de8f4-20de-45fc-b079-648572428817)] coclass MyServer { [default] interface IMyInterface; [default, source] interface IMyEventInterface; }; """) # The purpose of the MyServer class is to locate three separate code # section snippets closely together: # # 1. The IDL method definition for a COM interface method # 2. The Python implementation of the COM method # 3. The unittest(s) for the COM method. 
# from comtypes.server.connectionpoints import ConnectableObjectMixin class MyServer(comtypes.CoClass, ConnectableObjectMixin): _reg_typelib_ = ('{f4f74946-4546-44bd-a073-9ea6f9fe78cb}', 0, 0) _reg_clsid_ = comtypes.GUID('{fa9de8f4-20de-45fc-b079-648572428817}') ################ # definition itf.add("""[id(100), propget] HRESULT Name([out, retval] BSTR *pname); [id(100), propput] HRESULT Name([in] BSTR name);""") # implementation Name = "foo" # test def test_Name(self): p = wrap(self.create()) self.assertEqual((p.Name, p.name, p.nAME), ("foo",) * 3) p.NAME = "spam" self.assertEqual((p.Name, p.name, p.nAME), ("spam",) * 3) ################ # definition itf.add("[id(101)] HRESULT MixedInOut([in] int a, [out] int *b, [in] int c, [out] int *d);") # implementation def MixedInOut(self, a, c): return a+1, c+1 #test def test_MixedInOut(self): p = wrap(self.create()) self.assertEqual(p.MixedInOut(1, 2), (2, 3)) ################ # definition itf.add("[id(102)] HRESULT MultiInOutArgs([in, out] int *pa, [in, out] int *pb);") # implementation def MultiInOutArgs(self, pa, pb): return pa[0] * 3, pb[0] * 4 # test def test_MultiInOutArgs(self): p = wrap(self.create()) self.assertEqual(p.MultiInOutArgs(1, 2), (3, 8)) ################ # definition itf.add("HRESULT MultiInOutArgs2([in, out] int *pa, [out] int *pb);") ## # implementation ## def MultiInOutArgs2(self, pa): ## return pa[0] * 3, pa[0] * 4 ## # test ## def test_MultiInOutArgs2(self): ## p = wrap(self.create()) ## self.assertEqual(p.MultiInOutArgs2(42), (126, 168)) ################ # definition itf.add("HRESULT MultiInOutArgs3([out] int *pa, [out] int *pb);") # implementation def MultiInOutArgs3(self): return 42, 43 # test def test_MultiInOutArgs3(self): p = wrap(self.create()) self.assertEqual(p.MultiInOutArgs3(), (42, 43)) ################ # definition itf.add("HRESULT MultiInOutArgs4([out] int *pa, [in, out] int *pb);") # implementation def MultiInOutArgs4(self, pb): return pb[0] + 3, pb[0] + 4 # test def 
test_MultiInOutArgs4(self): p = wrap(self.create()) res = p.MultiInOutArgs4(pb=32) ## print "MultiInOutArgs4", res itf.add("""HRESULT GetStackTrace([in] ULONG FrameOffset, [in, out] INT *Frames, [in] ULONG FramesSize, [out, optional] ULONG *FramesFilled);""") def GetStackTrace(self, this, *args): ## print "GetStackTrace", args return 0 def test_GetStackTrace(self): p = wrap(self.create()) from ctypes import c_int, POINTER, pointer frames = (c_int * 5)() res = p.GetStackTrace(42, frames, 5) ## print "RES_1", res frames = pointer(c_int(5)) res = p.GetStackTrace(42, frames, 0) ## print "RES_2", res # It is unlear to me if this is allowed or not. Apparently there # are typelibs that define such an argument type, but it may be # that these are buggy. # # Point is that SafeArrayCreateEx(VT_VARIANT|VT_BYREF, ..) fails. # The MSDN docs for SafeArrayCreate() have a notice that neither # VT_ARRAY not VT_BYREF may be set, this notice is missing however # for SafeArrayCreateEx(). # # We have this code here to make sure that comtypes can import # such a typelib, although calling ths method will fail because # such an array cannot be created. itf.add("""HRESULT dummy([in] SAFEARRAY(VARIANT *) foo);""") # Test events. 
itf.add("""HRESULT DoSomething();""") outgoing.add("""[id(103)] HRESULT OnSomething();""") # implementation def DoSomething(self): "Implement the DoSomething method" self.Fire_Event(0, "OnSomething") # test def test_events(self): p = wrap(self.create()) class Handler(object): called = 0 def OnSomething(self, this): "Handles the OnSomething event" self.called += 1 handler = Handler() ev = comtypes.client.GetEvents(p, handler) p.DoSomething() self.assertEqual(handler.called, 1) class Handler(object): called = 0 def IMyEventInterface_OnSomething(self): "Handles the OnSomething event" self.called += 1 handler = Handler() ev = comtypes.client.GetEvents(p, handler) p.DoSomething() self.assertEqual(handler.called, 1) # events with out-parameters (these are probably very unlikely...) itf.add("""HRESULT DoSomethingElse();""") outgoing.add("""[id(104)] HRESULT OnSomethingElse([out, retval] int *px);""") def DoSomethingElse(self): "Implement the DoSomething method" self.Fire_Event(0, "OnSomethingElse") def test_DoSomethingElse(self): p = wrap(self.create()) class Handler(object): called = 0 def OnSomethingElse(self): "Handles the OnSomething event" self.called += 1 return 42 handler = Handler() ev = comtypes.client.GetEvents(p, handler) p.DoSomethingElse() self.assertEqual(handler.called, 1) class Handler(object): called = 0 def OnSomethingElse(self, this, presult): "Handles the OnSomething event" self.called += 1 presult[0] = 42 handler = Handler() ev = comtypes.client.GetEvents(p, handler) p.DoSomethingElse() self.assertEqual(handler.called, 1) ################################################################ path = tlb.compile() from comtypes.gen import TestLib from comtypes.typeinfo import IProvideClassInfo, IProvideClassInfo2 from comtypes.connectionpoints import IConnectionPointContainer MyServer._com_interfaces_ = [TestLib.IMyInterface, IProvideClassInfo2, IConnectionPointContainer] MyServer._outgoing_interfaces_ = [TestLib.IMyEventInterface] 
################################################################ class Test(unittest.TestCase, MyServer): def __init__(self, *args): unittest.TestCase.__init__(self, *args) MyServer.__init__(self) def create(self): obj = MyServer() return obj.QueryInterface(comtypes.IUnknown) if __name__ == "__main__": unittest.main()
apache-2.0
XXMrHyde/android_external_chromium_org
native_client_sdk/src/tools/tests/sel_ldr_test.py
104
2080
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys import unittest SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PARENT_DIR = os.path.dirname(SCRIPT_DIR) DATA_DIR = os.path.join(SCRIPT_DIR, 'data') CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(PARENT_DIR))) MOCK_DIR = os.path.join(CHROME_SRC, "third_party", "pymock") # For the mock library sys.path.append(MOCK_DIR) sys.path.append(PARENT_DIR) import sel_ldr import mock class TestSelLdr(unittest.TestCase): def testRequiresArg(self): with mock.patch('sys.stderr'): self.assertRaises(SystemExit, sel_ldr.main, []) def testUsesHelper(self): with mock.patch('subprocess.call') as call: with mock.patch('os.path.exists'): with mock.patch('os.path.isfile'): with mock.patch('create_nmf.ParseElfHeader') as parse_header: parse_header.return_value = ('x8-64', False) with mock.patch('getos.GetPlatform') as get_platform: # assert that when we are running on linux # the helper is used. get_platform.return_value = 'linux' sel_ldr.main(['foo.nexe']) parse_header.assert_called_once_with('foo.nexe') self.assertEqual(call.call_count, 1) cmd = call.call_args[0][0] self.assertTrue('helper_bootstrap' in cmd[0]) # assert that when not running on linux the # helper is not used. get_platform.reset_mock() parse_header.reset_mock() call.reset_mock() get_platform.return_value = 'win' sel_ldr.main(['foo.nexe']) parse_header.assert_called_once_with('foo.nexe') self.assertEqual(call.call_count, 1) cmd = call.call_args[0][0] self.assertTrue('helper_bootstrap' not in cmd[0]) if __name__ == '__main__': unittest.main()
bsd-3-clause
SciTools/biggus
biggus/tests/integration/test_maths.py
3
1427
# (C) British Crown Copyright 2015 - 2016, Met Office # # This file is part of Biggus. # # Biggus is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Biggus is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Biggus. If not, see <http://www.gnu.org/licenses/>. """Integration tests for maths operations.""" from __future__ import absolute_import, division, print_function from six.moves import (filter, input, map, range, zip) # noqa import numpy as np import numpy.ma as ma import unittest import biggus import biggus.tests.unit.init._aggregation_test_framework as test_framework class TestSum(unittest.TestCase): def test_sum_float(self): a = biggus.ConstantArray((1), dtype=np.float32) b = a + 1 self.assertEqual('float32', b.dtype) def test_sum_int(self): a = biggus.ConstantArray((1), dtype=np.int32) b = a + 1 self.assertEqual('int32', b.dtype) if __name__ == '__main__': unittest.main()
gpl-3.0
saydulk/django
tests/postgres_tests/test_array.py
70
19822
import decimal import json import unittest import uuid from django import forms from django.core import exceptions, serializers, validators from django.core.management import call_command from django.db import IntegrityError, connection, models from django.test import TransactionTestCase, override_settings from django.utils import timezone from . import PostgreSQLTestCase from .models import ( ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel, NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel, PostgreSQLModel, ) try: from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField except ImportError: pass class TestSaveLoad(PostgreSQLTestCase): def test_integer(self): instance = IntegerArrayModel(field=[1, 2, 3]) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_char(self): instance = CharArrayModel(field=['hello', 'goodbye']) instance.save() loaded = CharArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_dates(self): instance = DateTimeArrayModel( datetimes=[timezone.now()], dates=[timezone.now().date()], times=[timezone.now().time()], ) instance.save() loaded = DateTimeArrayModel.objects.get() self.assertEqual(instance.datetimes, loaded.datetimes) self.assertEqual(instance.dates, loaded.dates) self.assertEqual(instance.times, loaded.times) def test_tuples(self): instance = IntegerArrayModel(field=(1,)) instance.save() loaded = IntegerArrayModel.objects.get() self.assertSequenceEqual(instance.field, loaded.field) def test_integers_passed_as_strings(self): # This checks that get_prep_value is deferred properly instance = IntegerArrayModel(field=['1']) instance.save() loaded = IntegerArrayModel.objects.get() self.assertEqual(loaded.field, [1]) def test_default_null(self): instance = NullableIntegerArrayModel() instance.save() loaded = 
NullableIntegerArrayModel.objects.get(pk=instance.pk) self.assertEqual(loaded.field, None) self.assertEqual(instance.field, loaded.field) def test_null_handling(self): instance = NullableIntegerArrayModel(field=None) instance.save() loaded = NullableIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) instance = IntegerArrayModel(field=None) with self.assertRaises(IntegrityError): instance.save() def test_nested(self): instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]]) instance.save() loaded = NestedIntegerArrayModel.objects.get() self.assertEqual(instance.field, loaded.field) def test_other_array_types(self): instance = OtherTypesArrayModel( ips=['192.168.0.1', '::1'], uuids=[uuid.uuid4()], decimals=[decimal.Decimal(1.25), 1.75], ) instance.save() loaded = OtherTypesArrayModel.objects.get() self.assertEqual(instance.ips, loaded.ips) self.assertEqual(instance.uuids, loaded.uuids) self.assertEqual(instance.decimals, loaded.decimals) def test_model_set_on_base_field(self): instance = IntegerArrayModel() field = instance._meta.get_field('field') self.assertEqual(field.model, IntegerArrayModel) self.assertEqual(field.base_field.model, IntegerArrayModel) class TestQuerying(PostgreSQLTestCase): def setUp(self): self.objs = [ NullableIntegerArrayModel.objects.create(field=[1]), NullableIntegerArrayModel.objects.create(field=[2]), NullableIntegerArrayModel.objects.create(field=[2, 3]), NullableIntegerArrayModel.objects.create(field=[20, 30, 40]), NullableIntegerArrayModel.objects.create(field=None), ] def test_exact(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1] ) def test_isnull(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:] ) def test_gt(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4] ) def test_lt(self): self.assertSequenceEqual( 
NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1] ) def test_in(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]), self.objs[:2] ) def test_contained_by(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]), self.objs[:2] ) def test_contains(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__contains=[2]), self.objs[1:3] ) def test_contains_charfield(self): # Regression for #22907 self.assertSequenceEqual( CharArrayModel.objects.filter(field__contains=['text']), [] ) def test_contained_by_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__contained_by=['text']), [] ) def test_overlap_charfield(self): self.assertSequenceEqual( CharArrayModel.objects.filter(field__overlap=['text']), [] ) def test_index(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3] ) def test_index_chained(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3] ) def test_index_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance] ) @unittest.expectedFailure def test_index_used_on_nested_data(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance] ) def test_overlap(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]), self.objs[0:3] ) def test_len(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3] ) def test_slice(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3] ) self.assertSequenceEqual( 
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3] ) @unittest.expectedFailure def test_slice_nested(self): instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]]) self.assertSequenceEqual( NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance] ) def test_usage_in_subquery(self): self.assertSequenceEqual( NullableIntegerArrayModel.objects.filter( id__in=NullableIntegerArrayModel.objects.filter(field__len=3) ), [self.objs[3]] ) class TestChecks(PostgreSQLTestCase): def test_field_checks(self): class MyModel(PostgreSQLModel): field = ArrayField(models.CharField()) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].id, 'postgres.E001') def test_invalid_base_fields(self): class MyModel(PostgreSQLModel): field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel')) model = MyModel() errors = model.check() self.assertEqual(len(errors), 1) self.assertEqual(errors[0].id, 'postgres.E002') @unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests") class TestMigrations(TransactionTestCase): available_apps = ['postgres_tests'] def test_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(type(new.base_field), type(field.base_field)) def test_deconstruct_with_size(self): field = ArrayField(models.IntegerField(), size=3) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.size, field.size) def test_deconstruct_args(self): field = ArrayField(models.CharField(max_length=20)) name, path, args, kwargs = field.deconstruct() new = ArrayField(*args, **kwargs) self.assertEqual(new.base_field.max_length, field.base_field.max_length) def test_subclass_deconstruct(self): field = ArrayField(models.IntegerField()) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 
'django.contrib.postgres.fields.ArrayField') field = ArrayFieldSubclass() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass') @override_settings(MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_default_migrations", }) def test_adding_field_with_default(self): # See #22962 table_name = 'postgres_tests_integerarraydefaultmodel' with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', verbosity=0) with connection.cursor() as cursor: self.assertIn(table_name, connection.introspection.table_names(cursor)) call_command('migrate', 'postgres_tests', 'zero', verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) @override_settings(MIGRATION_MODULES={ "postgres_tests": "postgres_tests.array_index_migrations", }) def test_adding_arrayfield_with_index(self): """ ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes. """ table_name = 'postgres_tests_chartextarrayindexmodel' call_command('migrate', 'postgres_tests', verbosity=0) with connection.cursor() as cursor: like_constraint_field_names = [ c.rsplit('_', 2)[0][len(table_name) + 1:] for c in connection.introspection.get_constraints(cursor, table_name) if c.endswith('_like') ] # Only the CharField should have a LIKE index. self.assertEqual(like_constraint_field_names, ['char2']) with connection.cursor() as cursor: indexes = connection.introspection.get_indexes(cursor, table_name) # All fields should have regular indexes. 
self.assertIn('char', indexes) self.assertIn('char2', indexes) self.assertIn('text', indexes) call_command('migrate', 'postgres_tests', 'zero', verbosity=0) with connection.cursor() as cursor: self.assertNotIn(table_name, connection.introspection.table_names(cursor)) class TestSerialization(PostgreSQLTestCase): test_data = '[{"fields": {"field": "[\\"1\\", \\"2\\"]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]' def test_dumping(self): instance = IntegerArrayModel(field=[1, 2]) data = serializers.serialize('json', [instance]) self.assertEqual(json.loads(data), json.loads(self.test_data)) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.field, [1, 2]) class TestValidation(PostgreSQLTestCase): def test_unbounded(self): field = ArrayField(models.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, None], None) self.assertEqual(cm.exception.code, 'item_invalid') self.assertEqual(cm.exception.message % cm.exception.params, 'Item 1 in the array did not validate: This field cannot be null.') def test_blank_true(self): field = ArrayField(models.IntegerField(blank=True, null=True)) # This should not raise a validation error field.clean([1, None], None) def test_with_size(self): field = ArrayField(models.IntegerField(), size=3) field.clean([1, 2, 3], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([1, 2, 3, 4], None) self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.') def test_nested_array_mismatch(self): field = ArrayField(ArrayField(models.IntegerField())) field.clean([[1, 2], [3, 4]], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([[1, 2], [3, 4, 5]], None) self.assertEqual(cm.exception.code, 'nested_array_mismatch') self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.') def test_with_validators(self): field = 
ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)])) field.clean([1, 2], None) with self.assertRaises(exceptions.ValidationError) as cm: field.clean([0], None) self.assertEqual(cm.exception.code, 'item_invalid') self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.') class TestSimpleFormField(PostgreSQLTestCase): def test_valid(self): field = SimpleArrayField(forms.CharField()) value = field.clean('a,b,c') self.assertEqual(value, ['a', 'b', 'c']) def test_to_python_fail(self): field = SimpleArrayField(forms.IntegerField()) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,9') self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.') def test_validate_fail(self): field = SimpleArrayField(forms.CharField(required=True)) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,') self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.') def test_validators_fail(self): field = SimpleArrayField(forms.RegexField('[a-e]{2}')) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,bc,de') self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.') def test_delimiter(self): field = SimpleArrayField(forms.CharField(), delimiter='|') value = field.clean('a|b|c') self.assertEqual(value, ['a', 'b', 'c']) def test_delimiter_with_nesting(self): field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|') value = field.clean('a,b|c,d') self.assertEqual(value, [['a', 'b'], ['c', 'd']]) def test_prepare_value(self): field = SimpleArrayField(forms.CharField()) value = field.prepare_value(['a', 'b', 'c']) self.assertEqual(value, 'a,b,c') def test_max_length(self): field = SimpleArrayField(forms.CharField(), max_length=2) with 
self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.') def test_min_length(self): field = SimpleArrayField(forms.CharField(), min_length=4) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('a,b,c') self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.') def test_required(self): field = SimpleArrayField(forms.CharField(), required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean('') self.assertEqual(cm.exception.messages[0], 'This field is required.') def test_model_field_formfield(self): model_field = ArrayField(models.CharField(max_length=27)) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertIsInstance(form_field.base_field, forms.CharField) self.assertEqual(form_field.base_field.max_length, 27) def test_model_field_formfield_size(self): model_field = ArrayField(models.CharField(max_length=27), size=4) form_field = model_field.formfield() self.assertIsInstance(form_field, SimpleArrayField) self.assertEqual(form_field.max_length, 4) class TestSplitFormField(PostgreSQLTestCase): def test_valid(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'} form = SplitForm(data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']}) def test_required(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), required=True, size=3) data = {'array_0': '', 'array_1': '', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['This field is required.']}) def test_remove_trailing_nulls(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True) 
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''} form = SplitForm(data) self.assertTrue(form.is_valid(), form.errors) self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']}) def test_required_field(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''} form = SplitForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']}) def test_rendering(self): class SplitForm(forms.Form): array = SplitArrayField(forms.CharField(), size=3) self.assertHTMLEqual(str(SplitForm()), ''' <tr> <th><label for="id_array_0">Array:</label></th> <td> <input id="id_array_0" name="array_0" type="text" /> <input id="id_array_1" name="array_1" type="text" /> <input id="id_array_2" name="array_2" type="text" /> </td> </tr> ''')
bsd-3-clause
krzysztofwos/BitcoinUnlimited
qa/rpc-tests/mempool_limit.py
8
2293
#!/usr/bin/env python3 # Copyright (c) 2014-2015 The Bitcoin Core developers # Copyright (c) 2015-2017 The Bitcoin Unlimited developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Test mempool limiting together/eviction with the wallet from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class MempoolLimitTest(BitcoinTestFramework): def __init__(self): self.txouts = gen_return_txouts() def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0", "-debug"])) self.is_network_split = False self.sync_all() self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] def setup_chain(self): print("Initializing test directory "+self.options.tmpdir) initialize_chain_clean(self.options.tmpdir, 2) def run_test(self): txids = [] utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90) #create a mempool tx that will be evicted us0 = utxos.pop() inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}] outputs = {self.nodes[0].getnewaddress() : 0.0001} tx = self.nodes[0].createrawtransaction(inputs, outputs) self.nodes[0].settxfee(self.relayfee) # specifically fund this tx with low fee txF = self.nodes[0].fundrawtransaction(tx) self.nodes[0].settxfee(0) # return to automatic fee selection txFS = self.nodes[0].signrawtransaction(txF['hex']) txid = self.nodes[0].sendrawtransaction(txFS['hex']) relayfee = self.nodes[0].getnetworkinfo()['relayfee'] base_fee = relayfee*100 for i in range (4): txids.append([]) txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee) # by now, the tx should be evicted, check confirmation state assert(txid not in self.nodes[0].getrawmempool()) txdata = self.nodes[0].gettransaction(txid) assert(txdata['confirmations'] == 0) #confirmation should still be 0 if __name__ == '__main__': 
MempoolLimitTest().main()
mit
otherness-space/myProject002
my_project_002/lib/python2.7/site-packages/django/contrib/localflavor/fr/forms.py
100
2008
""" FR-specific Form helpers """ from __future__ import absolute_import, unicode_literals import re from django.contrib.localflavor.fr.fr_department import DEPARTMENT_CHOICES from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import CharField, RegexField, Select from django.utils.encoding import smart_text from django.utils.translation import ugettext_lazy as _ phone_digits_re = re.compile(r'^0\d(\s|\.)?(\d{2}(\s|\.)?){3}\d{2}$') class FRZipCodeField(RegexField): default_error_messages = { 'invalid': _('Enter a zip code in the format XXXXX.'), } def __init__(self, max_length=5, min_length=5, *args, **kwargs): super(FRZipCodeField, self).__init__(r'^\d{5}$', max_length, min_length, *args, **kwargs) class FRPhoneNumberField(CharField): """ Validate local French phone number (not international ones) The correct format is '0X XX XX XX XX'. '0X.XX.XX.XX.XX' and '0XXXXXXXXX' validate but are corrected to '0X XX XX XX XX'. """ default_error_messages = { 'invalid': _('Phone numbers must be in 0X XX XX XX XX format.'), } def __init__(self, max_length=14, min_length=10, *args, **kwargs): super(FRPhoneNumberField, self).__init__( max_length, min_length, *args, **kwargs) def clean(self, value): super(FRPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return '' value = re.sub('(\.|\s)', '', smart_text(value)) m = phone_digits_re.search(value) if m: return '%s %s %s %s %s' % (value[0:2], value[2:4], value[4:6], value[6:8], value[8:10]) raise ValidationError(self.error_messages['invalid']) class FRDepartmentSelect(Select): """ A Select widget that uses a list of FR departments as its choices. """ def __init__(self, attrs=None): super(FRDepartmentSelect, self).__init__(attrs, choices=DEPARTMENT_CHOICES)
mit
bennomadic/django-webid-provider
src/django_webid/provider/webiduri.py
1
3264
import types from django.conf import settings from django.shortcuts import get_object_or_404, render_to_response from django.template import RequestContext, TemplateDoesNotExist from django.utils.decorators import classonlymethod from django_conneg.views import ContentNegotiatedView from django_conneg.decorators import renderer as conneg_renderer from django_webid.provider.models import WebIDUser class PriorityContentNegotiatedView(ContentNegotiatedView): """ dynamic addition of renderers """ @classonlymethod def as_view(cls, **initkwargs): ranked_mimetypes = getattr(cls, '_mimetypes', None) if ranked_mimetypes: for mime, prio, name in ranked_mimetypes: renderer = cls.getRenderer(name, mime, name, prio) cls.addMethod(renderer) view = super(PriorityContentNegotiatedView, cls).as_view(**initkwargs) return view @classonlymethod def addMethod(cls, func): return setattr(cls, func.__name__, types.MethodType(func, cls)) @classonlymethod def getRenderer(cls, format, mimetypes, name, priority): if not isinstance(mimetypes, tuple): mimetypes = (mimetypes,) mime = mimetypes[0] def renderer(cls, self, request, context, template_name): template_name = self.join_template_name(template_name, name.lower()) if template_name is None: return NotImplemented try: return render_to_response(template_name, context, context_instance=RequestContext(request), mimetype=mime) except TemplateDoesNotExist: return NotImplemented renderer.__name__ = 'render_%s' % mime.replace('/', '_') renderer = conneg_renderer(format=format, mimetypes=mimetypes, priority=priority)(renderer) return renderer class WebIDProfileView(PriorityContentNegotiatedView): """ View that negotiates the output format Supports: html, rdfa, rdf+xml, xhtml, turtle ... why not vcard??? """ _default_format = 'html' #fallback format??? 
#_force_fallback_format = 'html' _mimetypes = (('text/html', 10, 'html'), ('application/xhtml+xml', 5, 'rdfa'), ('application/rdf+xml', 1, 'rdfxml'), ('text/turtle', 2, 'turtle')) def get(self, request, username=None): uu = get_object_or_404(WebIDUser, username=username) context = { "webiduser": uu, "MEDIA_URL": settings.MEDIA_URL, "STATIC_URL": settings.STATIC_URL, } # Call render, passing a template name (w/o file extension) return self.render(request, context, 'django_webid/provider/webidprofile/webid') # fix head method # (fixed in bennomadic fork of django_conneg, which is now # in requirements, let's see if it's merged upstream.) #def head(self, request, *args, **kwargs): # return self.get(request, *args, **kwargs)
gpl-2.0
JustArchi/program-y
src/programy/config/brain.py
2
8932
""" Copyright (c) 2016 Keith Sterling Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import logging from programy.config.base import BaseConfigurationData class BrainFileConfiguration(object): def __init__(self, files, extension=".aiml", directories=False): self._files = files self._extension = extension self._directories = directories @property def files(self): return self._files @property def extension(self): return self._extension @property def directories(self): return self._directories class BrainServiceConfiguration(object): def __init__(self, name, data=None): self._name = name.upper() self._params = {} if data is not None: for key in data.keys(): self._params[key.upper()] = data[key] @property def name(self): return self._name @property def path(self): return self._params['PATH'] def parameters(self): return self._params.keys() def set_parameter(self, key, value): self._params[key] = value def parameter(self, name): if name in self._params: return self._params[name] else: return None class BrainConfiguration(BaseConfigurationData): DEFAULT_SUPRESS_WARNINGS = False DEFAULT_ALLOW_SYSTEM_AIML = True DEFAULT_ALLOW_LEARN_AIML = True DEFAULT_ALLOW_LEARNF_AIML = True def __init__(self): self._supress_warnings = BrainConfiguration.DEFAULT_SUPRESS_WARNINGS self._allow_system_aiml = BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML self._allow_learn_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML self._aiml_files = None self._set_files = None self._map_files = None self._denormal = None self._normal = None self._gender = None self._person = None self._person2 = None self._predicates = None self._pronouns = None self._properties = None self._triples = None self._preprocessors = None self._postprocessors = None self._services = [] BaseConfigurationData.__init__(self, "brain") def _get_brain_file_configuration(self, config_file, section, bot_root): files = config_file.get_option(section, "files") files = self.sub_bot_root(files, bot_root) extension = config_file.get_option(section, 
"extension") directories = config_file.get_option(section, "directories") return BrainFileConfiguration(files, extension, directories) def load_config_section(self, config_file, bot_root): brain = config_file.get_section(self.section_name) if brain is not None: self._supress_warnings = config_file.get_option(brain, "supress_warnings", BrainConfiguration.DEFAULT_SUPRESS_WARNINGS) self._allow_system_aiml = config_file.get_option(brain, "allow_system_aiml", BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML) self._allow_learn_aiml = config_file.get_option(brain, "allow_learn_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML) self._allow_learnf_aiml = config_file.get_option(brain, "allow_learnf_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML) self._allow_learnf_aiml = config_file.get_option(brain, "allow_learnf_aiml", BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML) files = config_file.get_section("files", brain) if files is not None: aiml = config_file.get_section("aiml", files) self._aiml_files = self._get_brain_file_configuration(config_file, aiml, bot_root) sets = config_file.get_section("sets", files) self._set_files = self._get_brain_file_configuration(config_file, sets, bot_root) maps = config_file.get_section("maps", files) self._map_files = self._get_brain_file_configuration(config_file, maps, bot_root) self._denormal = self._get_file_option(config_file, "denormal", files, bot_root) self._normal = self._get_file_option(config_file, "normal", files, bot_root) self._gender = self._get_file_option(config_file, "gender", files, bot_root) self._person = self._get_file_option(config_file, "person", files, bot_root) self._person2 = self._get_file_option(config_file, "person2", files, bot_root) self._predicates = self._get_file_option(config_file, "predicates", files, bot_root) self._pronouns = self._get_file_option(config_file, "pronouns", files, bot_root) self._properties = self._get_file_option(config_file, "properties", files, bot_root) self._triples = 
self._get_file_option(config_file, "triples", files, bot_root) self._preprocessors = self._get_file_option(config_file, "preprocessors", files, bot_root) self._postprocessors = self._get_file_option(config_file, "postprocessors", files, bot_root) else: logging.warning("Config section [files] missing from Brain, default values not appropriate") raise Exception ("Config section [files] missing from Brain") services = config_file.get_section("services", brain) if services is not None: service_keys = config_file.get_child_section_keys("services", brain) for name in service_keys: service_data = config_file.get_section_data(name, services) self._services.append(BrainServiceConfiguration(name, service_data)) else: logging.warning("Config section [services] missing from Brain, no services loaded") else: logging.warning("Config section [%s] missing, using default values", self.section_name) self._supress_warnings = BrainConfiguration.DEFAULT_SUPRESS_WARNINGS self._allow_system_aiml = BrainConfiguration.DEFAULT_ALLOW_SYSTEM_AIML self._allow_learn_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARN_AIML self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML self._allow_learnf_aiml = BrainConfiguration.DEFAULT_ALLOW_LEARNF_AIML @property def supress_warnings(self): return self._supress_warnings @property def allow_system_aiml(self): return self._allow_system_aiml @property def allow_learn_aiml(self): return self._allow_learn_aiml @property def allow_learnf_aiml(self): return self._allow_learnf_aiml @property def aiml_files(self): return self._aiml_files @property def set_files(self): return self._set_files @property def map_files(self): return self._map_files @property def denormal(self): return self._denormal @property def normal(self): return self._normal @property def gender(self): return self._gender @property def person(self): return self._person @property def person2(self): return self._person2 @property def predicates(self): return self._predicates @property 
def pronouns(self): return self._pronouns @property def properties(self): return self._properties @property def triples(self): return self._triples @property def preprocessors(self): return self._preprocessors @property def postprocessors(self): return self._postprocessors @property def services(self): return self._services
mit
AnnaWyszomirska/lesson1_1
tests/test_change_in_the contact.py
1
1546
from model.contact import Contact from random import randrange def test_change_in_the_contact(app,db): if len(db.get_contact_list()) == 0: app.contact.add(Contact(bday="//div[@id='content']/form/select[1]//option[4]", bmonth= "//div[@id='content']/form/select[2]//option[3]", aday="//div[@id='content']/form/select[3]//option[19]", amonth="//div[@id='content']/form/select[4]//option[3]", address2="Test ", privatephone="Test", comments="Test" )) old_contacts = db.get_contact_list() index = randrange(len(old_contacts)) contact = Contact(firstname= "New name", lastname = "New name", birthyear="Test", annyear="Test", bday="//div[@id='content']/form/select[1]//option[4]", bmonth= "//div[@id='content']/form/select[2]//option[3]", aday="//div[@id='content']/form/select[3]//option[19]", amonth="//div[@id='content']/form/select[4]//option[3]", ) contact.id = old_contacts[index].id app.contact.change_contact_by_id(index, contact) new_contacts = db.get_contact_list() assert len(old_contacts) == len(new_contacts) old_contacts[index] = contact assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
apache-2.0
EPDCenter/android_kernel_bq_qc
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python """ Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com> Licensed under the terms of the GNU GPL License version 2 This script parses a trace provided by the function tracer in kernel/trace/trace_functions.c The resulted trace is processed into a tree to produce a more human view of the call stack by drawing textual but hierarchical tree of calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER # mount -t debugfs nodev /sys/kernel/debug # echo function > /sys/kernel/debug/tracing/current_tracer $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ import sys, re class CallTree: """ This class provides a tree representation of the functions call stack. If a function has no parent in the kernel (interrupt, syscall, kernel thread...) then it is attached to a virtual parent called ROOT. """ ROOT = None def __init__(self, func, time = None, parent = None): self._func = func self._time = time if parent is None: self._parent = CallTree.ROOT else: self._parent = parent self._children = [] def calls(self, func, calltime): """ If a function calls another one, call this method to insert it into the tree at the appropriate place. @return: A reference to the newly created child node. """ child = CallTree(func, calltime, self) self._children.append(child) return child def getParent(self, func): """ Retrieve the last parent of the current node that has the name given by func. If this function is not on a parent, then create it as new child of root @return: A reference to the parent. 
""" tree = self while tree != CallTree.ROOT and tree._func != func: tree = tree._parent if tree == CallTree.ROOT: child = CallTree.ROOT.calls(func, None) return child return tree def __repr__(self): return self.__toString("", True) def __toString(self, branch, lastChild): if self._time is not None: s = "%s----%s (%s)\n" % (branch, self._func, self._time) else: s = "%s----%s\n" % (branch, self._func) i = 0 if lastChild: branch = branch[:-1] + " " while i < len(self._children): if i != len(self._children) - 1: s += "%s" % self._children[i].__toString(branch +\ " |", False) else: s += "%s" % self._children[i].__toString(branch +\ " |", True) i += 1 return s class BrokenLineException(Exception): """If the last line is not complete because of the pipe breakage, we want to stop the processing and ignore this line. """ pass class CommentLineException(Exception): """ If the line is a comment (as in the beginning of the trace file), just ignore it. """ pass def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException return (m.group(1), m.group(2), m.group(3)) def main(): CallTree.ROOT = CallTree("Root (Nowhere)", None, None) tree = CallTree.ROOT for line in sys.stdin: try: calltime, callee, caller = parseLine(line) except BrokenLineException: break except CommentLineException: continue tree = tree.getParent(caller) tree = tree.calls(callee, calltime) print CallTree.ROOT if __name__ == "__main__": main()
gpl-2.0
VaibhavAgarwalVA/sympy
sympy/simplify/powsimp.py
39
26056
from __future__ import print_function, division from collections import defaultdict from sympy.core.function import expand_log, count_ops from sympy.core import sympify, Basic, Dummy, S, Add, Mul, Pow, expand_mul, factor_terms from sympy.core.compatibility import ordered, default_sort_key, reduce from sympy.core.numbers import Integer, Rational from sympy.core.mul import prod, _keep_coeff from sympy.core.rules import Transform from sympy.functions import exp_polar, exp, log, root, polarify, unpolarify from sympy.polys import lcm, gcd from sympy.ntheory.factor_ import multiplicity def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops): """ reduces expression by combining powers with similar bases and exponents. Notes ===== If deep is True then powsimp() will also simplify arguments of functions. By default deep is set to False. If force is True then bases will be combined without checking for assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true if x and y are both negative. You can make powsimp() only combine bases or only combine exponents by changing combine='base' or combine='exp'. By default, combine='all', which does both. combine='base' will only combine:: a a a 2x x x * y => (x*y) as well as things like 2 => 4 and combine='exp' will only combine :: a b (a + b) x * x => x combine='exp' will strictly only combine exponents in the way that used to be automatic. Also use deep=True if you need the old behavior. When combine='all', 'exp' is evaluated first. Consider the first example below for when there could be an ambiguity relating to this. This is done so things like the second example can be completely combined. If you want 'base' combined first, do something like powsimp(powsimp(expr, combine='base'), combine='exp'). 
Examples ======== >>> from sympy import powsimp, exp, log, symbols >>> from sympy.abc import x, y, z, n >>> powsimp(x**y*x**z*y**z, combine='all') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='exp') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='base', force=True) x**y*(x*y)**z >>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True) (n*x)**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='exp') n**(y + z)*x**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True) (n*x)**y*(n*x)**z >>> x, y = symbols('x y', positive=True) >>> powsimp(log(exp(x)*exp(y))) log(exp(x)*exp(y)) >>> powsimp(log(exp(x)*exp(y)), deep=True) x + y Radicals with Mul bases will be combined if combine='exp' >>> from sympy import sqrt, Mul >>> x, y = symbols('x y') Two radicals are automatically joined through Mul: >>> a=sqrt(x*sqrt(y)) >>> a*a**3 == a**4 True But if an integer power of that radical has been autoexpanded then Mul does not join the resulting factors: >>> a**4 # auto expands to a Mul, no longer a Pow x**2*y >>> _*a # so Mul doesn't combine them x**2*y*sqrt(x*sqrt(y)) >>> powsimp(_) # but powsimp will (x*sqrt(y))**(5/2) >>> powsimp(x*y*a) # but won't when doing so would violate assumptions x*y*sqrt(x*sqrt(y)) """ from sympy.matrices.expressions.matexpr import MatrixSymbol def recurse(arg, **kwargs): _deep = kwargs.get('deep', deep) _combine = kwargs.get('combine', combine) _force = kwargs.get('force', force) _measure = kwargs.get('measure', measure) return powsimp(arg, _deep, _combine, _force, _measure) expr = sympify(expr) if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or ( expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))): return expr if deep or expr.is_Add or expr.is_Mul and _y not in expr.args: expr = expr.func(*[recurse(w) for w in expr.args]) if expr.is_Pow: return recurse(expr*_y, deep=False)/_y if not expr.is_Mul: return expr # handle the Mul if combine in ('exp', 'all'): # Collect base/exp data, while maintaining order in 
the # non-commutative parts of the product c_powers = defaultdict(list) nc_part = [] newexpr = [] coeff = S.One for term in expr.args: if term.is_Rational: coeff *= term continue if term.is_Pow: term = _denest_pow(term) if term.is_commutative: b, e = term.as_base_exp() if deep: b, e = [recurse(i) for i in [b, e]] if b.is_Pow or b.func is exp: # don't let smthg like sqrt(x**a) split into x**a, 1/2 # or else it will be joined as x**(a/2) later b, e = b**e, S.One c_powers[b].append(e) else: # This is the logic that combines exponents for equal, # but non-commutative bases: A**x*A**y == A**(x+y). if nc_part: b1, e1 = nc_part[-1].as_base_exp() b2, e2 = term.as_base_exp() if (b1 == b2 and e1.is_commutative and e2.is_commutative): nc_part[-1] = Pow(b1, Add(e1, e2)) continue nc_part.append(term) # add up exponents of common bases for b, e in ordered(iter(c_powers.items())): # allow 2**x/4 -> 2**(x - 2); don't do this when b and e are # Numbers since autoevaluation will undo it, e.g. # 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4 if (b and b.is_Number and not all(ei.is_Number for ei in e) and \ coeff is not S.One and b not in (S.One, S.NegativeOne)): m = multiplicity(abs(b), abs(coeff)) if m: e.append(m) coeff /= b**m c_powers[b] = Add(*e) if coeff is not S.One: if coeff in c_powers: c_powers[coeff] += S.One else: c_powers[coeff] = S.One # convert to plain dictionary c_powers = dict(c_powers) # check for base and inverted base pairs be = list(c_powers.items()) skip = set() # skip if we already saw them for b, e in be: if b in skip: continue bpos = b.is_positive or b.is_polar if bpos: binv = 1/b if b != binv and binv in c_powers: if b.as_numer_denom()[0] is S.One: c_powers.pop(b) c_powers[binv] -= e else: skip.add(binv) e = c_powers.pop(binv) c_powers[b] -= e # check for base and negated base pairs be = list(c_powers.items()) _n = S.NegativeOne for i, (b, e) in enumerate(be): if ((-b).is_Symbol or b.is_Add) and -b in c_powers: if (b.is_positive in (0, 1) or e.is_integer): 
c_powers[-b] += c_powers.pop(b) if _n in c_powers: c_powers[_n] += e else: c_powers[_n] = e # filter c_powers and convert to a list c_powers = [(b, e) for b, e in c_powers.items() if e] # ============================================================== # check for Mul bases of Rational powers that can be combined with # separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) -> # (x*sqrt(x*y))**(3/2) # ---------------- helper functions def ratq(x): '''Return Rational part of x's exponent as it appears in the bkey. ''' return bkey(x)[0][1] def bkey(b, e=None): '''Return (b**s, c.q), c.p where e -> c*s. If e is not given then it will be taken by using as_base_exp() on the input b. e.g. x**3/2 -> (x, 2), 3 x**y -> (x**y, 1), 1 x**(2*y/3) -> (x**y, 3), 2 exp(x/2) -> (exp(a), 2), 1 ''' if e is not None: # coming from c_powers or from below if e.is_Integer: return (b, S.One), e elif e.is_Rational: return (b, Integer(e.q)), Integer(e.p) else: c, m = e.as_coeff_Mul(rational=True) if c is not S.One: return (b**m, Integer(c.q)), Integer(c.p) else: return (b**e, S.One), S.One else: return bkey(*b.as_base_exp()) def update(b): '''Decide what to do with base, b. If its exponent is now an integer multiple of the Rational denominator, then remove it and put the factors of its base in the common_b dictionary or update the existing bases if necessary. If it has been zeroed out, simply remove the base. 
''' newe, r = divmod(common_b[b], b[1]) if not r: common_b.pop(b) if newe: for m in Mul.make_args(b[0]**newe): b, e = bkey(m) if b not in common_b: common_b[b] = 0 common_b[b] += e if b[1] != 1: bases.append(b) # ---------------- end of helper functions # assemble a dictionary of the factors having a Rational power common_b = {} done = [] bases = [] for b, e in c_powers: b, e = bkey(b, e) if b in common_b.keys(): common_b[b] = common_b[b] + e else: common_b[b] = e if b[1] != 1 and b[0].is_Mul: bases.append(b) c_powers = [(b, e) for b, e in common_b.items() if e] bases.sort(key=default_sort_key) # this makes tie-breaking canonical bases.sort(key=measure, reverse=True) # handle longest first for base in bases: if base not in common_b: # it may have been removed already continue b, exponent = base last = False # True when no factor of base is a radical qlcm = 1 # the lcm of the radical denominators while True: bstart = b qstart = qlcm bb = [] # list of factors ee = [] # (factor's expo. and it's current value in common_b) for bi in Mul.make_args(b): bib, bie = bkey(bi) if bib not in common_b or common_b[bib] < bie: ee = bb = [] # failed break ee.append([bie, common_b[bib]]) bb.append(bib) if ee: # find the number of extractions possible # e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1 min1 = ee[0][1]/ee[0][0] for i in range(len(ee)): rat = ee[i][1]/ee[i][0] if rat < 1: break min1 = min(min1, rat) else: # update base factor counts # e.g. if ee = [(2, 5), (3, 6)] then min1 = 2 # and the new base counts will be 5-2*2 and 6-2*3 for i in range(len(bb)): common_b[bb[i]] -= min1*ee[i][0] update(bb[i]) # update the count of the base # e.g. 
x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y) # will increase by 4 to give bkey (x*sqrt(y), 2, 5) common_b[base] += min1*qstart*exponent if (last # no more radicals in base or len(common_b) == 1 # nothing left to join with or all(k[1] == 1 for k in common_b) # no rad's in common_b ): break # see what we can exponentiate base by to remove any radicals # so we know what to search for # e.g. if base were x**(1/2)*y**(1/3) then we should # exponentiate by 6 and look for powers of x and y in the ratio # of 2 to 3 qlcm = lcm([ratq(bi) for bi in Mul.make_args(bstart)]) if qlcm == 1: break # we are done b = bstart**qlcm qlcm *= qstart if all(ratq(bi) == 1 for bi in Mul.make_args(b)): last = True # we are going to be done after this next pass # this base no longer can find anything to join with and # since it was longer than any other we are done with it b, q = base done.append((b, common_b.pop(base)*Rational(1, q))) # update c_powers and get ready to continue with powsimp c_powers = done # there may be terms still in common_b that were bases that were # identified as needing processing, so remove those, too for (b, q), e in common_b.items(): if (b.is_Pow or b.func is exp) and \ q is not S.One and not b.exp.is_Rational: b, be = b.as_base_exp() b = b**(be/q) else: b = root(b, q) c_powers.append((b, e)) check = len(c_powers) c_powers = dict(c_powers) assert len(c_powers) == check # there should have been no duplicates # ============================================================== # rebuild the expression newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()])) if combine == 'exp': return expr.func(newexpr, expr.func(*nc_part)) else: return recurse(expr.func(*nc_part), combine='base') * \ recurse(newexpr, combine='base') elif combine == 'base': # Build c_powers and nc_part. These must both be lists not # dicts because exp's are not combined. 
c_powers = [] nc_part = [] for term in expr.args: if term.is_commutative: c_powers.append(list(term.as_base_exp())) else: # This is the logic that combines bases that are # different and non-commutative, but with equal and # commutative exponents: A**x*B**x == (A*B)**x. if nc_part: b1, e1 = nc_part[-1].as_base_exp() b2, e2 = term.as_base_exp() if (e1 == e2 and e2.is_commutative): nc_part[-1] = Pow(b1*b2, e1) continue nc_part.append(term) # Pull out numerical coefficients from exponent if assumptions allow # e.g., 2**(2*x) => 4**x for i in range(len(c_powers)): b, e = c_powers[i] if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar): continue exp_c, exp_t = e.as_coeff_Mul(rational=True) if exp_c is not S.One and exp_t is not S.One: c_powers[i] = [Pow(b, exp_c), exp_t] # Combine bases whenever they have the same exponent and # assumptions allow # first gather the potential bases under the common exponent c_exp = defaultdict(list) for b, e in c_powers: if deep: e = recurse(e) c_exp[e].append(b) del c_powers # Merge back in the results of the above to form a new product c_powers = defaultdict(list) for e in c_exp: bases = c_exp[e] # calculate the new base for e if len(bases) == 1: new_base = bases[0] elif e.is_integer or force: new_base = expr.func(*bases) else: # see which ones can be joined unk = [] nonneg = [] neg = [] for bi in bases: if bi.is_negative: neg.append(bi) elif bi.is_nonnegative: nonneg.append(bi) elif bi.is_polar: nonneg.append( bi) # polar can be treated like non-negative else: unk.append(bi) if len(unk) == 1 and not neg or len(neg) == 1 and not unk: # a single neg or a single unk can join the rest nonneg.extend(unk + neg) unk = neg = [] elif neg: # their negative signs cancel in groups of 2*q if we know # that e = p/q else we have to treat them as unknown israt = False if e.is_Rational: israt = True else: p, d = e.as_numer_denom() if p.is_integer and d.is_integer: israt = True if israt: neg = [-w for w in 
neg] unk.extend([S.NegativeOne]*len(neg)) else: unk.extend(neg) neg = [] del israt # these shouldn't be joined for b in unk: c_powers[b].append(e) # here is a new joined base new_base = expr.func(*(nonneg + neg)) # if there are positive parts they will just get separated # again unless some change is made def _terms(e): # return the number of terms of this expression # when multiplied out -- assuming no joining of terms if e.is_Add: return sum([_terms(ai) for ai in e.args]) if e.is_Mul: return prod([_terms(mi) for mi in e.args]) return 1 xnew_base = expand_mul(new_base, deep=False) if len(Add.make_args(xnew_base)) < _terms(new_base): new_base = factor_terms(xnew_base) c_powers[new_base].append(e) # break out the powers from c_powers now c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e] # we're done return expr.func(*(c_part + nc_part)) else: raise ValueError("combine must be one of ('all', 'exp', 'base').") def powdenest(eq, force=False, polar=False): r""" Collect exponents on powers as assumptions allow. Given ``(bb**be)**e``, this can be simplified as follows: * if ``bb`` is positive, or * ``e`` is an integer, or * ``|be| < 1`` then this simplifies to ``bb**(be*e)`` Given a product of powers raised to a power, ``(bb1**be1 * bb2**be2...)**e``, simplification can be done as follows: - if e is positive, the gcd of all bei can be joined with e; - all non-negative bb can be separated from those that are negative and their gcd can be joined with e; autosimplification already handles this separation. - integer factors from powers that have integers in the denominator of the exponent can be removed from any term and the gcd of such integers can be joined with e Setting ``force`` to True will make symbols that are not explicitly negative behave as though they are positive, resulting in more denesting. Setting ``polar`` to True will do simplifications on the Riemann surface of the logarithm, also resulting in more denestings. 
When there are sums of logs in exp() then a product of powers may be obtained e.g. ``exp(3*(log(a) + 2*log(b)))`` - > ``a**3*b**6``. Examples ======== >>> from sympy.abc import a, b, x, y, z >>> from sympy import Symbol, exp, log, sqrt, symbols, powdenest >>> powdenest((x**(2*a/3))**(3*x)) (x**(2*a/3))**(3*x) >>> powdenest(exp(3*x*log(2))) 2**(3*x) Assumptions may prevent expansion: >>> powdenest(sqrt(x**2)) sqrt(x**2) >>> p = symbols('p', positive=True) >>> powdenest(sqrt(p**2)) p No other expansion is done. >>> i, j = symbols('i,j', integer=True) >>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j x**(x*(i + j)) But exp() will be denested by moving all non-log terms outside of the function; this may result in the collapsing of the exp to a power with a different base: >>> powdenest(exp(3*y*log(x))) x**(3*y) >>> powdenest(exp(y*(log(a) + log(b)))) (a*b)**y >>> powdenest(exp(3*(log(a) + log(b)))) a**3*b**3 If assumptions allow, symbols can also be moved to the outermost exponent: >>> i = Symbol('i', integer=True) >>> powdenest(((x**(2*i))**(3*y))**x) ((x**(2*i))**(3*y))**x >>> powdenest(((x**(2*i))**(3*y))**x, force=True) x**(6*i*x*y) >>> powdenest(((x**(2*a/3))**(3*y/i))**x) ((x**(2*a/3))**(3*y/i))**x >>> powdenest((x**(2*i)*y**(4*i))**z, force=True) (x*y**2)**(2*i*z) >>> n = Symbol('n', negative=True) >>> powdenest((x**i)**y, force=True) x**(i*y) >>> powdenest((n**i)**x, force=True) (n**i)**x """ from sympy.simplify.simplify import posify if force: eq, rep = posify(eq) return powdenest(eq, force=False).xreplace(rep) if polar: eq, rep = polarify(eq) return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep) new = powsimp(sympify(eq)) return new.xreplace(Transform( _denest_pow, filter=lambda m: m.is_Pow or m.func is exp)) _y = Dummy('y') def _denest_pow(eq): """ Denest powers. This is a helper function for powdenest that performs the actual transformation. 
""" from sympy.simplify.simplify import logcombine b, e = eq.as_base_exp() if b.is_Pow or isinstance(b.func, exp) and e != 1: new = b._eval_power(e) if new is not None: eq = new b, e = new.as_base_exp() # denest exp with log terms in exponent if b is S.Exp1 and e.is_Mul: logs = [] other = [] for ei in e.args: if any(ai.func is log for ai in Add.make_args(ei)): logs.append(ei) else: other.append(ei) logs = logcombine(Mul(*logs)) return Pow(exp(logs), Mul(*other)) _, be = b.as_base_exp() if be is S.One and not (b.is_Mul or b.is_Rational and b.q != 1 or b.is_positive): return eq # denest eq which is either pos**e or Pow**e or Mul**e or # Mul(b1**e1, b2**e2) # handle polar numbers specially polars, nonpolars = [], [] for bb in Mul.make_args(b): if bb.is_polar: polars.append(bb.as_base_exp()) else: nonpolars.append(bb) if len(polars) == 1 and not polars[0][0].is_Mul: return Pow(polars[0][0], polars[0][1]*e)*powdenest(Mul(*nonpolars)**e) elif polars: return Mul(*[powdenest(bb**(ee*e)) for (bb, ee) in polars]) \ *powdenest(Mul(*nonpolars)**e) if b.is_Integer: # use log to see if there is a power here logb = expand_log(log(b)) if logb.is_Mul: c, logb = logb.args e *= c base = logb.args[0] return Pow(base, e) # if b is not a Mul or any factor is an atom then there is nothing to do if not b.is_Mul or any(s.is_Atom for s in Mul.make_args(b)): return eq # let log handle the case of the base of the argument being a Mul, e.g. # sqrt(x**(2*i)*y**(6*i)) -> x**i*y**(3**i) if x and y are positive; we # will take the log, expand it, and then factor out the common powers that # now appear as coefficient. We do this manually since terms_gcd pulls out # fractions, terms_gcd(x+x*y/2) -> x*(y + 2)/2 and we don't want the 1/2; # gcd won't pull out numerators from a fraction: gcd(3*x, 9*x/2) -> x but # we want 3*x. Neither work with noncommutatives. 
def nc_gcd(aa, bb): a, b = [i.as_coeff_Mul() for i in [aa, bb]] c = gcd(a[0], b[0]).as_numer_denom()[0] g = Mul(*(a[1].args_cnc(cset=True)[0] & b[1].args_cnc(cset=True)[0])) return _keep_coeff(c, g) glogb = expand_log(log(b)) if glogb.is_Add: args = glogb.args g = reduce(nc_gcd, args) if g != 1: cg, rg = g.as_coeff_Mul() glogb = _keep_coeff(cg, rg*Add(*[a/g for a in args])) # now put the log back together again if glogb.func is log or not glogb.is_Mul: if glogb.args[0].is_Pow or glogb.args[0].func is exp: glogb = _denest_pow(glogb.args[0]) if (abs(glogb.exp) < 1) == True: return Pow(glogb.base, glogb.exp*e) return eq # the log(b) was a Mul so join any adds with logcombine add = [] other = [] for a in glogb.args: if a.is_Add: add.append(a) else: other.append(a) return Pow(exp(logcombine(Mul(*add))), e*Mul(*other))
bsd-3-clause
highweb-project/highweb-webcl-html5spec
build/config/mac/mac_app.py
21
2701
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Helper invoked by the build to assemble pieces of a Mac .app bundle."""

import argparse
import errno
import os
import subprocess
import sys


def MakeDirectories(path):
  """Creates |path| (including parents) if needed.

  Returns 0 on success and -1 on failure. An already-existing directory
  counts as success, so the helper is idempotent.
  """
  try:
    os.makedirs(path)
  except OSError as exc:
    # EEXIST for an existing *directory* is fine; anything else
    # (e.g. the path exists but is a file, or permission denied) is an error.
    if exc.errno == errno.EEXIST and os.path.isdir(path):
      return 0
    else:
      return -1
  return 0


def ProcessInfoPlist(args):
  """Converts args.input to a binary-format Info.plist under args.output."""
  output_plist_file = os.path.abspath(os.path.join(args.output, 'Info.plist'))
  return subprocess.check_call([
    '/usr/bin/env',
    'xcrun',
    'plutil',
    # BUG FIX: the comma after 'plutil' was missing, so Python's implicit
    # adjacent-string concatenation produced the bogus argv token
    # 'plutil-convert' and the conversion could never succeed.
    '-convert',
    'binary1',
    '-o',
    output_plist_file,
    '--',
    args.input,
  ])


def ProcessNIB(args):
  """Compiles the interface file args.input into a .nib under args.output."""
  output_nib_file = os.path.join(
      os.path.abspath(args.output),
      "%s.nib" % os.path.splitext(os.path.basename(args.input))[0])
  return subprocess.check_call([
    '/usr/bin/env',
    'xcrun',
    'ibtool',
    '--module',
    args.module,
    '--auto-activate-custom-fonts',
    '--target-device',
    'mac',
    '--compile',
    output_nib_file,
    os.path.abspath(args.input),
  ])


def GenerateProjectStructure(args):
  """Creates the <name>.app/Contents skeleton under args.dir."""
  application_path = os.path.join(args.dir, args.name + ".app", "Contents")
  return MakeDirectories(application_path)


def main():
  parser = argparse.ArgumentParser(description='A script that aids in '
                                   'the creation of an Mac application')

  subparsers = parser.add_subparsers()

  # Plist Parser
  plist_parser = subparsers.add_parser('plist', help='Process the Info.plist')
  plist_parser.set_defaults(func=ProcessInfoPlist)
  plist_parser.add_argument('-i', dest='input', help='The input plist path')
  plist_parser.add_argument('-o', dest='output', help='The output plist dir')

  # NIB Parser
  plist_parser = subparsers.add_parser('nib', help='Process a NIB file')
  plist_parser.set_defaults(func=ProcessNIB)
  plist_parser.add_argument('-i', dest='input', help='The input nib path')
  plist_parser.add_argument('-o', dest='output', help='The output nib dir')
  plist_parser.add_argument('-m', dest='module', help='The module name')

  # Directory Structure Parser
  dir_struct_parser = subparsers.add_parser(
      'structure', help='Creates the directory of an Mac application')
  dir_struct_parser.set_defaults(func=GenerateProjectStructure)
  dir_struct_parser.add_argument('-d', dest='dir', help='Out directory')
  dir_struct_parser.add_argument('-n', dest='name', help='App name')

  args = parser.parse_args()
  return args.func(args)


if __name__ == '__main__':
  sys.exit(main())
bsd-3-clause
automl/RoBO
robo/acquisition_functions/log_ei.py
1
4117
import logging

from scipy.stats import norm
import numpy as np

from robo.acquisition_functions.base_acquisition import BaseAcquisitionFunction

logger = logging.getLogger(__name__)


class LogEI(BaseAcquisitionFunction):

    def __init__(self, model, par=0.0, **kwargs):
        r"""
        Computes for a given x the logarithm expected improvement as
        acquisition_functions value.

        Parameters
        ----------
        model: Model object
            A model that implements at least
                 - predict(X)
            If you want to calculate derivatives than it should also support
                 - predictive_gradients(X)
        par: float
            Controls the balance between exploration
            and exploitation of the acquisition_functions function.
            Default is 0.0
        """
        super(LogEI, self).__init__(model)
        self.par = par

    def compute(self, X, derivative=False, eta=None, **kwargs):
        """
        Computes the Log EI value and its derivatives.

        Parameters
        ----------
        X: np.ndarray(1, D), The input point where the acquisition_functions
            function should be evaluate. The dimensionality of X is (N, D),
            with N as the number of points to evaluate at and D is the number
            of dimensions of one X.
        derivative: Boolean
            If is set to true also the derivative of the
            acquisition_functions function at X is returned.
            Not implemented yet!
        eta: float
            The baseline performance y_star to compute the improvement

        Returns
        -------
        np.ndarray(1,1)
            Log Expected Improvement of X
        np.ndarray(1,D)
            Derivative of Log Expected Improvement at X
            (only if derivative=True)
        """
        if derivative:
            # BUG FIX: the original message used a backslash line continuation
            # *inside* the string, which baked a run of spaces into the log.
            logger.error("LogEI does not support derivative calculation until now")
            return

        m, v = self.model.predict(X)

        if eta is None:
            _, eta = self.model.get_incumbent()

        f_min = eta - self.par

        s = np.sqrt(v)
        z = (f_min - m) / s

        log_ei = np.zeros([m.size])
        for i in range(m.size):
            mu, sigma = m[i], s[i]

            # Degenerate case 1: the improvement term (f_min - mu) vanishes.
            if np.any(abs(f_min - mu) == 0):
                if sigma > 0:
                    log_ei[i] = np.log(sigma) + norm.logpdf(z[i])
                else:
                    # BUG FIX: ``np.Infinity`` was removed in NumPy 2.0;
                    # ``np.inf`` is the supported, numerically identical name.
                    log_ei[i] = -np.inf
            # Degenerate case 2: second term vanishes and first term
            # has a special form.
            elif sigma == 0:
                if np.any(mu < f_min):
                    log_ei[i] = np.log(f_min - mu)
                else:
                    log_ei[i] = -np.inf
            # Normal case
            else:
                b = np.log(sigma) + norm.logpdf(z[i])
                # log(y+z) is tricky, we distinguish two cases:
                if np.any(f_min > mu):
                    # When y>0, z>0, we define a=ln(y), b=ln(z).
                    # Then y+z = exp[ max(a,b) + ln(1 + exp(-|b-a|)) ],
                    # and thus log(y+z) = max(a,b) + ln(1 + exp(-|b-a|))
                    a = np.log(f_min - mu) + norm.logcdf(z[i])
                    log_ei[i] = max(a, b) + np.log(1 + np.exp(-abs(b - a)))
                else:
                    # When y<0, z>0, we define a=ln(-y), b=ln(z),
                    # and it has to be true that b >= a in
                    # order to satisfy y+z>=0.
                    # Then y+z = exp[ b + ln(exp(b-a) -1) ],
                    # and thus log(y+z) = a + ln(exp(b-a) -1)
                    a = np.log(mu - f_min) + norm.logcdf(z[i])

                    if a >= b:
                        # a>b can only happen due to numerical inaccuracies
                        # or approximation errors
                        log_ei[i] = -np.inf
                    else:
                        log_ei[i] = b + np.log(1 - np.exp(a - b))

        return log_ei
bsd-3-clause
40223134/2015cd_midterm
static/Brython3.1.1-20150328-091302/Lib/encodings/aliases.py
726
15414
""" Encoding Aliases Support This module is used by the encodings package search function to map encodings names to module names. Note that the search function normalizes the encoding names before doing the lookup, so the mapping will have to map normalized encoding names to module names. Contents: The following aliases dictionary contains mappings of all IANA character set names for which the Python core library provides codecs. In addition to these, a few Python specific codec aliases have also been added. """ aliases = { # Please keep this list sorted alphabetically by value ! # ascii codec '646' : 'ascii', 'ansi_x3.4_1968' : 'ascii', 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name 'ansi_x3.4_1986' : 'ascii', 'cp367' : 'ascii', 'csascii' : 'ascii', 'ibm367' : 'ascii', 'iso646_us' : 'ascii', 'iso_646.irv_1991' : 'ascii', 'iso_ir_6' : 'ascii', 'us' : 'ascii', 'us_ascii' : 'ascii', # base64_codec codec 'base64' : 'base64_codec', 'base_64' : 'base64_codec', # big5 codec 'big5_tw' : 'big5', 'csbig5' : 'big5', # big5hkscs codec 'big5_hkscs' : 'big5hkscs', 'hkscs' : 'big5hkscs', # bz2_codec codec 'bz2' : 'bz2_codec', # cp037 codec '037' : 'cp037', 'csibm037' : 'cp037', 'ebcdic_cp_ca' : 'cp037', 'ebcdic_cp_nl' : 'cp037', 'ebcdic_cp_us' : 'cp037', 'ebcdic_cp_wt' : 'cp037', 'ibm037' : 'cp037', 'ibm039' : 'cp037', # cp1026 codec '1026' : 'cp1026', 'csibm1026' : 'cp1026', 'ibm1026' : 'cp1026', # cp1125 codec '1125' : 'cp1125', 'ibm1125' : 'cp1125', 'cp866u' : 'cp1125', 'ruscii' : 'cp1125', # cp1140 codec '1140' : 'cp1140', 'ibm1140' : 'cp1140', # cp1250 codec '1250' : 'cp1250', 'windows_1250' : 'cp1250', # cp1251 codec '1251' : 'cp1251', 'windows_1251' : 'cp1251', # cp1252 codec '1252' : 'cp1252', 'windows_1252' : 'cp1252', # cp1253 codec '1253' : 'cp1253', 'windows_1253' : 'cp1253', # cp1254 codec '1254' : 'cp1254', 'windows_1254' : 'cp1254', # cp1255 codec '1255' : 'cp1255', 'windows_1255' : 'cp1255', # cp1256 codec '1256' : 'cp1256', 
'windows_1256' : 'cp1256', # cp1257 codec '1257' : 'cp1257', 'windows_1257' : 'cp1257', # cp1258 codec '1258' : 'cp1258', 'windows_1258' : 'cp1258', # cp273 codec '273' : 'cp273', 'ibm273' : 'cp273', 'csibm273' : 'cp273', # cp424 codec '424' : 'cp424', 'csibm424' : 'cp424', 'ebcdic_cp_he' : 'cp424', 'ibm424' : 'cp424', # cp437 codec '437' : 'cp437', 'cspc8codepage437' : 'cp437', 'ibm437' : 'cp437', # cp500 codec '500' : 'cp500', 'csibm500' : 'cp500', 'ebcdic_cp_be' : 'cp500', 'ebcdic_cp_ch' : 'cp500', 'ibm500' : 'cp500', # cp775 codec '775' : 'cp775', 'cspc775baltic' : 'cp775', 'ibm775' : 'cp775', # cp850 codec '850' : 'cp850', 'cspc850multilingual' : 'cp850', 'ibm850' : 'cp850', # cp852 codec '852' : 'cp852', 'cspcp852' : 'cp852', 'ibm852' : 'cp852', # cp855 codec '855' : 'cp855', 'csibm855' : 'cp855', 'ibm855' : 'cp855', # cp857 codec '857' : 'cp857', 'csibm857' : 'cp857', 'ibm857' : 'cp857', # cp858 codec '858' : 'cp858', 'csibm858' : 'cp858', 'ibm858' : 'cp858', # cp860 codec '860' : 'cp860', 'csibm860' : 'cp860', 'ibm860' : 'cp860', # cp861 codec '861' : 'cp861', 'cp_is' : 'cp861', 'csibm861' : 'cp861', 'ibm861' : 'cp861', # cp862 codec '862' : 'cp862', 'cspc862latinhebrew' : 'cp862', 'ibm862' : 'cp862', # cp863 codec '863' : 'cp863', 'csibm863' : 'cp863', 'ibm863' : 'cp863', # cp864 codec '864' : 'cp864', 'csibm864' : 'cp864', 'ibm864' : 'cp864', # cp865 codec '865' : 'cp865', 'csibm865' : 'cp865', 'ibm865' : 'cp865', # cp866 codec '866' : 'cp866', 'csibm866' : 'cp866', 'ibm866' : 'cp866', # cp869 codec '869' : 'cp869', 'cp_gr' : 'cp869', 'csibm869' : 'cp869', 'ibm869' : 'cp869', # cp932 codec '932' : 'cp932', 'ms932' : 'cp932', 'mskanji' : 'cp932', 'ms_kanji' : 'cp932', # cp949 codec '949' : 'cp949', 'ms949' : 'cp949', 'uhc' : 'cp949', # cp950 codec '950' : 'cp950', 'ms950' : 'cp950', # euc_jis_2004 codec 'jisx0213' : 'euc_jis_2004', 'eucjis2004' : 'euc_jis_2004', 'euc_jis2004' : 'euc_jis_2004', # euc_jisx0213 codec 'eucjisx0213' : 'euc_jisx0213', # euc_jp 
codec 'eucjp' : 'euc_jp', 'ujis' : 'euc_jp', 'u_jis' : 'euc_jp', # euc_kr codec 'euckr' : 'euc_kr', 'korean' : 'euc_kr', 'ksc5601' : 'euc_kr', 'ks_c_5601' : 'euc_kr', 'ks_c_5601_1987' : 'euc_kr', 'ksx1001' : 'euc_kr', 'ks_x_1001' : 'euc_kr', # gb18030 codec 'gb18030_2000' : 'gb18030', # gb2312 codec 'chinese' : 'gb2312', 'csiso58gb231280' : 'gb2312', 'euc_cn' : 'gb2312', 'euccn' : 'gb2312', 'eucgb2312_cn' : 'gb2312', 'gb2312_1980' : 'gb2312', 'gb2312_80' : 'gb2312', 'iso_ir_58' : 'gb2312', # gbk codec '936' : 'gbk', 'cp936' : 'gbk', 'ms936' : 'gbk', # hex_codec codec 'hex' : 'hex_codec', # hp_roman8 codec 'roman8' : 'hp_roman8', 'r8' : 'hp_roman8', 'csHPRoman8' : 'hp_roman8', # hz codec 'hzgb' : 'hz', 'hz_gb' : 'hz', 'hz_gb_2312' : 'hz', # iso2022_jp codec 'csiso2022jp' : 'iso2022_jp', 'iso2022jp' : 'iso2022_jp', 'iso_2022_jp' : 'iso2022_jp', # iso2022_jp_1 codec 'iso2022jp_1' : 'iso2022_jp_1', 'iso_2022_jp_1' : 'iso2022_jp_1', # iso2022_jp_2 codec 'iso2022jp_2' : 'iso2022_jp_2', 'iso_2022_jp_2' : 'iso2022_jp_2', # iso2022_jp_2004 codec 'iso_2022_jp_2004' : 'iso2022_jp_2004', 'iso2022jp_2004' : 'iso2022_jp_2004', # iso2022_jp_3 codec 'iso2022jp_3' : 'iso2022_jp_3', 'iso_2022_jp_3' : 'iso2022_jp_3', # iso2022_jp_ext codec 'iso2022jp_ext' : 'iso2022_jp_ext', 'iso_2022_jp_ext' : 'iso2022_jp_ext', # iso2022_kr codec 'csiso2022kr' : 'iso2022_kr', 'iso2022kr' : 'iso2022_kr', 'iso_2022_kr' : 'iso2022_kr', # iso8859_10 codec 'csisolatin6' : 'iso8859_10', 'iso_8859_10' : 'iso8859_10', 'iso_8859_10_1992' : 'iso8859_10', 'iso_ir_157' : 'iso8859_10', 'l6' : 'iso8859_10', 'latin6' : 'iso8859_10', # iso8859_11 codec 'thai' : 'iso8859_11', 'iso_8859_11' : 'iso8859_11', 'iso_8859_11_2001' : 'iso8859_11', # iso8859_13 codec 'iso_8859_13' : 'iso8859_13', 'l7' : 'iso8859_13', 'latin7' : 'iso8859_13', # iso8859_14 codec 'iso_8859_14' : 'iso8859_14', 'iso_8859_14_1998' : 'iso8859_14', 'iso_celtic' : 'iso8859_14', 'iso_ir_199' : 'iso8859_14', 'l8' : 'iso8859_14', 'latin8' : 
'iso8859_14', # iso8859_15 codec 'iso_8859_15' : 'iso8859_15', 'l9' : 'iso8859_15', 'latin9' : 'iso8859_15', # iso8859_16 codec 'iso_8859_16' : 'iso8859_16', 'iso_8859_16_2001' : 'iso8859_16', 'iso_ir_226' : 'iso8859_16', 'l10' : 'iso8859_16', 'latin10' : 'iso8859_16', # iso8859_2 codec 'csisolatin2' : 'iso8859_2', 'iso_8859_2' : 'iso8859_2', 'iso_8859_2_1987' : 'iso8859_2', 'iso_ir_101' : 'iso8859_2', 'l2' : 'iso8859_2', 'latin2' : 'iso8859_2', # iso8859_3 codec 'csisolatin3' : 'iso8859_3', 'iso_8859_3' : 'iso8859_3', 'iso_8859_3_1988' : 'iso8859_3', 'iso_ir_109' : 'iso8859_3', 'l3' : 'iso8859_3', 'latin3' : 'iso8859_3', # iso8859_4 codec 'csisolatin4' : 'iso8859_4', 'iso_8859_4' : 'iso8859_4', 'iso_8859_4_1988' : 'iso8859_4', 'iso_ir_110' : 'iso8859_4', 'l4' : 'iso8859_4', 'latin4' : 'iso8859_4', # iso8859_5 codec 'csisolatincyrillic' : 'iso8859_5', 'cyrillic' : 'iso8859_5', 'iso_8859_5' : 'iso8859_5', 'iso_8859_5_1988' : 'iso8859_5', 'iso_ir_144' : 'iso8859_5', # iso8859_6 codec 'arabic' : 'iso8859_6', 'asmo_708' : 'iso8859_6', 'csisolatinarabic' : 'iso8859_6', 'ecma_114' : 'iso8859_6', 'iso_8859_6' : 'iso8859_6', 'iso_8859_6_1987' : 'iso8859_6', 'iso_ir_127' : 'iso8859_6', # iso8859_7 codec 'csisolatingreek' : 'iso8859_7', 'ecma_118' : 'iso8859_7', 'elot_928' : 'iso8859_7', 'greek' : 'iso8859_7', 'greek8' : 'iso8859_7', 'iso_8859_7' : 'iso8859_7', 'iso_8859_7_1987' : 'iso8859_7', 'iso_ir_126' : 'iso8859_7', # iso8859_8 codec 'csisolatinhebrew' : 'iso8859_8', 'hebrew' : 'iso8859_8', 'iso_8859_8' : 'iso8859_8', 'iso_8859_8_1988' : 'iso8859_8', 'iso_ir_138' : 'iso8859_8', # iso8859_9 codec 'csisolatin5' : 'iso8859_9', 'iso_8859_9' : 'iso8859_9', 'iso_8859_9_1989' : 'iso8859_9', 'iso_ir_148' : 'iso8859_9', 'l5' : 'iso8859_9', 'latin5' : 'iso8859_9', # johab codec 'cp1361' : 'johab', 'ms1361' : 'johab', # koi8_r codec 'cskoi8r' : 'koi8_r', # latin_1 codec # # Note that the latin_1 codec is implemented internally in C and a # lot faster than the charmap codec 
iso8859_1 which uses the same # encoding. This is why we discourage the use of the iso8859_1 # codec and alias it to latin_1 instead. # '8859' : 'latin_1', 'cp819' : 'latin_1', 'csisolatin1' : 'latin_1', 'ibm819' : 'latin_1', 'iso8859' : 'latin_1', 'iso8859_1' : 'latin_1', 'iso_8859_1' : 'latin_1', 'iso_8859_1_1987' : 'latin_1', 'iso_ir_100' : 'latin_1', 'l1' : 'latin_1', 'latin' : 'latin_1', 'latin1' : 'latin_1', # mac_cyrillic codec 'maccyrillic' : 'mac_cyrillic', # mac_greek codec 'macgreek' : 'mac_greek', # mac_iceland codec 'maciceland' : 'mac_iceland', # mac_latin2 codec 'maccentraleurope' : 'mac_latin2', 'maclatin2' : 'mac_latin2', # mac_roman codec 'macintosh' : 'mac_roman', 'macroman' : 'mac_roman', # mac_turkish codec 'macturkish' : 'mac_turkish', # mbcs codec 'dbcs' : 'mbcs', # ptcp154 codec 'csptcp154' : 'ptcp154', 'pt154' : 'ptcp154', 'cp154' : 'ptcp154', 'cyrillic_asian' : 'ptcp154', # quopri_codec codec 'quopri' : 'quopri_codec', 'quoted_printable' : 'quopri_codec', 'quotedprintable' : 'quopri_codec', # rot_13 codec 'rot13' : 'rot_13', # shift_jis codec 'csshiftjis' : 'shift_jis', 'shiftjis' : 'shift_jis', 'sjis' : 'shift_jis', 's_jis' : 'shift_jis', # shift_jis_2004 codec 'shiftjis2004' : 'shift_jis_2004', 'sjis_2004' : 'shift_jis_2004', 's_jis_2004' : 'shift_jis_2004', # shift_jisx0213 codec 'shiftjisx0213' : 'shift_jisx0213', 'sjisx0213' : 'shift_jisx0213', 's_jisx0213' : 'shift_jisx0213', # tactis codec 'tis260' : 'tactis', # tis_620 codec 'tis620' : 'tis_620', 'tis_620_0' : 'tis_620', 'tis_620_2529_0' : 'tis_620', 'tis_620_2529_1' : 'tis_620', 'iso_ir_166' : 'tis_620', # utf_16 codec 'u16' : 'utf_16', 'utf16' : 'utf_16', # utf_16_be codec 'unicodebigunmarked' : 'utf_16_be', 'utf_16be' : 'utf_16_be', # utf_16_le codec 'unicodelittleunmarked' : 'utf_16_le', 'utf_16le' : 'utf_16_le', # utf_32 codec 'u32' : 'utf_32', 'utf32' : 'utf_32', # utf_32_be codec 'utf_32be' : 'utf_32_be', # utf_32_le codec 'utf_32le' : 'utf_32_le', # utf_7 codec 'u7' : 
'utf_7', 'utf7' : 'utf_7', 'unicode_1_1_utf_7' : 'utf_7', # utf_8 codec 'u8' : 'utf_8', 'utf' : 'utf_8', 'utf8' : 'utf_8', 'utf8_ucs2' : 'utf_8', 'utf8_ucs4' : 'utf_8', # uu_codec codec 'uu' : 'uu_codec', # zlib_codec codec 'zip' : 'zlib_codec', 'zlib' : 'zlib_codec', # temporary mac CJK aliases, will be replaced by proper codecs in 3.1 'x_mac_japanese' : 'shift_jis', 'x_mac_korean' : 'euc_kr', 'x_mac_simp_chinese' : 'gb2312', 'x_mac_trad_chinese' : 'big5', }
gpl-3.0
finklabs/banana
tests/test_template.py
1
3235
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals import os try: from banana import template TEMPLATE_MISSING = False except ImportError: TEMPLATE_MISSING = True import pytest from .helpers import create_tempfile, cleanup_tempfiles, temp_folder from . import here @pytest.mark.skipif(TEMPLATE_MISSING, reason='Optional jinja2 missing') def test_create_dir(temp_folder): basedir = os.path.join(temp_folder[0], 'foo', 'bar') target = os.path.join(basedir, 'content.txt') template.create_dir(target) assert os.path.exists(basedir) assert not os.path.exists(target) @pytest.mark.skipif(TEMPLATE_MISSING, reason='Optional jinja2 missing') def test_copy(temp_folder): source = here('test_template.py') basedir = os.path.join(temp_folder[0], 'foo', 'bar') target = os.path.join(basedir, 'test_template.py') template.copy(source, target) assert os.path.exists(target) @pytest.mark.skipif(TEMPLATE_MISSING, reason='Optional jinja2 missing') def test_copy_wildcard(temp_folder): basedir = os.path.join(temp_folder[0], 'foo', 'bar') target = os.path.join(basedir, 'test_route_home.py') template.copy_wildcard(here('.'), basedir, 'test_route*.py') assert os.path.exists(target) @pytest.mark.skipif(TEMPLATE_MISSING, reason='Optional jinja2 missing') def test_copy_tpl(temp_folder, cleanup_tempfiles): templ = create_tempfile('ad1\n{{ replace }}\nad3\n') cleanup_tempfiles.append(templ) basedir = os.path.join(temp_folder[0], 'foo', 'bar') target = os.path.join(basedir, 'my_resultfile.py') template.copy_tpl(templ, target, {'replace': 'ad2'}) print(os.listdir(basedir)) with open(target, 'r') as rfile: content = ''.join(rfile.readlines()) assert content == 'ad1\nad2\nad3\n' ''' def test_copy_wildcard(): def test_copy_tpl(): def create_dir(dst): """create directory if necessary :param dst: """ directory = os.path.dirname(dst) if directory and not os.path.exists(directory): os.makedirs(directory) # TODO create colored output for progress... 
def copy(src, dst): """copy """ create_dir(dst) shutil.copy(src, dst) def copy_wildcard(src_folder, dst_folder, glob): """copy """ create_dir(dst_folder) for sname in iglob(os.path.join(src_folder, glob)): rname = os.path.relpath(sname, src_folder) dname = os.path.join(dst_folder, rname) create_dir(dname) shutil.copy(sname, dname) def copy_tpl(template_file, dst, template_vars): """This supports jinja2 templates. Please feel encouraged to use the template framework of your choosing. jinja2 docu: http://jinja.pocoo.org/docs/2.9/ :param template_file: :param dst: :param template_vars: dictionary containing key, values used in the template """ create_dir(dst) # load template template_loader = jinja2.FileSystemLoader(searchpath='/') template_env = jinja2.Environment(loader=template_loader) template = template_env.get_template(template_file) # render and write to file output = template.render(template_vars) with open(dst, 'wb') as f: f.write(output) '''
mit
dotKom/onlineweb4
apps/marks/models.py
1
8113
# -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta

from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext as _

User = settings.AUTH_USER_MODEL

# Number of days a mark stays active.
DURATION = 30

# summer starts 1st June, ends 15th August
SUMMER = ((6, 1), (8, 15))

# winter starts 1st December, ends 15th January
WINTER = ((12, 1), (1, 15))


def get_expiration_date(user):
    """Return the latest mark expiration date for ``user``, or None."""
    if user:
        marks = MarkUser.objects.filter(user=user).order_by('-expiration_date')
        if marks:
            return marks[0].expiration_date
    return None


class MarksManager(models.Manager):
    """Query helpers for active/inactive marks."""

    @staticmethod
    def all_active():
        # Marks that have at least one not-yet-expired user entry.
        return Mark.objects.filter(given_to__expiration_date__gt=timezone.now().date())

    @staticmethod
    def active(user):
        return MarkUser.objects.filter(user=user).filter(expiration_date__gt=timezone.now().date())

    @staticmethod
    def inactive(user=None):
        return MarkUser.objects.filter(user=user).filter(expiration_date__lte=timezone.now().date())


class Mark(models.Model):
    CATEGORY_CHOICES = (
        (0, _("Ingen")),
        (1, _("Sosialt")),
        (2, _("Bedriftspresentasjon")),
        (3, _("Kurs")),
        (4, _("Tilbakemelding")),
        (5, _("Kontoret")),
        (6, _("Betaling")),
    )

    title = models.CharField(_("tittel"), max_length=155)
    added_date = models.DateField(_("utdelt dato"))
    given_by = models.ForeignKey(
        User,
        related_name="mark_given_by",
        verbose_name=_("gitt av"),
        editable=False,
        null=True,
        blank=True,
        on_delete=models.CASCADE
    )
    last_changed_date = models.DateTimeField(_("sist redigert"), auto_now=True, editable=False)
    last_changed_by = models.ForeignKey(
        User,
        related_name="marks_last_changed_by",
        verbose_name=_("sist redigert av"),
        editable=False,
        null=True,
        blank=False,
        on_delete=models.CASCADE
    )
    description = models.CharField(
        _("beskrivelse"),
        max_length=255,
        help_text=_(
            "Hvis dette feltet etterlates blankt vil det fylles med en standard grunn for typen prikk som er valgt."
        ),
        blank=True
    )
    category = models.SmallIntegerField(_("kategori"), choices=CATEGORY_CHOICES, default=0)

    # managers
    objects = models.Manager()  # default manager
    marks = MarksManager()  # active marks manager

    def __str__(self):
        return _("Prikk for %s") % self.title

    def save(self, *args, **kwargs):
        # Default added_date to today so callers may omit it.
        if not self.added_date:
            self.added_date = timezone.now().date()
        super(Mark, self).save(*args, **kwargs)

    def delete(self, **kwargs):
        # Capture the affected users before the cascade removes the MarkUser
        # rows, then rebuild each user's expiration chain.
        given_to = [mu.user for mu in self.given_to.all()]
        super(Mark, self).delete()
        for user in given_to:
            _fix_mark_history(user)

    class Meta(object):
        verbose_name = _("Prikk")
        verbose_name_plural = _("Prikker")
        permissions = (
            ('view_mark', 'View Mark'),
        )


class MarkUser(models.Model):
    """
    One entry for a user that has received a mark.
    """
    mark = models.ForeignKey(Mark, related_name="given_to", on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    expiration_date = models.DateField(_("utløpsdato"), editable=False)

    def save(self, *args, **kwargs):
        # Only rebuild the history when this is a brand-new entry (no
        # expiration date yet); updates triggered by _fix_mark_history itself
        # come in with the date already set and must not recurse.
        run_history_update = False
        if not self.expiration_date:
            self.expiration_date = timezone.now().date()
            run_history_update = True
        super(MarkUser, self).save(*args, **kwargs)
        if run_history_update:
            _fix_mark_history(self.user)

    def delete(self):
        super(MarkUser, self).delete()
        _fix_mark_history(self.user)

    def __str__(self):
        return _("Mark entry for user: %s") % self.user.get_full_name()

    class Meta:
        unique_together = ("user", "mark")
        ordering = ('expiration_date',)
        permissions = (
            ('view_userentry', 'View UserEntry'),
        )


def _fix_mark_history(user):
    """
    Goes through a users complete mark history and resets all expiration
    dates. The reasons for doing it this way is that the mark rules now
    insist on marks building on previous expiration dates if such exists.

    Instead of having the entire mark database be a linked list structure,
    it can be simplified to guarantee the integrity of the expiration dates
    by running this whenever;
    * new Mark is saved or deleted
    * a new MarkUser entry is made
    * an existing MarkUser entry is deleted
    """
    markusers = MarkUser.objects.filter(user=user).order_by('mark__added_date')

    last_expiry_date = None
    for entry in markusers:
        # If there's a last_expiry date, it means a mark has been processed
        # already. If that expiration date is within a DURATION of this added
        # date, build on it.
        if last_expiry_date and entry.mark.added_date - timedelta(days=DURATION) < last_expiry_date:
            entry.expiration_date = _get_with_duration_and_vacation(last_expiry_date)
        # If there is no last_expiry_date or the last expiry date is over a
        # DURATION old, we add DURATION days from the added date of the mark.
        else:
            entry.expiration_date = _get_with_duration_and_vacation(entry.mark.added_date)
        entry.save()
        last_expiry_date = entry.expiration_date


def _get_with_duration_and_vacation(added_date=None):
    """
    Checks whether the span of a marks duration needs to have
    vacation durations added.

    :param added_date: date (or datetime) the mark period starts from;
        defaults to the current time.
    """
    # BUG FIX: the signature used to be ``added_date=timezone.now()``.
    # Default arguments are evaluated ONCE at import time, so every defaulted
    # call in a long-running Django process reused the server's start-up
    # timestamp. Resolve "now" at call time via a None sentinel instead.
    if added_date is None:
        added_date = timezone.now()
    # isinstance() instead of ``type(...) == datetime`` also handles
    # datetime subclasses; a plain date passes through unchanged.
    if isinstance(added_date, datetime):
        added_date = added_date.date()

    # Add the duration
    expiry_date = added_date + timedelta(days=DURATION)

    # Set up the summer and winter vacations
    summer_start_date = date(added_date.year, SUMMER[0][0], SUMMER[0][1])
    summer_end_date = date(added_date.year, SUMMER[1][0], SUMMER[1][1])
    first_winter_start_date = date(added_date.year, WINTER[0][0], WINTER[0][1])
    first_winter_end_date = date(added_date.year + 1, WINTER[1][0], WINTER[1][1])
    second_winter_end_date = date(added_date.year, WINTER[1][0], WINTER[1][1])

    # If we're in the middle of summer, add the days remaining of summer
    if summer_start_date < added_date < summer_end_date:
        expiry_date += timedelta(days=(summer_end_date - added_date).days)
    # If the number of days between added_date and the beginning of summer
    # vacation is less than the duration, we need to add the length of summer
    # to the expiry date
    elif 0 < (summer_start_date - added_date).days < DURATION:
        expiry_date += timedelta(days=(summer_end_date - summer_start_date).days)
    # Same for middle of winter vacation, which will be at the end of the year
    elif first_winter_start_date < added_date < first_winter_end_date:
        expiry_date += timedelta(days=(first_winter_end_date - added_date).days)
    # And for before the vacation
    elif 0 < (first_winter_start_date - added_date).days < DURATION:
        expiry_date += timedelta(days=(first_winter_end_date - first_winter_start_date).days)
    # Then we need to check the edge case where now is between newyears and
    # end of winter vacation
    elif second_winter_end_date > added_date:
        expiry_date += timedelta(days=(second_winter_end_date - added_date).days)

    return expiry_date


class Suspension(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(_('tittel'), max_length=64)
    description = models.CharField(_("beskrivelse"), max_length=255)
    active = models.BooleanField(default=True)
    added_date = models.DateTimeField(auto_now=True, editable=False)
    expiration_date = models.DateField(_("utløpsdato"), null=True, blank=True)

    # Using id because foreign key to Payment caused circular dependencies
    payment_id = models.IntegerField(null=True, blank=True)

    def __str__(self):
        return "Suspension: " + str(self.user)

    # TODO URL
mit
raj-krishnan/PyAlpha
pyalpha/alpha/stock_lists.py
1
23247
""" Contains lists of stocks in the S&P100 and S&P500. NOTE ==== Some of the stocks have been commented out due to errors arising when on attempting to procure their data using ystockquote. ISSUE: https://github.com/cgoldberg/ystockquote/issues/43 """ SNP100 = [ "AAPL", # Apple Inc. "ABBV", # AbbVie Inc. "ABT", # Abbott Laboratories "ACN", # Accenture plc "AGN", # Allergan plc "AIG", # American International Group Inc. "ALL", # Allstate Corp. "AMGN", # Amgen Inc. "AMZN", # Amazon.com "AXP", # American Express Inc. "BA", # Boeing Co. "BAC", # Bank of America Corp "BIIB", # Biogen Idec "BK", # Bank of New York "BLK", # BlackRock Inc "BMY", # Bristol-Myers Squibb "BRK", # Berkshire Hathaway "C", # Citigroup Inc "CAT", # Caterpillar Inc "CELG", # Celgene Corp "CL", # Colgate-Palmolive Co. "CMCSA", # Comcast Corporation "COF", # Capital One Financial Corp. "COP", # ConocoPhillips "COST", # Costco "CSCO", # Cisco Systems "CVS", # CVS Caremark "CVX", # Chevron "DD", # DuPont "DHR", # Danaher "DIS", # The Walt Disney Company "DOW", # Dow Chemical "DUK", # Duke Energy "EMC", # EMC Corporation "EMR", # Emerson Electric Co. "EXC", # Exelon "F", # Ford Motor "FB", # Facebook "FDX", # FedEx "FOX", # Twenty-First Century Fox Inc "FOXA", # Twenty-First Century Fox Inc "GD", # General Dynamics "GE", # General Electric Co. "GILD", # Gilead Sciences "GM", # General Motors "GOOG", # Alphabet Inc # "GOOGL", # Alphabet Inc "GS", # Goldman Sachs "HAL", # Halliburton "HD", # Home Depot "HON", # Honeywell "IBM", # International Business Machines "INTC", # Intel Corporation "JNJ", # Johnson & Johnson Inc "JPM", # JP Morgan Chase & Co "KMI", # Kinder Morgan Inc/DE "KO", # The Coca-Cola Company "LLY", # Eli Lilly and Company "LMT", # Lockheed-Martin "LOW", # Lowe's "MA", # Mastercard Inc "MCD", # McDonald's Corp "MDLZ", # Mondelēz International "MDT", # Medtronic Inc. "MET", # Metlife Inc. "MMM", # 3M Company "MO", # Altria Group "MON", # Monsanto "MRK", # Merck & Co. 
"MS", # Morgan Stanley "MSFT", # Microsoft # "NEE", # NextEra Energy YSTOCKQUOTE ERROR "NKE", # Nike "ORCL", # Oracle Corporation "OXY", # Occidental Petroleum Corp. "PCLN", # Priceline Group Inc/The "PEP", # Pepsico Inc. "PFE", # Pfizer Inc "PG", # Procter & Gamble Co "PM", # Phillip Morris International # "PYPL", # PayPal Holdings YSTOCKQUOTE ERROR "QCOM", # Qualcomm Inc. "RTN", # Raytheon Company "SBUX", # Starbucks Corporation "SLB", # Schlumberger "SO", # Southern Company "SPG", # Simon Property Group, Inc. "T", # AT&T Inc "TGT", # Target Corp. "TWX", # Time Warner Inc. "TXN", # Texas Instruments "UNH", # UnitedHealth Group Inc. "UNP", # Union Pacific Corp. "UPS", # United Parcel Service Inc "USB", # US Bancorp "UTX", # United Technologies Corp "V", # Visa Inc. "VZ", # Verizon Communications Inc "WBA", # Walgreens Boots Alliance "WFC", # Wells Fargo "WMT", # Wal-Mart "XOM", # Exxon Mobil Corp ] SNP500 = [ "MMM", # 3M Company "ABT", # Abbott Laboratories "ABBV", # AbbVie "ACN", # Accenture plc "ATVI", # Activision Blizzard "AYI", # Acuity Brands Inc "ADBE", # Adobe Systems Inc "AAP", # Advance Auto Parts "AES", # AES Corp "AET", # Aetna Inc "AFL", # AFLAC Inc "AMG", # Affiliated Managers Group Inc "A", # Agilent Technologies Inc "APD", # Air Products & Chemicals Inc "AKAM", # Akamai Technologies Inc "ALK", # Alaska Air Group Inc "ALB", # Albemarle Corp "AGN", # Allergan plc "LNT", # Alliant Energy Corp "ALXN", # Alexion Pharmaceuticals "ALLE", # Allegion "ADS", # Alliance Data Systems "ALL", # Allstate Corp "GOOGL", # Alphabet Inc Class A "GOOG", # Alphabet Inc Class C "MO", # Altria Group Inc "AMZN", # Amazon.com Inc "AEE", # Ameren Corp "AAL", # American Airlines Group "AEP", # American Electric Power "AXP", # American Express Co "AIG", # American International Group, Inc. 
"AMT", # American Tower Corp A "AWK", # American Water Works Company Inc "AMP", # Ameriprise Financial "ABC", # AmerisourceBergen Corp "AME", # Ametek "AMGN", # Amgen Inc "APH", # Amphenol Corp A "APC", # Anadarko Petroleum Corp "ADI", # Analog Devices, Inc. "ANTM", # Anthem Inc. "AON", # Aon plc "APA", # Apache Corporation "AIV", # Apartment Investment & Mgmt "AAPL", # Apple Inc. "AMAT", # Applied Materials Inc "ADM", # Archer-Daniels-Midland Co "ARNC", # Arconic Inc "AJG", # Arthur J. Gallagher & Co. "AIZ", # Assurant Inc "T", # AT&T Inc "ADSK", # Autodesk Inc "ADP", # Automatic Data Processing "AN", # AutoNation Inc "AZO", # AutoZone Inc "AVB", # AvalonBay Communities, Inc. "AVY", # Avery Dennison Corp "BHI", # Baker Hughes Inc "BLL", # Ball Corp "BAC", # Bank of America Corp "BK", # The Bank of New York Mellon Corp. "BCR", # Bard (C.R.) Inc. "BAX", # Baxter International Inc. "BBT", # BB&T Corporation "BDX", # Becton Dickinson "BBBY", # Bed Bath & Beyond "BRK", # Berkshire Hathaway "BBY", # Best Buy Co. Inc. "BIIB", # BIOGEN IDEC Inc. "BLK", # BlackRock "HRB", # Block H&R "BA", # Boeing Company "BWA", # BorgWarner "BXP", # Boston Properties "BSX", # Boston Scientific "BMY", # Bristol-Myers Squibb "AVGO", # Broadcom "BF-B", # Brown-Forman Corporation "CHRW", # C. H. Robinson Worldwide "CA", # CA, Inc. "COG", # Cabot Oil & Gas "CPB", # Campbell Soup "COF", # Capital One Financial "CAH", # Cardinal Health Inc. "HSIC", # Henry Schein "KMX", # Carmax Inc "CCL", # Carnival Corp. "CAT", # Caterpillar Inc. "CBG", # CBRE Group "CBS", # CBS Corp. "CELG", # Celgene Corp. "CNC", # Centene Corporation "CNP", # CenterPoint Energy "CTL", # CenturyLink Inc "CERN", # Cerner "CF", # CF Industries Holdings Inc "SCHW", # Charles Schwab Corporation "CHTR", # Charter Communications "CHK", # Chesapeake Energy "CVX", # Chevron Corp. "CMG", # Chipotle Mexican Grill "CB", # Chubb Limited "CHD", # Church & Dwight "CI", # CIGNA Corp. 
"XEC", # Cimarex Energy "CINF", # Cincinnati Financial "CTAS", # Cintas Corporation "CSCO", # Cisco Systems "C", # Citigroup Inc. "CFG", # Citizens Financial Group "CTXS", # Citrix Systems "CLX", # The Clorox Company "CME", # CME Group Inc. "CMS", # CMS Energy "COH", # Coach Inc. "KO", # Coca Cola Company "CTSH", # Cognizant Technology Solutions "CL", # Colgate-Palmolive "CMCSA", # Comcast A Corp "CMA", # Comerica Inc. "CAG", # ConAgra Foods Inc. "CXO", # Concho Resources "COP", # ConocoPhillips "ED", # Consolidated Edison "STZ", # Constellation Brands "GLW", # Corning Inc. "COST", # Costco Co. "COTY", # Coty, Inc "CCI", # Crown Castle International Corp. # "CSRA", # CSRA Inc. "CSX", # CSX Corp. "CMI", # Cummins Inc. "CVS", # CVS Health "DHI", # D. R. Horton "DHR", # Danaher Corp. "DRI", # Darden Restaurants "DVA", # DaVita Inc. "DE", # Deere & Co. "DLPH", # Delphi Automotive "DAL", # Delta Air Lines "XRAY", # Dentsply Sirona "DVN", # Devon Energy Corp. "DLR", # Digital Realty Trust "DFS", # Discover Financial Services "DISCA", # Discovery Communications-A "DISCK", # Discovery Communications-C "DG", # Dollar General "DLTR", # Dollar Tree "D", # Dominion Resources "DOV", # Dover Corp. "DOW", # Dow Chemical "DPS", # Dr Pepper Snapple Group "DTE", # DTE Energy Co. "DD", # Du Pont (E.I.) "DUK", # Duke Energy "DNB", # Dun & Bradstreet "ETFC", # E*Trade "EMN", # Eastman Chemical "ETN", # Eaton Corporation "EBAY", # eBay Inc. "ECL", # Ecolab Inc. "EIX", # Edison Int'l "EW", # Edwards Lifesciences "EA", # Electronic Arts "EMR", # Emerson Electric Company "ENDP", # Endo International "ETR", # Entergy Corp. "EOG", # EOG Resources "EQT", # EQT Corporation "EFX", # Equifax Inc. "EQIX", # Equinix "EQR", # Equity Residential "ESS", # Essex Property Trust Inc "EL", # Estee Lauder Cos. "ES", # Eversource Energy "EXC", # Exelon Corp. "EXPE", # Expedia Inc. "EXPD", # Expeditors Int'l "ESRX", # Express Scripts "EXR", # Extra Space Storage "XOM", # Exxon Mobil Corp. 
"FFIV", # F5 Networks "FB", # Facebook "FAST", # Fastenal Co "FRT", # Federal Realty Investment Trust "FDX", # FedEx Corporation "FIS", # Fidelity National Information Services "FITB", # Fifth Third Bancorp "FSLR", # First Solar Inc "FE", # FirstEnergy Corp "FISV", # Fiserv Inc "FLIR", # FLIR Systems "FLS", # Flowserve Corporation "FLR", # Fluor Corp. "FMC", # FMC Corporation "FTI", # FMC Technologies Inc. "FL", # Foot Locker Inc "F", # Ford Motor # "FTV", # Fortive Corp YSTOCKQUOTE ERROR "FBHS", # Fortune Brands Home & Security "BEN", # Franklin Resources "FCX", # Freeport-McMoran Cp & Gld "FTR", # Frontier Communications "GPS", # Gap (The) "GRMN", # Garmin Ltd. "GD", # General Dynamics "GE", # General Electric "GGP", # General Growth Properties Inc. "GIS", # General Mills "GM", # General Motors "GPC", # Genuine Parts "GILD", # Gilead Sciences "GPN", # Global Payments Inc "GS", # Goldman Sachs Group "GT", # Goodyear Tire & Rubber "GWW", # Grainger (W.W.) Inc. "HAL", # Halliburton Co. "HBI", # Hanesbrands Inc "HOG", # Harley-Davidson "HAR", # Harman Int'l Industries "HRS", # Harris Corporation "HIG", # Hartford Financial Svc.Gp. "HAS", # Hasbro Inc. "HCA", # HCA Holdings "HCP", # HCP Inc. "HP", # Helmerich & Payne "HES", # Hess Corporation # "HPE", # Hewlett Packard Enterprise "HOLX", # Hologic "HD", # Home Depot "HON", # Honeywell Int'l Inc. "HRL", # Hormel Foods Corp. "HST", # Host Hotels & Resorts "HPQ", # HP Inc. "HUM", # Humana Inc. "HBAN", # Huntington Bancshares "ITW", # Illinois Tool Works "ILMN", # Illumina Inc "IR", # Ingersoll-Rand PLC "INTC", # Intel Corp. "ICE", # Intercontinental Exchange "IBM", # International Bus. Machines "IP", # International Paper "IPG", # Interpublic Group "IFF", # Intl Flavors & Fragrances "INTU", # Intuit Inc. "ISRG", # Intuitive Surgical Inc. "IVZ", # Invesco Ltd. "IRM", # Iron Mountain Incorporated "JEC", # Jacobs Engineering Group "JBHT", # J. B. 
Hunt Transport Services "SJM", # JM Smucker "JNJ", # Johnson & Johnson "JCI", # Johnson Controls International Plc "JPM", # JPMorgan Chase & Co. "JNPR", # Juniper Networks "KSU", # Kansas City Southern "K", # Kellogg Co. "KEY", # KeyCorp "KMB", # Kimberly-Clark "KIM", # Kimco Realty "KMI", # Kinder Morgan "KLAC", # KLA-Tencor Corp. "KSS", # Kohl's Corp. "KHC", # Kraft Heinz Co "KR", # Kroger Co. "LB", # L Brands Inc. "LLL", # L-3 Communications Holdings "LH", # Laboratory Corp. of America Holding "LRCX", # Lam Research "LM", # Legg Mason "LEG", # Leggett & Platt "LEN", # Lennar Corp. "LVLT", # Level 3 Communications "LUK", # Leucadia National Corp. "LLY", # Lilly (Eli) & Co. "LNC", # Lincoln National "LLTC", # Linear Technology Corp. "LKQ", # LKQ Corporation "LMT", # Lockheed Martin Corp. "L", # Loews Corp. "LOW", # Lowe's Cos. "LYB", # LyondellBasell "MTB", # M&T Bank Corp. "MAC", # Macerich "M", # Macy's Inc. "MNK", # Mallinckrodt Plc "MRO", # Marathon Oil Corp. "MPC", # Marathon Petroleum "MAR", # Marriott Int'l. "MMC", # Marsh & McLennan "MLM", # Martin Marietta Materials "MAS", # Masco Corp. "MAS", # Masco Corp. "MA", # Mastercard Inc. "MAT", # Mattel Inc. "MKC", # McCormick & Co. "MCD", # McDonald's Corp. "MCK", # McKesson Corp. "MJN", # Mead Johnson "MDT", # Medtronic plc "MRK", # Merck & Co. "MET", # MetLife Inc. "MTD", # Mettler Toledo "KORS", # Michael Kors Holdings "MCHP", # Microchip Technology "MU", # Micron Technology "MSFT", # Microsoft Corp. "MHK", # Mohawk Industries "TAP", # Molson Coors Brewing Company "MDLZ", # Mondelez International "MON", # Monsanto Co. "MNST", # Monster Beverage "MCO", # Moody's Corp "MS", # Morgan Stanley "MOS", # The Mosaic Company "MSI", # Motorola Solutions Inc. "MUR", # Murphy Oil "MYL", # Mylan N.V. "NDAQ", # NASDAQ OMX Group "NOV", # National Oilwell Varco Inc. "NAVI", # Navient "NTAP", # NetApp "NFLX", # Netflix Inc. "NWL", # Newell Rubbermaid Co. "NFX", # Newfield Exploration Co "NEM", # Newmont Mining Corp. (Hldg. 
Co.) "NWSA", # News Corp. Class A "NWS", # News Corp. Class B # "NEE", # NextEra Energy YSTOCKQUOTE ERROR "NLSN", # Nielsen Holdings "NKE", # Nike "NI", # NiSource Inc. "NBL", # Noble Energy Inc "JWN", # Nordstrom "NSC", # Norfolk Southern Corp. "NTRS", # Northern Trust Corp. "NOC", # Northrop Grumman Corp. "NRG", # NRG Energy "NUE", # Nucor Corp. "NVDA", # Nvidia Corporation "ORLY", # O'Reilly Automotive "OXY", # Occidental Petroleum "OMC", # Omnicom Group "OKE", # ONEOK "ORCL", # Oracle Corp. "OI", # Owens-Illinois Inc "PCAR", # PACCAR Inc. "PH", # Parker-Hannifin "PDCO", # Patterson Companies "PAYX", # Paychex Inc. # "PYPL", # PayPal YSTOCKQUOTE ERROR "PNR", # Pentair Ltd. "PBCT", # People's United Financial "PEP", # PepsiCo Inc. "PKI", # PerkinElmer "PRGO", # Perrigo "PFE", # Pfizer Inc. "PCG", # PG&E Corp. "PM", # Philip Morris International "PSX", # Phillips 66 "PNW", # Pinnacle West Capital "PXD", # Pioneer Natural Resources "PBI", # Pitney-Bowes "PNC", # PNC Financial Services "RL", # Polo Ralph Lauren Corp. "PPG", # PPG Industries "PPL", # PPL Corp. "PX", # Praxair Inc. "PCLN", # Priceline.com Inc "PFG", # Principal Financial Group "PG", # Procter & Gamble "PGR", # Progressive Corp. "PLD", # Prologis "PRU", # Prudential Financial "PEG", # Public Serv. Enterprise Inc. "PSA", # Public Storage "PHM", # Pulte Homes Inc. "PVH", # PVH Corp. "QRVO", # Qorvo "PWR", # Quanta Services Inc. "QCOM", # QUALCOMM Inc. "DGX", # Quest Diagnostics "RRC", # Range Resources Corp. "RTN", # Raytheon Co. "O", # Realty Income Corporation "RHT", # Red Hat Inc. "REGN", # Regeneron "RF", # Regions Financial Corp. "RSG", # Republic Services Inc "RAI", # Reynolds American Inc. "RHI", # Robert Half International "ROK", # Rockwell Automation Inc. "COL", # Rockwell Collins "ROP", # Roper Industries "ROST", # Ross Stores "RCL", # Royal Caribbean Cruises Ltd "R", # Ryder System "CRM", # Salesforce.com "SCG", # SCANA Corp "SLB", # Schlumberger Ltd. "SNI", # Scripps Networks Interactive Inc. 
"STX", # Seagate Technology "SEE", # Sealed Air "SRE", # Sempra Energy "SHW", # Sherwin-Williams "SIG", # Signet Jewelers "SPG", # Simon Property Group Inc "SWKS", # Skyworks Solutions "SLG", # SL Green Realty "SNA", # Snap-On Inc. "SO", # Southern Co. "LUV", # Southwest Airlines "SWN", # Southwestern Energy "SE", # Spectra Energy Corp. "SPGI", # S&P Global, Inc. "STJ", # St Jude Medical "SWK", # Stanley Black & Decker "SPLS", # Staples Inc. "SBUX", # Starbucks Corp. "STT", # State Street Corp. "SRCL", # Stericycle Inc "SYK", # Stryker Corp. "STI", # SunTrust Banks "SYMC", # Symantec Corp. "SYF", # Synchrony Financial "SYY", # Sysco Corp. "TROW", # T. Rowe Price Group "TGT", # Target Corp. "TEL", # TE Connectivity Ltd. "TGNA", # Tegna "TDC", # Teradata Corp. "TSO", # Tesoro Petroleum Co. "TXN", # Texas Instruments "TXT", # Textron Inc. "COO", # The Cooper Companies "HSY", # The Hershey Company "TRV", # The Travelers Companies Inc. "TMO", # Thermo Fisher Scientific "TIF", # Tiffany & Co. "TWX", # Time Warner Inc. "TJX", # TJX Companies Inc. "TMK", # Torchmark Corp. "TSS", # Total System Services "TSCO", # Tractor Supply Company "TDG", # TransDigm Group "RIG", # Transocean "TRIP", # TripAdvisor "FOXA", # Twenty-First Century Fox Class A "FOX", # Twenty-First Century Fox Class B "TSN", # Tyson Foods "UDR", # UDR Inc "ULTA", # Ulta Salon Cosmetics & Fragrance Inc "USB", # U.S. Bancorp "UA", # Under Armour # "UA.C", # Under Armour YSTOCKQUOTE ERROR "UNP", # Union Pacific "UAL", # United Continental Holdings "UNH", # United Health Group Inc. "UPS", # United Parcel Service "URI", # United Rentals, Inc. "UTX", # United Technologies "UHS", # Universal Health Services, Inc. "UNM", # Unum Group "URBN", # Urban Outfitters "VFC", # V.F. Corp. "VLO", # Valero Energy "VAR", # Varian Medical Systems "VTR", # Ventas Inc "VRSN", # Verisign Inc. "VRSK", # Verisk Analytics "VZ", # Verizon Communications "VRTX", # Vertex Pharmaceuticals Inc "VIAB", # Viacom Inc. "V", # Visa Inc. 
"VNO", # Vornado Realty Trust "VMC", # Vulcan Materials "WMT", # Wal-Mart Stores "WBA", # Walgreens Boots Alliance "DIS", # The Walt Disney Company "WM", # Waste Management Inc. "WAT", # Waters Corporation "WFC", # Wells Fargo "HCN", # Welltower Inc. "WDC", # Western Digital "WU", # Western Union Co "WRK", # Westrock Co "WY", # Weyerhaeuser Corp. "WHR", # Whirlpool Corp. "WFM", # Whole Foods Market "WMB", # Williams Cos. # "WLTW", # Willis Towers Watson YSTOCKQUOTE ERROR "WEC", # Wisconsin Energy Corporation "WYN", # Wyndham Worldwide "WYNN", # Wynn Resorts Ltd "XEL", # Xcel Energy Inc "XRX", # Xerox Corp. "XLNX", # Xilinx Inc "XL", # XL Capital "XYL", # Xylem Inc. "YHOO", # Yahoo Inc. "YUM", # Yum! Brands Inc "ZBH", # Zimmer Biomet Holdings "ZION", # Zions Bancorp "ZTS", # Zoetis ]
apache-2.0
PXke/invenio
invenio/legacy/websubmit/functions/Print_Success_CPLX.py
4
2815
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

## Description:   function Print_Success_CPLX
##                This function outputs a message telling the user his/her
##                request was taken into account.
## Author:        A.Voitier
## PARAMETERS:    -

import os
import re

from invenio.legacy.dbquery import run_sql


def Print_Success_CPLX(parameters, curdir, form, user_info=None):
    """Return an HTML confirmation message for a complex-approval request.

    Reads the submission's group name from the ``Group`` file in *curdir*,
    resolves it to an id in the ``usergroup`` table, then either re-arms an
    existing ``sbmCPLXAPPROVAL`` row (status back to 'waiting') or reports
    that a fresh notification was sent, depending on whether a row for
    (doctype, categ, rn, act, group) already exists.

    NOTE(review): ``rn`` is a module-level global presumably injected by the
    WebSubmit engine before this function runs -- confirm against the
    framework; it is never assigned here.
    """
    global rn
    act = form['act']
    doctype = form['doctype']
    # The category is taken as the third dash-separated token of the report
    # number (raises IndexError if rn has fewer than three components).
    category = rn.split('-')
    categ = category[2]
    # Path of file containing group
    group_id = ""
    if os.path.exists("%s/%s" % (curdir, 'Group')):
        fp = open("%s/%s" % (curdir, 'Group'), "r")
        group = fp.read()
        # Normalise the stored group name: '/' is replaced, newlines stripped.
        group = group.replace("/", "_")
        group = re.sub("[\n\r]+", "", group)
        # First matching row's id; raises IndexError if the group is unknown.
        group_id = run_sql("""SELECT id FROM usergroup WHERE name = %s""", (group,))[0][0]
    else:
        # No Group file: nothing to confirm for a complex approval.
        return ""
    t = "<br /><br /><B>Your request has been taken into account!</B><br /><br />"
    sth = run_sql("SELECT rn FROM sbmCPLXAPPROVAL WHERE doctype=%s and categ=%s and rn=%s and type=%s and id_group=%s",
                  (doctype, categ, rn, act, group_id))
    if not len(sth) == 0:
        # A request already exists: refresh its timestamp and reset status.
        run_sql("UPDATE sbmCPLXAPPROVAL SET dLastReq=NOW(), status='waiting', dProjectLeaderAction='' WHERE doctype=%s and categ=%s and rn=%s and type=%s and id_group=%s",
                (doctype, categ, rn, act, group_id))
        if (act == "RRP") or (act == "RPB"):
            t += "NOTE: Approval has already been requested for this document. You will be warned by email as soon as the Project Leader takes his/her decision regarding your document.<br /><br />"
    else:
        # First request for this (doctype, categ, rn, act, group) tuple.
        if (act == "RRP") or (act == "RPB"):
            t += "A notification has been sent to the Publication Committee Chair. You will be notified by email as soon as the Project Leader makes his/her decision regarding your document."
        if act == "RDA":
            t += "An email has been sent to the Project Leader. You will be warned by email as soon as the Project Leader takes his/her decision regarding your document.<br /><br />"
    return t
gpl-2.0
drtconway/bioinfosummer2016
solutions/exercise-2/mlst.py
1
2255
"""MLST typing from k-mer sets.

Usage: mlst.py <allele FASTA files...> <profile TSV> <kfset files...>

Builds a k-mer index over the allele FASTA files, loads the ST profile
table, then for each pre-computed k-mer set file reports the matching
sequence type (or the list of alleles found, if the combination is not a
known profile).  Written for Python 2 (uses print statements).
"""

from pykmer.basics import kmers
from pykmer.file import readFasta
import pykmer.kfset as kfset

import gzip
import sys


def isFasta(nm):
    """does this filename look like a FASTA file?"""
    if nm.endswith(".fa"):
        return True
    if nm.endswith(".fas"):
        return True
    if nm.endswith(".fasta"):
        return True
    if nm.endswith(".fna"):
        return True
    return False


# k-mer length used for both indexing and querying.
K = 27

# Step 1
# Index the alleles: idx maps each k-mer to the set of allele names that
# contain it; lens records how many distinct k-mers each allele has.
idx = {}
lens = {}
# profArg ends up pointing at the first non-FASTA argument (the profile file).
profArg = 1
for loc in sys.argv[1:]:
    if not isFasta(loc):
        break
    with open(loc) as f:
        for (nm, seq) in readFasta(f):
            # canonical k-mers (third arg True -- presumably canonicalise;
            # confirm against pykmer.basics.kmers)
            xs = set(kmers(K, seq, True))
            for x in xs:
                if x not in idx:
                    idx[x] = set([])
                idx[x].add(nm)
            lens[nm] = len(xs)
    profArg += 1

# Step 2,
# Index the profile tuples: map a 7-tuple of allele names to its ST id.
# The first data column (t[1]) is skipped -- TODO confirm column layout of
# the profile TSV against its header row.
profiles = {}
headers = None
with open(sys.argv[profArg]) as f:
    for l in f:
        t = l.strip().split('\t')
        if headers is None:
            # First row is the header; remember and skip it.
            headers = t
            continue
        st = t[0]
        aroC = "AROC" + t[2]
        dnaN = "DNAN" + t[3]
        hemD = "HEMD" + t[4]
        hisD = "HISD" + t[5]
        purE = "PURE" + t[6]
        sucA = "SUCA" + t[7]
        thrA = "THRA" + t[8]
        prof = (aroC, dnaN, hemD, hisD, purE, sucA, thrA)
        profiles[prof] = st

# Step 3: query each k-mer set file against the allele index.
for fn in sys.argv[profArg + 1:]:
    seen = set([])
    # partial counts down, per allele, how many of its k-mers remain unseen.
    partial = {}
    results = []
    (_, xs) = kfset.read(fn)
    for (x, _) in xs:
        if x not in idx:
            continue
        # k-mer was in the index,
        # but we've already processed it.
        if x in seen:
            continue
        seen.add(x)
        for nm in idx[x]:
            if nm not in partial:
                # First k-mer for the allele.
                # Initialize with the number
                # of k-mers in the allele.
                partial[nm] = lens[nm]
            partial[nm] -= 1
            if partial[nm] == 0:
                # Bingo! We found all the
                # k-mers in the allele.
                results.append(nm)
    results.sort()
    r = tuple(results)
    if r in profiles:
        print fn + '\t' + profiles[r]
    else:
        print fn + '\t' + 'unknown:' + '\t'.join(results)
apache-2.0
alberto-antonietti/nest-simulator
pynest/examples/hh_phaseplane.py
12
5096
# -*- coding: utf-8 -*-
#
# hh_phaseplane.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

"""Numerical phase-plane analysis of the Hodgkin-Huxley neuron
----------------------------------------------------------------

hh_phaseplane makes a numerical phase-plane analysis of the Hodgkin-Huxley
neuron (``hh_psc_alpha``). Dynamics is investigated in the V-n space (see
remark below). A constant DC can be specified  and its influence on the
nullclines can be studied.

Remark
~~~~~~~~

To make the two-dimensional analysis possible, the (four-dimensional)
Hodgkin-Huxley formalism needs to be artificially reduced to two dimensions,
in this case by 'clamping' the two other variables, `m` and `h`, to
constant values (`m_eq` and `h_eq`).
"""

import nest
import numpy as np
from matplotlib import pyplot as plt

amplitude = 100.  # Set externally applied current amplitude in pA
dt = 0.1  # simulation step length [ms]

v_min = -100.  # Min membrane potential
v_max = 42.  # Max membrane potential
n_min = 0.1  # Min inactivation variable
n_max = 0.81  # Max inactivation variable
delta_v = 2.  # Membrane potential step length
delta_n = 0.01  # Inactivation variable step length

V_vec = np.arange(v_min, v_max, delta_v)
n_vec = np.arange(n_min, n_max, delta_n)

num_v_steps = len(V_vec)
num_n_steps = len(n_vec)

nest.ResetKernel()
nest.set_verbosity('M_ERROR')

nest.SetKernelStatus({'resolution': dt})
neuron = nest.Create('hh_psc_alpha')

# Numerically obtain equilibrium state
nest.Simulate(1000)

m_eq = neuron[0].Act_m
h_eq = neuron[0].Inact_h

neuron.I_e = amplitude  # Apply external current

# Scan state space
print('Scanning phase space')

# Rows index n (axis 0), columns index V (axis 1).
V_matrix = np.zeros([num_n_steps, num_v_steps])
n_matrix = np.zeros([num_n_steps, num_v_steps])

# pp_data will contain the phase-plane data as a vector field
pp_data = np.zeros([num_n_steps * num_v_steps, 4])

count = 0
for i, V in enumerate(V_vec):
    for j, n in enumerate(n_vec):
        # Set V_m and n, keeping m and h clamped at their equilibrium values
        neuron.set(V_m=V, Act_n=n, Act_m=m_eq, Inact_h=h_eq)
        # Find state
        V_m = neuron[0].V_m
        Act_n = neuron[0].Act_n

        # Simulate a short while
        nest.Simulate(dt)

        # Find difference between new state and old state
        V_m_new = neuron[0].V_m - V
        Act_n_new = neuron[0].Act_n - n

        # Store in vector for later analysis
        V_matrix[j, i] = abs(V_m_new)
        n_matrix[j, i] = abs(Act_n_new)
        pp_data[count] = np.array([V_m, Act_n, V_m_new, Act_n_new])

        if count % 10 == 0:
            # Write updated state next to old state
            print('')
            print('Vm:  \t', V_m)
            print('new Vm:\t', V_m_new)
            print('Act_n:', Act_n)
            print('new Act_n:', Act_n_new)

        count += 1

# Set state for AP generation
neuron.set(V_m=-34., Act_n=0.2, Act_m=m_eq, Inact_h=h_eq)

print('')
print('AP-trajectory')
# ap will contain the trace of a single action potential as one possible
# numerical solution in the vector field
ap = np.zeros([1000, 2])
for i in range(1, 1001):
    # Find state
    V_m = neuron[0].V_m
    Act_n = neuron[0].Act_n

    if i % 10 == 0:
        # Write new state next to old state
        print('Vm: \t', V_m)
        print('Act_n:', Act_n)
    ap[i - 1] = np.array([V_m, Act_n])

    # Simulate again
    neuron.set(Act_m=m_eq, Inact_h=h_eq)
    nest.Simulate(dt)

# Make analysis
print('')
print('Plot analysis')

nullcline_V = []
nullcline_n = []

print('Searching nullclines')
for i in range(0, len(V_vec)):
    # BUGFIX: the original used V_matrix[:][i], which copies the matrix and
    # then takes ROW i (a fixed n, all V) instead of COLUMN i (all n at the
    # i-th V).  The nullcline wants, for each V, the n minimising |dV/dt|,
    # i.e. the column -> use proper 2-D slicing [:, i].
    index = np.nanargmin(V_matrix[:, i])
    if index != 0 and index != len(n_vec):
        nullcline_V.append([V_vec[i], n_vec[index]])
    index = np.nanargmin(n_matrix[:, i])
    if index != 0 and index != len(n_vec):
        nullcline_n.append([V_vec[i], n_vec[index]])

print('Plotting vector field')
factor = 0.1
for i in range(0, np.shape(pp_data)[0], 3):
    plt.plot([pp_data[i][0], pp_data[i][0] + factor * pp_data[i][2]],
             [pp_data[i][1], pp_data[i][1] + factor * pp_data[i][3]],
             color=[0.6, 0.6, 0.6])

# BUGFIX: nullcline_V[:][0] is just nullcline_V[0] (the first [V, n] point),
# so the original plotted a single point instead of the curve.  Convert to an
# array and plot the V column against the n column; guard against an empty
# nullcline (no interior minimum found).
nullcline_V = np.asarray(nullcline_V)
nullcline_n = np.asarray(nullcline_n)
if nullcline_V.size:
    plt.plot(nullcline_V[:, 0], nullcline_V[:, 1], linewidth=2.0)
if nullcline_n.size:
    plt.plot(nullcline_n[:, 0], nullcline_n[:, 1], linewidth=2.0)

plt.xlim([V_vec[0], V_vec[-1]])
plt.ylim([n_vec[0], n_vec[-1]])

# BUGFIX: same slicing error for the AP trace -- ap[:, 0] / ap[:, 1] are the
# V and n coordinate columns; ap[:][0] was only the first sample.
plt.plot(ap[:, 0], ap[:, 1], color='black', linewidth=1.0)

plt.xlabel('Membrane potential V [mV]')
plt.ylabel('Inactivation variable n')
plt.title('Phase space of the Hodgkin-Huxley Neuron')

plt.show()
gpl-2.0
sunze/py_flask
migrations/versions/41a93799080_.py
1
2207
"""empty message Revision ID: 41a93799080 Revises: 1a9765a646b Create Date: 2015-07-02 14:38:39.418104 """ # revision identifiers, used by Alembic. revision = '41a93799080' down_revision = '1a9765a646b' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('comment', sa.Column('id', sa.Integer(), nullable=False), sa.Column('body', sa.Text(), nullable=True), sa.Column('body_html', sa.Text(), nullable=True), sa.Column('ctime', sa.Integer(), nullable=True), sa.Column('mtime', sa.Integer(), nullable=True), sa.Column('status', sa.Integer(), nullable=True), sa.Column('uid', sa.Integer(), nullable=True), sa.Column('reply', sa.Integer(), nullable=True), sa.Column('pid', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['pid'], ['post.id'], ), sa.ForeignKeyConstraint(['reply'], ['user.id'], ), sa.ForeignKeyConstraint(['uid'], ['user.id'], ), sa.PrimaryKeyConstraint('id') ) op.drop_table('comments') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! 
### op.create_table('comments', sa.Column('id', mysql.INTEGER(display_width=11), nullable=False), sa.Column('body', mysql.TEXT(), nullable=True), sa.Column('body_html', mysql.TEXT(), nullable=True), sa.Column('ctime', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True), sa.Column('mtime', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True), sa.Column('status', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True), sa.Column('uid', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True), sa.Column('pid', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True), sa.ForeignKeyConstraint(['pid'], ['post.id'], name='comments_ibfk_1'), sa.ForeignKeyConstraint(['uid'], ['user.id'], name='comments_ibfk_2'), sa.PrimaryKeyConstraint('id'), mysql_default_charset='latin1', mysql_engine='InnoDB' ) op.drop_table('comment') ### end Alembic commands ###
mit
PyGithub/PyGithub
tests/GitBlob.py
3
3579
############################ Copyrights and license ############################
#                                                                              #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net>                 #
# Copyright 2012 Zearin <zearin@gonk.net>                                      #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net>                 #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com>                                #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com>          #
# Copyright 2017 Simon <spam@esemi.ru>                                         #
# Copyright 2018 sfdye <tsfdye@gmail.com>                                      #
#                                                                              #
# This file is part of PyGithub.                                               #
# http://pygithub.readthedocs.io/                                              #
#                                                                              #
# PyGithub is free software: you can redistribute it and/or modify it under    #
# the terms of the GNU Lesser General Public License as published by the Free  #
# Software Foundation, either version 3 of the License, or (at your option)    #
# any later version.                                                           #
#                                                                              #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details.                                                                     #
#                                                                              #
# You should have received a copy of the GNU Lesser General Public License     #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
#                                                                              #
################################################################################

from . import Framework


class GitBlob(Framework.TestCase):
    """Replay-based tests for the GitBlob wrapper (Framework.TestCase
    presumably replays recorded HTTP fixtures rather than hitting the API).
    """

    def setUp(self):
        # Fetch a fixed blob of the PyGithub repository; its sha pins the
        # fixture so the attribute values below are stable.
        super().setUp()
        self.blob = (
            self.g.get_user()
            .get_repo("PyGithub")
            .get_git_blob("53bce9fa919b4544e67275089b3ec5b44be20667")
        )

    def testAttributes(self):
        # content is base64 with embedded newlines (as returned by the API),
        # hence the encoded length (1757) differs from the decoded size (1295).
        self.assertTrue(
            self.blob.content.startswith(
                "IyEvdXNyL2Jpbi9lbnYgcHl0aG9uCgpmcm9tIGRpc3R1dGlscy5jb3JlIGlt\ncG9ydCBzZXR1cAppbXBvcnQgdGV4dHdyYXAKCnNldHVwKAogICAgbmFtZSA9\n"
            )
        )
        self.assertTrue(
            self.blob.content.endswith(
                "Z3JhbW1pbmcgTGFuZ3VhZ2UgOjogUHl0aG9uIiwKICAgICAgICAiVG9waWMg\nOjogU29mdHdhcmUgRGV2ZWxvcG1lbnQiLAogICAgXSwKKQo=\n"
            )
        )
        self.assertEqual(len(self.blob.content), 1757)
        self.assertEqual(self.blob.encoding, "base64")
        self.assertEqual(self.blob.size, 1295)
        self.assertEqual(self.blob.sha, "53bce9fa919b4544e67275089b3ec5b44be20667")
        self.assertEqual(
            self.blob.url,
            "https://api.github.com/repos/jacquev6/PyGithub/git/blobs/53bce9fa919b4544e67275089b3ec5b44be20667",
        )
        self.assertEqual(
            repr(self.blob),
            'GitBlob(sha="53bce9fa919b4544e67275089b3ec5b44be20667")',
        )
lgpl-3.0
jordanemedlock/psychtruths
temboo/Library/Klout/User/Score.py
5
3024
# -*- coding: utf-8 -*-

###############################################################################
#
# Score
# Retrieves a user's Klout Score and deltas.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json


class Score(Choreography):
    """Temboo Choreo wrapper for the Klout 'Score' endpoint (auto-generated
    binding; the actual HTTP work happens in the base Choreography class)."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the Score Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(Score, self).__init__(temboo_session, '/Library/Klout/User/Score')

    def new_input_set(self):
        # Factory for the input container expected by execute().
        return ScoreInputSet()

    def _make_result_set(self, result, path):
        return ScoreResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return ScoreChoreographyExecution(session, exec_id, path)


class ScoreInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the Score
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Klout.)
        """
        super(ScoreInputSet, self)._set_input('APIKey', value)

    def set_KloutID(self, value):
        """
        Set the value of the KloutID input for this Choreo. ((required, string) The id for a Klout user to retrieve a score for.)
        """
        super(ScoreInputSet, self)._set_input('KloutID', value)


class ScoreResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the Score Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): parameter name 'str' shadows the builtin; kept as-is
        # because this file is generated and callers may pass it by keyword.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Klout.)
        """
        return self._output.get('Response', None)


class ScoreChoreographyExecution(ChoreographyExecution):
    """Handle for an in-flight Score execution; builds typed result sets."""

    def _make_result_set(self, response, path):
        return ScoreResultSet(response, path)
apache-2.0
fritsvanveen/QGIS
python/plugins/processing/algs/qgis/DeleteColumn.py
1
2904
# -*- coding: utf-8 -*-

"""
***************************************************************************
    DeleteColumn.py
    ---------------------
    Date                 : May 2010
    Copyright            : (C) 2010 by Michael Minn
    Email                : pyqgis at michaelminn dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

from qgis.core import QgsFeature

from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector


class DeleteColumn(GeoAlgorithm):
    """Processing algorithm that writes a copy of a vector layer with one
    attribute column removed."""

    INPUT = 'INPUT'
    COLUMN = 'COLUMN'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        """Declare name, group, parameters and output of the algorithm."""
        self.name, self.i18n_name = self.trAlgorithm('Delete column')
        self.group, self.i18n_group = self.trAlgorithm('Vector table tools')

        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer')))
        self.addParameter(ParameterTableField(self.COLUMN,
                                              self.tr('Field to delete'), self.INPUT))

        self.addOutput(OutputVector(self.OUTPUT, self.tr('Deleted column')))

    def processAlgorithm(self, progress):
        """Copy every feature to the output, dropping the selected field.

        :param progress: processing feedback object used to report percent
            completion.
        """
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT))

        idx = layer.fieldNameIndex(self.getParameterValue(self.COLUMN))

        fields = layer.fields()
        fields.remove(idx)

        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
                                                                     layer.wkbType(), layer.crs())

        features = vector.features(layer)
        # BUGFIX: guard against an empty layer -- the original computed
        # 100.0 / len(features) unconditionally and raised ZeroDivisionError
        # when the layer (or its selection) contained no features.
        count = len(features)
        total = 100.0 / count if count else 0.0

        feat = QgsFeature()
        for current, f in enumerate(features):
            feat.setGeometry(f.geometry())
            attributes = f.attributes()
            del attributes[idx]
            feat.setAttributes(attributes)
            writer.addFeature(feat)

            progress.setPercentage(int(current * total))

        del writer
gpl-2.0
glatard/nipype
nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py
9
4045
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
# Verifies that the trait metadata of VBRAINSDemonWarp's input/output specs
# matches the values recorded when this file was generated.
from nipype.testing import assert_equal
from nipype.interfaces.slicer.registration.specialized import VBRAINSDemonWarp


def test_VBRAINSDemonWarp_inputs():
    # Expected metadata per input trait; each yielded assert compares one
    # (trait, metakey) pair so failures are reported individually (nose-style).
    input_map = dict(args=dict(argstr='%s',
    ),
    arrayOfPyramidLevelIterations=dict(argstr='--arrayOfPyramidLevelIterations %s',
    sep=',',
    ),
    backgroundFillValue=dict(argstr='--backgroundFillValue %d',
    ),
    checkerboardPatternSubdivisions=dict(argstr='--checkerboardPatternSubdivisions %s',
    sep=',',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s',
    ),
    fixedVolume=dict(argstr='--fixedVolume %s...',
    ),
    gradient_type=dict(argstr='--gradient_type %s',
    ),
    gui=dict(argstr='--gui ',
    ),
    histogramMatch=dict(argstr='--histogramMatch ',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    initializeWithDisplacementField=dict(argstr='--initializeWithDisplacementField %s',
    ),
    initializeWithTransform=dict(argstr='--initializeWithTransform %s',
    ),
    inputPixelType=dict(argstr='--inputPixelType %s',
    ),
    interpolationMode=dict(argstr='--interpolationMode %s',
    ),
    lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d',
    ),
    makeBOBF=dict(argstr='--makeBOBF ',
    ),
    max_step_length=dict(argstr='--max_step_length %f',
    ),
    medianFilterSize=dict(argstr='--medianFilterSize %s',
    sep=',',
    ),
    minimumFixedPyramid=dict(argstr='--minimumFixedPyramid %s',
    sep=',',
    ),
    minimumMovingPyramid=dict(argstr='--minimumMovingPyramid %s',
    sep=',',
    ),
    movingBinaryVolume=dict(argstr='--movingBinaryVolume %s',
    ),
    movingVolume=dict(argstr='--movingVolume %s...',
    ),
    neighborhoodForBOBF=dict(argstr='--neighborhoodForBOBF %s',
    sep=',',
    ),
    numberOfBCHApproximationTerms=dict(argstr='--numberOfBCHApproximationTerms %d',
    ),
    numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d',
    ),
    numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d',
    ),
    numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d',
    ),
    numberOfThreads=dict(argstr='--numberOfThreads %d',
    ),
    outputCheckerboardVolume=dict(argstr='--outputCheckerboardVolume %s',
    hash_files=False,
    ),
    outputDebug=dict(argstr='--outputDebug ',
    ),
    outputDisplacementFieldPrefix=dict(argstr='--outputDisplacementFieldPrefix %s',
    ),
    outputDisplacementFieldVolume=dict(argstr='--outputDisplacementFieldVolume %s',
    hash_files=False,
    ),
    outputNormalized=dict(argstr='--outputNormalized ',
    ),
    outputPixelType=dict(argstr='--outputPixelType %s',
    ),
    outputVolume=dict(argstr='--outputVolume %s',
    hash_files=False,
    ),
    promptUser=dict(argstr='--promptUser ',
    ),
    registrationFilterType=dict(argstr='--registrationFilterType %s',
    ),
    seedForBOBF=dict(argstr='--seedForBOBF %s',
    sep=',',
    ),
    smoothDisplacementFieldSigma=dict(argstr='--smoothDisplacementFieldSigma %f',
    ),
    terminal_output=dict(nohash=True,
    ),
    upFieldSmoothing=dict(argstr='--upFieldSmoothing %f',
    ),
    upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d',
    ),
    use_vanilla_dem=dict(argstr='--use_vanilla_dem ',
    ),
    weightFactors=dict(argstr='--weightFactors %s',
    sep=',',
    ),
    )
    inputs = VBRAINSDemonWarp.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value


def test_VBRAINSDemonWarp_outputs():
    # Output traits carry no metadata; only their presence is checked.
    output_map = dict(outputCheckerboardVolume=dict(),
    outputDisplacementFieldVolume=dict(),
    outputVolume=dict(),
    )
    outputs = VBRAINSDemonWarp.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
willusher/ansible-modules-core
system/authorized_key.py
7
17223
#!/usr/bin/python # -*- coding: utf-8 -*- """ Ansible module to add authorized_keys for ssh logins. (c) 2012, Brad Olson <brado@movedbylight.com> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ DOCUMENTATION = ''' --- module: authorized_key short_description: Adds or removes an SSH authorized key description: - "Adds or removes SSH authorized keys for particular user accounts" version_added: "0.5" options: user: description: - The username on the remote host whose authorized_keys file will be modified required: true key: description: - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) required: true path: description: - Alternate path to the authorized_keys file required: false default: "(homedir)+/.ssh/authorized_keys" version_added: "1.2" manage_dir: description: - Whether this module should manage the directory of the authorized key file. If set, the module will create the directory, as well as set the owner and permissions of an existing directory. Be sure to set C(manage_dir=no) if you are using an alternate directory for authorized_keys, as set with C(path), since you could lock yourself out of SSH access. See the example below. 
required: false choices: [ "yes", "no" ] default: "yes" version_added: "1.2" state: description: - Whether the given key (with the given key_options) should or should not be in the file required: false choices: [ "present", "absent" ] default: "present" key_options: description: - A string of ssh key options to be prepended to the key in the authorized_keys file required: false default: null version_added: "1.4" exclusive: description: - Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys can be specified in a single C(key) string value by separating them by newlines. - This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a single batch as mentioned above. required: false choices: [ "yes", "no" ] default: "no" version_added: "1.9" validate_certs: description: - This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated. - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. - Prior to 2.1 the code worked as if this was set to C(yes). 
required: false default: "yes" choices: ["yes", "no"] version_added: "2.1" author: "Ansible Core Team" ''' EXAMPLES = ''' # Example using key data from a local file on the management machine - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" # Using github url as key source - authorized_key: user=charlie key=https://github.com/charlie.keys # Using alternate directory locations: - authorized_key: user: charlie key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" path: '/etc/ssh/authorized_keys/charlie' manage_dir: no # Using with_file - name: Set up authorized_keys for the deploy user authorized_key: user=deploy key="{{ item }}" with_file: - public_keys/doe-jane - public_keys/doe-john # Using key_options: - authorized_key: user: charlie key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options: 'no-port-forwarding,from="10.0.1.1"' # Using validate_certs: - authorized_key: user=charlie key=https://github.com/user.keys validate_certs=no # Set up authorized_keys exclusively with one key - authorized_key: user=root key="{{ item }}" state=present exclusive=yes with_file: - public_keys/doe-jane # Copies the key from the user who is running ansible to the remote machine user ubuntu - authorized_key: user=ubuntu key="{{ lookup('file', lookup('env','HOME') + "/.ssh/id_rsa.pub") }}" become: yes ''' # Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. 
# # Arguments # ========= # user = username # key = line to add to authorized_keys for user # path = path to the user's authorized_keys file (default: ~/.ssh/authorized_keys) # manage_dir = whether to create, and control ownership of the directory (default: true) # state = absent|present (default: present) # # see example in examples/playbooks import sys import os import pwd import os.path import tempfile import re import shlex class keydict(dict): """ a dictionary that maintains the order of keys as they are added """ # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class def __init__(self, *args, **kw): super(keydict,self).__init__(*args, **kw) self.itemlist = super(keydict,self).keys() def __setitem__(self, key, value): self.itemlist.append(key) super(keydict,self).__setitem__(key, value) def __iter__(self): return iter(self.itemlist) def keys(self): return list(set(self.itemlist)) def values(self): return [self[key] for key in self] def itervalues(self): return (self[key] for key in self) def keyfile(module, user, write=False, path=None, manage_dir=True): """ Calculate name of authorized keys file, optionally creating the directories and file, properly setting permissions. 
:param str user: name of user in passwd file :param bool write: if True, write changes to authorized_keys file (creating directories if needed) :param str path: if not None, use provided path rather than default of '~user/.ssh/authorized_keys' :param bool manage_dir: if True, create and set ownership of the parent dir of the authorized_keys file :return: full path string to authorized_keys for user """ if module.check_mode and path is not None: keysfile = path return keysfile try: user_entry = pwd.getpwnam(user) except KeyError: e = get_exception() if module.check_mode and path is None: module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode") module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e))) if path is None: homedir = user_entry.pw_dir sshdir = os.path.join(homedir, ".ssh") keysfile = os.path.join(sshdir, "authorized_keys") else: sshdir = os.path.dirname(path) keysfile = path if not write: return keysfile uid = user_entry.pw_uid gid = user_entry.pw_gid if manage_dir: if not os.path.exists(sshdir): os.mkdir(sshdir, int('0700', 8)) if module.selinux_enabled(): module.set_default_selinux_context(sshdir, False) os.chown(sshdir, uid, gid) os.chmod(sshdir, int('0700', 8)) if not os.path.exists(keysfile): basedir = os.path.dirname(keysfile) if not os.path.exists(basedir): os.makedirs(basedir) try: f = open(keysfile, "w") #touches file so we can set ownership and perms finally: f.close() if module.selinux_enabled(): module.set_default_selinux_context(keysfile, False) try: os.chown(keysfile, uid, gid) os.chmod(keysfile, int('0600', 8)) except OSError: pass return keysfile def parseoptions(module, options): ''' reads a string containing ssh-key options and returns a dictionary of those options ''' options_dict = keydict() #ordered dict if options: try: # the following regex will split on commas while # ignoring those commas that fall within quotes regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') parts 
= regex.split(options)[1:-1] for part in parts: if "=" in part: (key, value) = part.split("=", 1) if options_dict.has_key(key): if isinstance(options_dict[key], list): options_dict[key].append(value) else: options_dict[key] = [options_dict[key], value] else: options_dict[key] = value elif part != ",": options_dict[part] = None except: module.fail_json(msg="invalid option string: %s" % options) return options_dict def parsekey(module, raw_key): ''' parses a key, which may or may not contain a list of ssh-key options at the beginning ''' VALID_SSH2_KEY_TYPES = [ 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', 'ecdsa-sha2-nistp521', 'ssh-dss', 'ssh-rsa', ] options = None # connection options key = None # encrypted key string key_type = None # type of ssh key type_index = None # index of keytype in key string|list # remove comment yaml escapes raw_key = raw_key.replace('\#', '#') # split key safely lex = shlex.shlex(raw_key) lex.quotes = [] lex.commenters = '' #keep comment hashes lex.whitespace_split = True key_parts = list(lex) for i in range(0, len(key_parts)): if key_parts[i] in VALID_SSH2_KEY_TYPES: type_index = i key_type = key_parts[i] break # check for options if type_index is None: return None elif type_index > 0: options = " ".join(key_parts[:type_index]) # parse the options (if any) options = parseoptions(module, options) # get key after the type index key = key_parts[(type_index + 1)] # set comment to everything after the key if len(key_parts) > (type_index + 1): comment = " ".join(key_parts[(type_index + 2):]) return (key, key_type, options, comment) def readkeys(module, filename): if not os.path.isfile(filename): return {} keys = {} f = open(filename) for line in f.readlines(): key_data = parsekey(module, line) if key_data: # use key as identifier keys[key_data[0]] = key_data else: # for an invalid line, just append the line # to the array so it will be re-output later keys[line] = line f.close() return keys def writekeys(module, filename, 
keys): fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename)) f = open(tmp_path,"w") try: for index, key in keys.items(): try: (keyhash,type,options,comment) = key option_str = "" if options: option_strings = [] for option_key in options.keys(): if options[option_key]: if isinstance(options[option_key], list): for value in options[option_key]: option_strings.append("%s=%s" % (option_key, value)) else: option_strings.append("%s=%s" % (option_key, options[option_key])) else: option_strings.append("%s" % option_key) option_str = ",".join(option_strings) option_str += " " key_line = "%s%s %s %s\n" % (option_str, type, keyhash, comment) except: key_line = key f.writelines(key_line) except IOError: e = get_exception() module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) f.close() module.atomic_move(tmp_path, filename) def enforce_state(module, params): """ Add or remove key. """ user = params["user"] key = params["key"] path = params.get("path", None) manage_dir = params.get("manage_dir", True) state = params.get("state", "present") key_options = params.get("key_options", None) exclusive = params.get("exclusive", False) validate_certs = params.get("validate_certs", True) error_msg = "Error getting key from: %s" # if the key is a url, request it and use it as key source if key.startswith("http"): try: resp, info = fetch_url(module, key) if info['status'] != 200: module.fail_json(msg=error_msg % key) else: key = resp.read() except Exception: module.fail_json(msg=error_msg % key) # extract individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] # check current state -- just get the filename, don't create file do_write = False params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) existing_keys = readkeys(module, params["keyfile"]) # Add a place holder for keys that should exist in the state=present and # exclusive=true case keys_to_exist = [] # 
Check our new keys, if any of them exist we'll continue. for new_key in key: parsed_new_key = parsekey(module, new_key) if not parsed_new_key: module.fail_json(msg="invalid key specified: %s" % new_key) if key_options is not None: parsed_options = parseoptions(module, key_options) parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3]) present = False matched = False non_matching_keys = [] if parsed_new_key[0] in existing_keys: present = True # Then we check if everything matches, including # the key type and options. If not, we append this # existing key to the non-matching list # We only want it to match everything when the state # is present if parsed_new_key != existing_keys[parsed_new_key[0]] and state == "present": non_matching_keys.append(existing_keys[parsed_new_key[0]]) else: matched = True # handle idempotent state=present if state=="present": keys_to_exist.append(parsed_new_key[0]) if len(non_matching_keys) > 0: for non_matching_key in non_matching_keys: if non_matching_key[0] in existing_keys: del existing_keys[non_matching_key[0]] do_write = True if not matched: existing_keys[parsed_new_key[0]] = parsed_new_key do_write = True elif state=="absent": if not matched: continue del existing_keys[parsed_new_key[0]] do_write = True # remove all other keys to honor exclusive if state == "present" and exclusive: to_remove = frozenset(existing_keys).difference(keys_to_exist) for key in to_remove: del existing_keys[key] do_write = True if do_write: if module.check_mode: module.exit_json(changed=True) writekeys(module, keyfile(module, user, do_write, path, manage_dir), existing_keys) params['changed'] = True else: if module.check_mode: module.exit_json(changed=False) return params def main(): module = AnsibleModule( argument_spec = dict( user = dict(required=True, type='str'), key = dict(required=True, type='str'), path = dict(required=False, type='str'), manage_dir = dict(required=False, type='bool', default=True), state = 
dict(default='present', choices=['absent','present']), key_options = dict(required=False, type='str'), unique = dict(default=False, type='bool'), exclusive = dict(default=False, type='bool'), validate_certs = dict(default=True, type='bool'), ), supports_check_mode=True ) results = enforce_state(module, module.params) module.exit_json(**results) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * main()
gpl-3.0
levinas/assembly
lib/assembly/plugins/bwa.py
2
2226
import os from plugins import BaseAligner from yapsy.IPlugin import IPlugin from asmtypes import ArastDataInputError class BwaAligner(BaseAligner, IPlugin): def run(self, contig_file=None, reads=None, merged_pair=False): ### Data Checks if len(self.data.contigfiles) != 1: raise ArastDataInputError('BWA requires exactly 1 contigs file') ### Index contigs using IS algorithm contig_file = self.data.contigfiles[0] cmd_args = [self.executable, 'index', '-a', 'is', contig_file] self.arast_popen(cmd_args, overrides=False) ### Align reads bamfiles = [] for i, readset in enumerate(self.data.readsets): samfile = os.path.join(self.outpath, '{}_{}.sam'.format(os.path.basename(readset.files[0]), i)) cmd_args = [self.executable, 'mem', '-t', self.process_threads_allowed, contig_file] + readset.files if readset.type == 'paired': cmd_args.append('-p') cmd_args += ['>', samfile] self.arast_popen(' '.join(cmd_args), shell=True, overrides=False) if not os.path.exists(samfile): raise Exception('Unable to complete alignment') ## Convert to BAM bamfile = samfile.replace('.sam', '.bam') cmd_args = ['samtools', 'view', '-bSho', bamfile, samfile] self.arast_popen(cmd_args) bamfiles.append(bamfile) ### Merge samfiles if multiple if len(bamfiles) > 1: bamfile = os.path.join(self.outpath, '{}_{}.bam'.format(os.path.basename(contig_file), i)) self.arast_popen(['samtools', 'merge', bamfile] + bamfiles) if not os.path.exists(bamfile): raise Exception('Unable to complete alignment') else: bamfile = bamfiles[0] if not os.path.exists(bamfile): raise Exception('Unable to complete alignment') ## Convert back to sam samfile = bamfile.replace('.bam', '.sam') self.arast_popen(['samtools', 'view', '-h', '-o', samfile, bamfile]) return {'alignment': samfile, 'alignment_bam': bamfile}
mit
Shrhawk/edx-platform
common/lib/xmodule/xmodule/library_content_module.py
10
24844
# -*- coding: utf-8 -*- """ LibraryContent: The XBlock used to include blocks from a library in a course. """ import json from lxml import etree from copy import copy from capa.responsetypes import registry from gettext import ngettext from lazy import lazy from .mako_module import MakoModuleDescriptor from opaque_keys.edx.locator import LibraryLocator import random from webob import Response from xblock.core import XBlock from xblock.fields import Scope, String, List, Integer, Boolean from xblock.fragment import Fragment from xmodule.validation import StudioValidationMessage, StudioValidation from xmodule.x_module import XModule, STUDENT_VIEW from xmodule.studio_editable import StudioEditableModule, StudioEditableDescriptor from .xml_module import XmlDescriptor from pkg_resources import resource_string # pylint: disable=no-name-in-module # Make '_' a no-op so we can scrape strings _ = lambda text: text ANY_CAPA_TYPE_VALUE = 'any' def _get_human_name(problem_class): """ Get the human-friendly name for a problem type. """ return getattr(problem_class, 'human_name', problem_class.__name__) def _get_capa_types(): """ Gets capa types tags and labels """ capa_types = {tag: _get_human_name(registry.get_class_for_tag(tag)) for tag in registry.registered_tags()} return [{'value': ANY_CAPA_TYPE_VALUE, 'display_name': _('Any Type')}] + sorted([ {'value': capa_type, 'display_name': caption} for capa_type, caption in capa_types.items() ], key=lambda item: item.get('display_name')) class LibraryContentFields(object): """ Fields for the LibraryContentModule. Separated out for now because they need to be added to the module and the descriptor. 
""" # Please note the display_name of each field below is used in # common/test/acceptance/pages/studio/library.py:StudioLibraryContentXBlockEditModal # to locate input elements - keep synchronized display_name = String( display_name=_("Display Name"), help=_("Display name for this module"), default="Randomized Content Block", scope=Scope.settings, ) source_library_id = String( display_name=_("Library"), help=_("Select the library from which you want to draw content."), scope=Scope.settings, values_provider=lambda instance: instance.source_library_values(), ) source_library_version = String( # This is a hidden field that stores the version of source_library when we last pulled content from it display_name=_("Library Version"), scope=Scope.settings, ) mode = String( display_name=_("Mode"), help=_("Determines how content is drawn from the library"), default="random", values=[ {"display_name": _("Choose n at random"), "value": "random"} # Future addition: Choose a new random set of n every time the student refreshes the block, for self tests # Future addition: manually selected blocks ], scope=Scope.settings, ) max_count = Integer( display_name=_("Count"), help=_("Enter the number of components to display to each student."), default=1, scope=Scope.settings, ) capa_type = String( display_name=_("Problem Type"), help=_('Choose a problem type to fetch from the library. 
If "Any Type" is selected no filtering is applied.'), default=ANY_CAPA_TYPE_VALUE, values=_get_capa_types(), scope=Scope.settings, ) filters = String(default="") # TBD has_score = Boolean( display_name=_("Scored"), help=_("Set this value to True if this module is either a graded assignment or a practice problem."), default=False, scope=Scope.settings, ) selected = List( # This is a list of (block_type, block_id) tuples used to record # which random/first set of matching blocks was selected per user default=[], scope=Scope.user_state, ) has_children = True @property def source_library_key(self): """ Convenience method to get the library ID as a LibraryLocator and not just a string """ return LibraryLocator.from_string(self.source_library_id) #pylint: disable=abstract-method @XBlock.wants('library_tools') # Only needed in studio class LibraryContentModule(LibraryContentFields, XModule, StudioEditableModule): """ An XBlock whose children are chosen dynamically from a content library. Can be used to create randomized assessments among other things. Note: technically, all matching blocks from the content library are added as children of this block, but only a subset of those children are shown to any particular student. """ def _publish_event(self, event_name, result, **kwargs): """ Helper method to publish an event for analytics purposes """ event_data = { "location": unicode(self.location), "result": result, "previous_count": getattr(self, "_last_event_result_count", len(self.selected)), "max_count": self.max_count, } event_data.update(kwargs) self.runtime.publish(self, "edx.librarycontentblock.content.{}".format(event_name), event_data) self._last_event_result_count = len(result) # pylint: disable=attribute-defined-outside-init def selected_children(self): """ Returns a set() of block_ids indicating which of the possible children have been selected to display to the current user. This reads and updates the "selected" field, which has user_state scope. 
Note: self.selected and the return value contain block_ids. To get actual BlockUsageLocators, it is necessary to use self.children, because the block_ids alone do not specify the block type. """ if hasattr(self, "_selected_set"): # Already done: return self._selected_set # pylint: disable=access-member-before-definition selected = set(tuple(k) for k in self.selected) # set of (block_type, block_id) tuples assigned to this student lib_tools = self.runtime.service(self, 'library_tools') format_block_keys = lambda keys: lib_tools.create_block_analytics_summary(self.location.course_key, keys) # Determine which of our children we will show: valid_block_keys = set([(c.block_type, c.block_id) for c in self.children]) # pylint: disable=no-member # Remove any selected blocks that are no longer valid: invalid_block_keys = (selected - valid_block_keys) if invalid_block_keys: selected -= invalid_block_keys # Publish an event for analytics purposes: # reason "invalid" means deleted from library or a different library is now being used. self._publish_event( "removed", result=format_block_keys(selected), removed=format_block_keys(invalid_block_keys), reason="invalid" ) # If max_count has been decreased, we may have to drop some previously selected blocks: overlimit_block_keys = set() while len(selected) > self.max_count: overlimit_block_keys.add(selected.pop()) if overlimit_block_keys: # Publish an event for analytics purposes: self._publish_event( "removed", result=format_block_keys(selected), removed=format_block_keys(overlimit_block_keys), reason="overlimit" ) # Do we have enough blocks now? num_to_add = self.max_count - len(selected) if num_to_add > 0: added_block_keys = None # We need to select [more] blocks to display to this user: pool = valid_block_keys - selected if self.mode == "random": num_to_add = min(len(pool), num_to_add) added_block_keys = set(random.sample(pool, num_to_add)) # We now have the correct n random children to show for this user. 
else: raise NotImplementedError("Unsupported mode.") selected |= added_block_keys if added_block_keys: # Publish an event for analytics purposes: self._publish_event( "assigned", result=format_block_keys(selected), added=format_block_keys(added_block_keys) ) # Save our selections to the user state, to ensure consistency: self.selected = list(selected) # TODO: this doesn't save from the LMS "Progress" page. # Cache the results self._selected_set = selected # pylint: disable=attribute-defined-outside-init return selected def _get_selected_child_blocks(self): """ Generator returning XBlock instances of the children selected for the current user. """ for block_type, block_id in self.selected_children(): yield self.runtime.get_block(self.location.course_key.make_usage_key(block_type, block_id)) def student_view(self, context): fragment = Fragment() contents = [] child_context = {} if not context else copy(context) for child in self._get_selected_child_blocks(): for displayable in child.displayable_items(): rendered_child = displayable.render(STUDENT_VIEW, child_context) fragment.add_frag_resources(rendered_child) contents.append({ 'id': displayable.location.to_deprecated_string(), 'content': rendered_child.content, }) fragment.add_content(self.system.render_template('vert_module.html', { 'items': contents, 'xblock_context': context, })) return fragment def validate(self): """ Validates the state of this Library Content Module Instance. """ return self.descriptor.validate() def author_view(self, context): """ Renders the Studio views. Normal studio view: If block is properly configured, displays library status summary Studio container view: displays a preview of all possible children. """ fragment = Fragment() root_xblock = context.get('root_xblock') is_root = root_xblock and root_xblock.location == self.location if is_root: # User has clicked the "View" link. 
Show a preview of all possible children: if self.children: # pylint: disable=no-member fragment.add_content(self.system.render_template("library-block-author-preview-header.html", { 'max_count': self.max_count, 'display_name': self.display_name or self.url_name, })) context['can_edit_visibility'] = False self.render_children(context, fragment, can_reorder=False, can_add=False) # else: When shown on a unit page, don't show any sort of preview - # just the status of this block in the validation area. # The following JS is used to make the "Update now" button work on the unit page and the container view: fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/library_content_edit.js')) fragment.initialize_js('LibraryContentAuthorView') return fragment def get_child_descriptors(self): """ Return only the subset of our children relevant to the current student. """ return list(self._get_selected_child_blocks()) @XBlock.wants('user') @XBlock.wants('library_tools') # Only needed in studio @XBlock.wants('studio_user_permissions') # Only available in studio class LibraryContentDescriptor(LibraryContentFields, MakoModuleDescriptor, XmlDescriptor, StudioEditableDescriptor): """ Descriptor class for LibraryContentModule XBlock. """ module_class = LibraryContentModule mako_template = 'widgets/metadata-edit.html' js = {'coffee': [resource_string(__name__, 'js/src/vertical/edit.coffee')]} js_module_name = "VerticalDescriptor" show_in_read_only_mode = True @property def non_editable_metadata_fields(self): non_editable_fields = super(LibraryContentDescriptor, self).non_editable_metadata_fields # The only supported mode is currently 'random'. # Add the mode field to non_editable_metadata_fields so that it doesn't # render in the edit form. non_editable_fields.extend([LibraryContentFields.mode, LibraryContentFields.source_library_version]) return non_editable_fields @lazy def tools(self): """ Grab the library tools service or raise an error. 
""" return self.runtime.service(self, 'library_tools') def get_user_id(self): """ Get the ID of the current user. """ user_service = self.runtime.service(self, 'user') if user_service: # May be None when creating bok choy test fixtures user_id = user_service.get_current_user().opt_attrs.get('edx-platform.user_id', None) else: user_id = None return user_id @XBlock.handler def refresh_children(self, request=None, suffix=None): # pylint: disable=unused-argument """ Refresh children: This method is to be used when any of the libraries that this block references have been updated. It will re-fetch all matching blocks from the libraries, and copy them as children of this block. The children will be given new block_ids, but the definition ID used should be the exact same definition ID used in the library. This method will update this block's 'source_library_id' field to store the version number of the libraries used, so we easily determine if this block is up to date or not. """ user_perms = self.runtime.service(self, 'studio_user_permissions') user_id = self.get_user_id() if not self.tools: return Response("Library Tools unavailable in current runtime.", status=400) self.tools.update_children(self, user_id, user_perms) return Response() # Copy over any overridden settings the course author may have applied to the blocks. def _copy_overrides(self, store, user_id, source, dest): """ Copy any overrides the user has made on blocks in this library. 
""" for field in source.fields.itervalues(): if field.scope == Scope.settings and field.is_set_on(source): setattr(dest, field.name, field.read_from(source)) if source.has_children: source_children = [self.runtime.get_block(source_key) for source_key in source.children] dest_children = [self.runtime.get_block(dest_key) for dest_key in dest.children] for source_child, dest_child in zip(source_children, dest_children): self._copy_overrides(store, user_id, source_child, dest_child) store.update_item(dest, user_id) def studio_post_duplicate(self, store, source_block): """ Used by the studio after basic duplication of a source block. We handle the children ourselves, because we have to properly reference the library upstream and set the overrides. Otherwise we'll end up losing data on the next refresh. """ # The first task will be to refresh our copy of the library to generate the children. # We must do this at the currently set version of the library block. Otherwise we may not have # exactly the same children-- someone may be duplicating an out of date block, after all. user_id = self.get_user_id() user_perms = self.runtime.service(self, 'studio_user_permissions') # pylint: disable=no-member if not self.tools: raise RuntimeError("Library tools unavailable, duplication will not be sane!") self.tools.update_children(self, user_id, user_perms, version=self.source_library_version) self._copy_overrides(store, user_id, source_block, self) # Children have been handled. return True def _validate_library_version(self, validation, lib_tools, version, library_key): """ Validates library version """ latest_version = lib_tools.get_library_version(library_key) if latest_version is not None: if version is None or version != unicode(latest_version): validation.set_summary( StudioValidationMessage( StudioValidationMessage.WARNING, _(u'This component is out of date. The library has new content.'), # TODO: change this to action_runtime_event='...' 
once the unit page supports that feature. # See https://openedx.atlassian.net/browse/TNL-993 action_class='library-update-btn', # Translators: {refresh_icon} placeholder is substituted to "↻" (without double quotes) action_label=_(u"{refresh_icon} Update now.").format(refresh_icon=u"↻") ) ) return False else: validation.set_summary( StudioValidationMessage( StudioValidationMessage.ERROR, _(u'Library is invalid, corrupt, or has been deleted.'), action_class='edit-button', action_label=_(u"Edit Library List.") ) ) return False return True def _set_validation_error_if_empty(self, validation, summary): """ Helper method to only set validation summary if it's empty """ if validation.empty: validation.set_summary(summary) def validate(self): """ Validates the state of this Library Content Module Instance. This is the override of the general XBlock method, and it will also ask its superclass to validate. """ validation = super(LibraryContentDescriptor, self).validate() if not isinstance(validation, StudioValidation): validation = StudioValidation.copy(validation) library_tools = self.runtime.service(self, "library_tools") if not (library_tools and library_tools.can_use_library_content(self)): validation.set_summary( StudioValidationMessage( StudioValidationMessage.ERROR, _( u"This course does not support content libraries. " u"Contact your system administrator for more information." ) ) ) return validation if not self.source_library_id: validation.set_summary( StudioValidationMessage( StudioValidationMessage.NOT_CONFIGURED, _(u"A library has not yet been selected."), action_class='edit-button', action_label=_(u"Select a Library.") ) ) return validation lib_tools = self.runtime.service(self, 'library_tools') self._validate_library_version(validation, lib_tools, self.source_library_version, self.source_library_key) # Note: we assume refresh_children() has been called # since the last time fields like source_library_id or capa_types were changed. 
matching_children_count = len(self.children) # pylint: disable=no-member if matching_children_count == 0: self._set_validation_error_if_empty( validation, StudioValidationMessage( StudioValidationMessage.WARNING, _(u'There are no matching problem types in the specified libraries.'), action_class='edit-button', action_label=_(u"Select another problem type.") ) ) if matching_children_count < self.max_count: self._set_validation_error_if_empty( validation, StudioValidationMessage( StudioValidationMessage.WARNING, ( ngettext( u'The specified library is configured to fetch {count} problem, ', u'The specified library is configured to fetch {count} problems, ', self.max_count ) + ngettext( u'but there is only {actual} matching problem.', u'but there are only {actual} matching problems.', matching_children_count ) ).format(count=self.max_count, actual=matching_children_count), action_class='edit-button', action_label=_(u"Edit the library configuration.") ) ) return validation def source_library_values(self): """ Return a list of possible values for self.source_library_id """ lib_tools = self.runtime.service(self, 'library_tools') user_perms = self.runtime.service(self, 'studio_user_permissions') all_libraries = lib_tools.list_available_libraries() if user_perms: all_libraries = [ (key, name) for key, name in all_libraries if user_perms.can_read(key) or self.source_library_id == unicode(key) ] all_libraries.sort(key=lambda entry: entry[1]) # Sort by name if self.source_library_id and self.source_library_key not in [entry[0] for entry in all_libraries]: all_libraries.append((self.source_library_id, _(u"Invalid Library"))) all_libraries = [(u"", _("No Library Selected"))] + all_libraries values = [{"display_name": name, "value": unicode(key)} for key, name in all_libraries] return values def editor_saved(self, user, old_metadata, old_content): """ If source_library_id or capa_type has been edited, refresh_children automatically. 
""" old_source_library_id = old_metadata.get('source_library_id', []) if (old_source_library_id != self.source_library_id or old_metadata.get('capa_type', ANY_CAPA_TYPE_VALUE) != self.capa_type): try: self.refresh_children() except ValueError: pass # The validation area will display an error message, no need to do anything now. def has_dynamic_children(self): """ Inform the runtime that our children vary per-user. See get_child_descriptors() above """ return True def get_content_titles(self): """ Returns list of friendly titles for our selected children only; without thi, all possible children's titles would be seen in the sequence bar in the LMS. This overwrites the get_content_titles method included in x_module by default. """ titles = [] for child in self._xmodule.get_child_descriptors(): titles.extend(child.get_content_titles()) return titles @classmethod def definition_from_xml(cls, xml_object, system): children = [ # pylint: disable=no-member system.process_xml(etree.tostring(child)).scope_ids.usage_id for child in xml_object.getchildren() ] definition = { attr_name: json.loads(attr_value) for attr_name, attr_value in xml_object.attrib } return definition, children def definition_to_xml(self, resource_fs): """ Exports Library Content Module to XML """ # pylint: disable=no-member xml_object = etree.Element('library_content') for child in self.get_children(): self.runtime.add_block_as_child_node(child, xml_object) # Set node attributes based on our fields. for field_name, field in self.fields.iteritems(): if field_name in ('children', 'parent', 'content'): continue if field.is_set_on(self): xml_object.set(field_name, unicode(field.read_from(self))) return xml_object
agpl-3.0
Elettronik/SickRage
lib/tvdb_api/tvdb_ui.py
92
5494
#!/usr/bin/env python2 #encoding:utf-8 #author:dbr/Ben #project:tvdb_api #repository:http://github.com/dbr/tvdb_api #license:unlicense (http://unlicense.org/) """Contains included user interfaces for Tvdb show selection. A UI is a callback. A class, it's __init__ function takes two arguments: - config, which is the Tvdb config dict, setup in tvdb_api.py - log, which is Tvdb's logger instance (which uses the logging module). You can call log.info() log.warning() etc It must have a method "selectSeries", this is passed a list of dicts, each dict contains the the keys "name" (human readable show name), and "sid" (the shows ID as on thetvdb.com). For example: [{'name': u'Lost', 'sid': u'73739'}, {'name': u'Lost Universe', 'sid': u'73181'}] The "selectSeries" method must return the appropriate dict, or it can raise tvdb_userabort (if the selection is aborted), tvdb_shownotfound (if the show cannot be found). A simple example callback, which returns a random series: >>> import random >>> from tvdb_ui import BaseUI >>> class RandomUI(BaseUI): ... def selectSeries(self, allSeries): ... import random ... return random.choice(allSeries) Then to use it.. 
>>> from tvdb_api import Tvdb >>> t = Tvdb(custom_ui = RandomUI) >>> random_matching_series = t['Lost'] >>> type(random_matching_series) <class 'tvdb_api.Show'> """ __author__ = "dbr/Ben" __version__ = "1.9" import logging import warnings from tvdb_exceptions import tvdb_userabort def log(): return logging.getLogger(__name__) class BaseUI: """Default non-interactive UI, which auto-selects first results """ def __init__(self, config, log = None): self.config = config if log is not None: warnings.warn("the UI's log parameter is deprecated, instead use\n" "use import logging; logging.getLogger('ui').info('blah')\n" "The self.log attribute will be removed in the next version") self.log = logging.getLogger(__name__) def selectSeries(self, allSeries): return allSeries[0] class ConsoleUI(BaseUI): """Interactively allows the user to select a show from a console based UI """ def _displaySeries(self, allSeries, limit = 6): """Helper function, lists series with corresponding ID """ if limit is not None: toshow = allSeries[:limit] else: toshow = allSeries print "TVDB Search Results:" for i, cshow in enumerate(toshow): i_show = i + 1 # Start at more human readable number 1 (not 0) log().debug('Showing allSeries[%s], series %s)' % (i_show, allSeries[i]['seriesname'])) if i == 0: extra = " (default)" else: extra = "" print "%s -> %s [%s] # http://thetvdb.com/?tab=series&id=%s&lid=%s%s" % ( i_show, cshow['seriesname'].encode("UTF-8", "ignore"), cshow['language'].encode("UTF-8", "ignore"), str(cshow['id']), cshow['lid'], extra ) def selectSeries(self, allSeries): self._displaySeries(allSeries) if len(allSeries) == 1: # Single result, return it! print "Automatically selecting only result" return allSeries[0] if self.config['select_first'] is True: print "Automatically returning first search result" return allSeries[0] while True: # return breaks this loop try: print "Enter choice (first number, return for default, 'all', ? 
for help):" ans = raw_input() except KeyboardInterrupt: raise tvdb_userabort("User aborted (^c keyboard interupt)") except EOFError: raise tvdb_userabort("User aborted (EOF received)") log().debug('Got choice of: %s' % (ans)) try: selected_id = int(ans) - 1 # The human entered 1 as first result, not zero except ValueError: # Input was not number if len(ans.strip()) == 0: # Default option log().debug('Default option, returning first series') return allSeries[0] if ans == "q": log().debug('Got quit command (q)') raise tvdb_userabort("User aborted ('q' quit command)") elif ans == "?": print "## Help" print "# Enter the number that corresponds to the correct show." print "# a - display all results" print "# all - display all results" print "# ? - this help" print "# q - abort tvnamer" print "# Press return with no input to select first result" elif ans.lower() in ["a", "all"]: self._displaySeries(allSeries, limit = None) else: log().debug('Unknown keypress %s' % (ans)) else: log().debug('Trying to return ID: %d' % (selected_id)) try: return allSeries[selected_id] except IndexError: log().debug('Invalid show number entered!') print "Invalid number (%s) selected!" self._displaySeries(allSeries)
gpl-3.0
2uller/LotF
App/Lib/site-packages/pygame/tests/run_tests__tests/print_stdout/fake_2_test.py
75
1178
# NOTE(review): judging by its location this appears to be a fixture module
# consumed by the test-runner's own tests — its exact structure/output may be
# asserted elsewhere, so the code is left byte-identical; comments only.

if __name__ == '__main__':
    # Run directly: locate the package root three directories up and make it
    # importable, unless we are already inside an installed pygame package.
    import sys
    import os
    pkg_dir = (os.path.split(
               os.path.split(
               os.path.split(
               os.path.abspath(__file__))[0])[0])[0])
    parent_dir, pkg_name = os.path.split(pkg_dir)
    is_pygame_pkg = (pkg_name == 'tests' and
                     os.path.split(parent_dir)[1] == 'pygame')
    if not is_pygame_pkg:
        sys.path.insert(0, parent_dir)
else:
    # Imported as a module: detect install layout from the dotted module name.
    is_pygame_pkg = __name__.startswith('pygame.tests.')

# Pull test_utils (and its unittest shim) from whichever package layout applies.
if is_pygame_pkg:
    from pygame.tests import test_utils
    from pygame.tests.test_utils import unittest
else:
    from test import test_utils
    from test.test_utils import unittest


class KeyModuleTest(unittest.TestCase):
    # Stub test case: every test trivially passes; only the test names and
    # runner output matter to the consumer of this fixture.
    def test_get_focused(self):
        self.assert_(True)

    def test_get_mods(self):
        self.assert_(True)

    def test_get_pressed(self):
        self.assert_(True)

    def test_name(self):
        self.assert_(True)

    def test_set_mods(self):
        self.assert_(True)

    def test_set_repeat(self):
        self.assert_(True)

if __name__ == '__main__':
    unittest.main()
gpl-2.0
bclau/nova
nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py
7
19700
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import datetime
import uuid

from oslo.config import cfg
import webob

from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import multiple_create
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova.network import manager
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake

CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID


def fake_gen_uuid():
    # Deterministic uuid4 replacement so tests can assert on the server id.
    return FAKE_UUID


def return_security_group(context, instance_id, security_group_id):
    # Stub for db.instance_add_security_group; intentionally a no-op.
    pass


class ServersControllerCreateTest(test.TestCase):
    """Tests for the os-multiple-create extension on the v3 servers API."""

    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTest, self).setUp()

        self.flags(verbose=True,
                   enable_instance_password=True)
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}

        ext_info = plugins.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        # Second controller with os-multiple-create blacklisted, to verify the
        # min/max_count parameters are ignored when the extension is disabled.
        CONF.set_override('extensions_blacklist', 'os-multiple-create',
                          'osapi_v3')
        self.no_mult_create_controller = servers.ServersController(
            extension_info=ext_info)

        def instance_create(context, inst):
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = fake_instance.fake_db_instance(**{
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'uuid': FAKE_UUID,
                'instance_type': dict(inst_type),
                'access_ip_v4': '1.2.3.4',
                'access_ip_v6': 'fead::1234',
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
                "security_groups": inst['security_groups'],
            })
            self.instance_cache_by_id[instance['id']] = instance
            self.instance_cache_by_uuid[instance['uuid']] = instance
            return instance

        def instance_get(context, instance_id):
            """Stub for compute/api create() pulling in instance after
            scheduling
            """
            return self.instance_cache_by_id[instance_id]

        def instance_update(context, uuid, values):
            instance = self.instance_cache_by_uuid[uuid]
            instance.update(values)
            return instance

        def server_update(context, instance_uuid, params, update_cells=True,
                          columns_to_join=None):
            inst = self.instance_cache_by_uuid[instance_uuid]
            inst.update(params)
            return (inst, inst)

        def fake_method(*args, **kwargs):
            pass

        def project_get_networks(context, user_id):
            return dict(id='1', host='localhost')

        def queue_get_for(context, *args):
            return 'network_topic'

        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fake.stub_out_image_service(self.stubs)
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
        self.stubs.Set(db, 'instance_add_security_group',
                       return_security_group)
        self.stubs.Set(db, 'project_get_networks',
                       project_get_networks)
        self.stubs.Set(db, 'instance_create', instance_create)
        self.stubs.Set(db, 'instance_system_metadata_update',
                       fake_method)
        self.stubs.Set(db, 'instance_get', instance_get)
        self.stubs.Set(db, 'instance_update', instance_update)
        self.stubs.Set(rpc, 'cast', fake_method)
        self.stubs.Set(db, 'instance_update_and_get_original',
                       server_update)
        self.stubs.Set(rpc, 'queue_get_for', queue_get_for)
        self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                       fake_method)

    def _test_create_extra(self, params, no_image=False,
                           override_controller=None):
        # Build a minimal create request, merge in `params`, and POST it.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
        if no_image:
            server.pop('image_ref', None)
        server.update(params)
        body = dict(server=server)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        if override_controller:
            server = override_controller.create(req, body).obj['server']
        else:
            server = self.controller.create(req, body).obj['server']

    def test_create_instance_with_multiple_create_disabled(self):
        # Fix: removed unused local `ret_res_id`.
        min_count = 2
        max_count = 3
        params = {
            multiple_create.MIN_ATTRIBUTE_NAME: min_count,
            multiple_create.MAX_ATTRIBUTE_NAME: max_count,
        }
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            # Extension blacklisted: the counts must not reach compute.
            self.assertNotIn('min_count', kwargs)
            self.assertNotIn('max_count', kwargs)
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(
            params,
            override_controller=self.no_mult_create_controller)

    def test_create_instance_with_multiple_create_enabled(self):
        min_count = 2
        max_count = 3
        params = {
            multiple_create.MIN_ATTRIBUTE_NAME: min_count,
            multiple_create.MAX_ATTRIBUTE_NAME: max_count,
        }
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            self.assertEqual(kwargs['min_count'], 2)
            self.assertEqual(kwargs['max_count'], 3)
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params)

    def test_create_instance_invalid_negative_min(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MIN_ATTRIBUTE_NAME: -1,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
            }
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_create_instance_invalid_negative_max(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MAX_ATTRIBUTE_NAME: -1,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
            }
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_create_instance_invalid_min_greater_than_max(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MIN_ATTRIBUTE_NAME: 4,
                multiple_create.MAX_ATTRIBUTE_NAME: 2,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
            }
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_create_instance_invalid_alpha_min(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MIN_ATTRIBUTE_NAME: 'abcd',
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
            }
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_create_instance_invalid_alpha_max(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MAX_ATTRIBUTE_NAME: 'abcd',
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
            }
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          req,
                          body)

    def test_create_multiple_instances(self):
        """Test creating multiple instances but not asking for
        reservation_id
        """
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MIN_ATTRIBUTE_NAME: 2,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': []
            }
        }

        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, body).obj

        self.assertEqual(FAKE_UUID, res["server"]["id"])
        self._check_admin_pass_len(res["server"])

    def test_create_multiple_instances_pass_disabled(self):
        """Test creating multiple instances but not asking for
        reservation_id
        """
        self.flags(enable_instance_password=False)
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MIN_ATTRIBUTE_NAME: 2,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': []
            }
        }

        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, body).obj

        self.assertEqual(FAKE_UUID, res["server"]["id"])
        self._check_admin_pass_missing(res["server"])

    def _check_admin_pass_len(self, server_dict):
        """utility function - check server_dict for admin_pass length."""
        self.assertEqual(CONF.password_length,
                         len(server_dict["admin_pass"]))

    def _check_admin_pass_missing(self, server_dict):
        """utility function - check server_dict for absence of admin_pass."""
        # Idiom fix: assertNotIn instead of assertTrue(... not in ...).
        self.assertNotIn("admin_pass", server_dict)

    def test_create_multiple_instances_resv_id_return(self):
        """Test creating multiple instances with asking for
        reservation_id
        """
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MIN_ATTRIBUTE_NAME: 2,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': [],
                multiple_create.RRID_ATTRIBUTE_NAME: True
            }
        }

        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, body)

        reservation_id = res.obj['servers_reservation']['reservation_id']
        self.assertNotEqual(reservation_id, "")
        # Idiom fix: assertIsNotNone instead of assertNotEqual(..., None).
        self.assertIsNotNone(reservation_id)
        self.assertTrue(len(reservation_id) > 1)

    def test_create_multiple_instances_with_multiple_volume_bdm(self):
        """
        Test that a BadRequest is raised if multiple instances
        are requested with a list of block device mappings for volumes.
        """
        min_count = 2
        bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'},
               {'device_name': 'foo2', 'volume_id': 'vol-yyyy'}
               ]
        params = {
            'block_device_mapping': bdm,
            'min_count': min_count
        }
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            self.assertEqual(kwargs['min_count'], 2)
            self.assertEqual(len(kwargs['block_device_mapping']), 2)
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params, no_image=True)

    def test_create_multiple_instances_with_single_volume_bdm(self):
        """
        Test that a BadRequest is raised if multiple instances
        are requested to boot from a single volume.
        """
        min_count = 2
        bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'}]
        params = {
            'block_device_mapping': bdm,
            multiple_create.MIN_ATTRIBUTE_NAME: min_count
        }
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            self.assertEqual(kwargs['min_count'], 2)
            self.assertEqual(kwargs['block_device_mapping']['volume_id'],
                             'vol-xxxx')
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params, no_image=True)

    def test_create_multiple_instance_with_non_integer_max_count(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MAX_ATTRIBUTE_NAME: 2.5,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': []
            }
        }

        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, body)

    def test_create_multiple_instance_with_non_integer_min_count(self):
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                multiple_create.MIN_ATTRIBUTE_NAME: 2.5,
                'name': 'server_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
                'metadata': {'hello': 'world',
                             'open': 'stack'},
                'personality': []
            }
        }

        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, body)


class TestServerCreateRequestXMLDeserializer(test.TestCase):
    """Tests XML deserialization of the os-multiple-create attributes."""

    def setUp(self):
        super(TestServerCreateRequestXMLDeserializer, self).setUp()
        ext_info = plugins.LoadedExtensionInfo()
        controller = servers.ServersController(extension_info=ext_info)
        self.deserializer = servers.CreateDeserializer(controller)

    def test_request_with_multiple_create_args(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
        xmlns:%(alias)s="%(namespace)s"
        name="new-server-test" image_ref="1" flavor_ref="1"
        %(alias)s:min_count="1"
        %(alias)s:max_count="3"
        %(alias)s:return_reservation_id="True">
</server>""" % {
            'alias': multiple_create.ALIAS,
            'namespace': multiple_create.MultipleCreate.namespace}
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "image_ref": "1",
            "flavor_ref": "1",
            multiple_create.MIN_ATTRIBUTE_NAME: "1",
            multiple_create.MAX_ATTRIBUTE_NAME: "3",
            multiple_create.RRID_ATTRIBUTE_NAME: True,
        }}
        # Idiom fix: assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(request['body'], expected)
apache-2.0
perkinslr/pypyjs
website/js/pypy.js-0.2.0/lib/modules/test/test_fractions.py
97
24516
"""Tests for Lib/fractions.py.""" from decimal import Decimal from test.test_support import run_unittest import math import numbers import operator import fractions import sys import unittest from copy import copy, deepcopy from cPickle import dumps, loads F = fractions.Fraction gcd = fractions.gcd # decorator for skipping tests on non-IEEE 754 platforms requires_IEEE_754 = unittest.skipUnless( float.__getformat__("double").startswith("IEEE"), "test requires IEEE 754 doubles") class DummyFloat(object): """Dummy float class for testing comparisons with Fractions""" def __init__(self, value): if not isinstance(value, float): raise TypeError("DummyFloat can only be initialized from float") self.value = value def _richcmp(self, other, op): if isinstance(other, numbers.Rational): return op(F.from_float(self.value), other) elif isinstance(other, DummyFloat): return op(self.value, other.value) else: return NotImplemented def __eq__(self, other): return self._richcmp(other, operator.eq) def __le__(self, other): return self._richcmp(other, operator.le) def __lt__(self, other): return self._richcmp(other, operator.lt) def __ge__(self, other): return self._richcmp(other, operator.ge) def __gt__(self, other): return self._richcmp(other, operator.gt) # shouldn't be calling __float__ at all when doing comparisons def __float__(self): assert False, "__float__ should not be invoked for comparisons" # same goes for subtraction def __sub__(self, other): assert False, "__sub__ should not be invoked for comparisons" __rsub__ = __sub__ # Silence Py3k warning __hash__ = None class DummyRational(object): """Test comparison of Fraction with a naive rational implementation.""" def __init__(self, num, den): g = gcd(num, den) self.num = num // g self.den = den // g def __eq__(self, other): if isinstance(other, fractions.Fraction): return (self.num == other._numerator and self.den == other._denominator) else: return NotImplemented def __lt__(self, other): return(self.num * other._denominator 
< self.den * other._numerator) def __gt__(self, other): return(self.num * other._denominator > self.den * other._numerator) def __le__(self, other): return(self.num * other._denominator <= self.den * other._numerator) def __ge__(self, other): return(self.num * other._denominator >= self.den * other._numerator) # this class is for testing comparisons; conversion to float # should never be used for a comparison, since it loses accuracy def __float__(self): assert False, "__float__ should not be invoked" # Silence Py3k warning __hash__ = None class DummyFraction(fractions.Fraction): """Dummy Fraction subclass for copy and deepcopy testing.""" class GcdTest(unittest.TestCase): def testMisc(self): self.assertEqual(0, gcd(0, 0)) self.assertEqual(1, gcd(1, 0)) self.assertEqual(-1, gcd(-1, 0)) self.assertEqual(1, gcd(0, 1)) self.assertEqual(-1, gcd(0, -1)) self.assertEqual(1, gcd(7, 1)) self.assertEqual(-1, gcd(7, -1)) self.assertEqual(1, gcd(-23, 15)) self.assertEqual(12, gcd(120, 84)) self.assertEqual(-12, gcd(84, -120)) def _components(r): return (r.numerator, r.denominator) class FractionTest(unittest.TestCase): def assertTypedEquals(self, expected, actual): """Asserts that both the types and values are the same.""" self.assertEqual(type(expected), type(actual)) self.assertEqual(expected, actual) def assertRaisesMessage(self, exc_type, message, callable, *args, **kwargs): """Asserts that callable(*args, **kwargs) raises exc_type(message).""" try: callable(*args, **kwargs) except exc_type, e: self.assertEqual(message, str(e)) else: self.fail("%s not raised" % exc_type.__name__) def testInit(self): self.assertEqual((0, 1), _components(F())) self.assertEqual((7, 1), _components(F(7))) self.assertEqual((7, 3), _components(F(F(7, 3)))) self.assertEqual((-1, 1), _components(F(-1, 1))) self.assertEqual((-1, 1), _components(F(1, -1))) self.assertEqual((1, 1), _components(F(-2, -2))) self.assertEqual((1, 2), _components(F(5, 10))) self.assertEqual((7, 15), _components(F(7, 
15))) self.assertEqual((10**23, 1), _components(F(10**23))) self.assertEqual((3, 77), _components(F(F(3, 7), 11))) self.assertEqual((-9, 5), _components(F(2, F(-10, 9)))) self.assertEqual((2486, 2485), _components(F(F(22, 7), F(355, 113)))) self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)", F, 12, 0) self.assertRaises(TypeError, F, 1.5 + 3j) self.assertRaises(TypeError, F, "3/2", 3) self.assertRaises(TypeError, F, 3, 0j) self.assertRaises(TypeError, F, 3, 1j) @requires_IEEE_754 def testInitFromFloat(self): self.assertEqual((5, 2), _components(F(2.5))) self.assertEqual((0, 1), _components(F(-0.0))) self.assertEqual((3602879701896397, 36028797018963968), _components(F(0.1))) self.assertRaises(TypeError, F, float('nan')) self.assertRaises(TypeError, F, float('inf')) self.assertRaises(TypeError, F, float('-inf')) def testInitFromDecimal(self): self.assertEqual((11, 10), _components(F(Decimal('1.1')))) self.assertEqual((7, 200), _components(F(Decimal('3.5e-2')))) self.assertEqual((0, 1), _components(F(Decimal('.000e20')))) self.assertRaises(TypeError, F, Decimal('nan')) self.assertRaises(TypeError, F, Decimal('snan')) self.assertRaises(TypeError, F, Decimal('inf')) self.assertRaises(TypeError, F, Decimal('-inf')) def testFromString(self): self.assertEqual((5, 1), _components(F("5"))) self.assertEqual((3, 2), _components(F("3/2"))) self.assertEqual((3, 2), _components(F(" \n +3/2"))) self.assertEqual((-3, 2), _components(F("-3/2 "))) self.assertEqual((13, 2), _components(F(" 013/02 \n "))) self.assertEqual((13, 2), _components(F(u" 013/02 \n "))) self.assertEqual((16, 5), _components(F(" 3.2 "))) self.assertEqual((-16, 5), _components(F(u" -3.2 "))) self.assertEqual((-3, 1), _components(F(u" -3. 
"))) self.assertEqual((3, 5), _components(F(u" .6 "))) self.assertEqual((1, 3125), _components(F("32.e-5"))) self.assertEqual((1000000, 1), _components(F("1E+06"))) self.assertEqual((-12300, 1), _components(F("-1.23e4"))) self.assertEqual((0, 1), _components(F(" .0e+0\t"))) self.assertEqual((0, 1), _components(F("-0.000e0"))) self.assertRaisesMessage( ZeroDivisionError, "Fraction(3, 0)", F, "3/0") self.assertRaisesMessage( ValueError, "Invalid literal for Fraction: '3/'", F, "3/") self.assertRaisesMessage( ValueError, "Invalid literal for Fraction: '/2'", F, "/2") self.assertRaisesMessage( ValueError, "Invalid literal for Fraction: '3 /2'", F, "3 /2") self.assertRaisesMessage( # Denominators don't need a sign. ValueError, "Invalid literal for Fraction: '3/+2'", F, "3/+2") self.assertRaisesMessage( # Imitate float's parsing. ValueError, "Invalid literal for Fraction: '+ 3/2'", F, "+ 3/2") self.assertRaisesMessage( # Avoid treating '.' as a regex special character. ValueError, "Invalid literal for Fraction: '3a2'", F, "3a2") self.assertRaisesMessage( # Don't accept combinations of decimals and fractions. ValueError, "Invalid literal for Fraction: '3/7.2'", F, "3/7.2") self.assertRaisesMessage( # Don't accept combinations of decimals and fractions. ValueError, "Invalid literal for Fraction: '3.2/7'", F, "3.2/7") self.assertRaisesMessage( # Allow 3. and .3, but not . 
ValueError, "Invalid literal for Fraction: '.'", F, ".") def testImmutable(self): r = F(7, 3) r.__init__(2, 15) self.assertEqual((7, 3), _components(r)) self.assertRaises(AttributeError, setattr, r, 'numerator', 12) self.assertRaises(AttributeError, setattr, r, 'denominator', 6) self.assertEqual((7, 3), _components(r)) # But if you _really_ need to: r._numerator = 4 r._denominator = 2 self.assertEqual((4, 2), _components(r)) # Which breaks some important operations: self.assertNotEqual(F(4, 2), r) def testFromFloat(self): self.assertRaises(TypeError, F.from_float, 3+4j) self.assertEqual((10, 1), _components(F.from_float(10))) bigint = 1234567890123456789 self.assertEqual((bigint, 1), _components(F.from_float(bigint))) self.assertEqual((0, 1), _components(F.from_float(-0.0))) self.assertEqual((10, 1), _components(F.from_float(10.0))) self.assertEqual((-5, 2), _components(F.from_float(-2.5))) self.assertEqual((99999999999999991611392, 1), _components(F.from_float(1e23))) self.assertEqual(float(10**23), float(F.from_float(1e23))) self.assertEqual((3602879701896397, 1125899906842624), _components(F.from_float(3.2))) self.assertEqual(3.2, float(F.from_float(3.2))) inf = 1e1000 nan = inf - inf self.assertRaisesMessage( TypeError, "Cannot convert inf to Fraction.", F.from_float, inf) self.assertRaisesMessage( TypeError, "Cannot convert -inf to Fraction.", F.from_float, -inf) self.assertRaisesMessage( TypeError, "Cannot convert nan to Fraction.", F.from_float, nan) def testFromDecimal(self): self.assertRaises(TypeError, F.from_decimal, 3+4j) self.assertEqual(F(10, 1), F.from_decimal(10)) self.assertEqual(F(0), F.from_decimal(Decimal("-0"))) self.assertEqual(F(5, 10), F.from_decimal(Decimal("0.5"))) self.assertEqual(F(5, 1000), F.from_decimal(Decimal("5e-3"))) self.assertEqual(F(5000), F.from_decimal(Decimal("5e3"))) self.assertEqual(1 - F(1, 10**30), F.from_decimal(Decimal("0." 
+ "9" * 30))) self.assertRaisesMessage( TypeError, "Cannot convert Infinity to Fraction.", F.from_decimal, Decimal("inf")) self.assertRaisesMessage( TypeError, "Cannot convert -Infinity to Fraction.", F.from_decimal, Decimal("-inf")) self.assertRaisesMessage( TypeError, "Cannot convert NaN to Fraction.", F.from_decimal, Decimal("nan")) self.assertRaisesMessage( TypeError, "Cannot convert sNaN to Fraction.", F.from_decimal, Decimal("snan")) def testLimitDenominator(self): rpi = F('3.1415926535897932') self.assertEqual(rpi.limit_denominator(10000), F(355, 113)) self.assertEqual(-rpi.limit_denominator(10000), F(-355, 113)) self.assertEqual(rpi.limit_denominator(113), F(355, 113)) self.assertEqual(rpi.limit_denominator(112), F(333, 106)) self.assertEqual(F(201, 200).limit_denominator(100), F(1)) self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101)) self.assertEqual(F(0).limit_denominator(10000), F(0)) for i in (0, -1): self.assertRaisesMessage( ValueError, "max_denominator should be at least 1", F(1).limit_denominator, i) def testConversions(self): self.assertTypedEquals(-1, math.trunc(F(-11, 10))) self.assertTypedEquals(-1, int(F(-11, 10))) self.assertTypedEquals(1, math.trunc(F(11, 10))) self.assertEqual(False, bool(F(0, 1))) self.assertEqual(True, bool(F(3, 2))) self.assertTypedEquals(0.1, float(F(1, 10))) # Check that __float__ isn't implemented by converting the # numerator and denominator to float before dividing. 
self.assertRaises(OverflowError, float, long('2'*400+'7')) self.assertAlmostEqual(2.0/3, float(F(long('2'*400+'7'), long('3'*400+'1')))) self.assertTypedEquals(0.1+0j, complex(F(1,10))) def testArithmetic(self): self.assertEqual(F(1, 2), F(1, 10) + F(2, 5)) self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5)) self.assertEqual(F(1, 25), F(1, 10) * F(2, 5)) self.assertEqual(F(1, 4), F(1, 10) / F(2, 5)) self.assertTypedEquals(2, F(9, 10) // F(2, 5)) self.assertTypedEquals(10**23, F(10**23, 1) // F(1)) self.assertEqual(F(2, 3), F(-7, 3) % F(3, 2)) self.assertEqual(F(8, 27), F(2, 3) ** F(3)) self.assertEqual(F(27, 8), F(2, 3) ** F(-3)) self.assertTypedEquals(2.0, F(4) ** F(1, 2)) self.assertEqual(F(1, 1), +F(1, 1)) # Will return 1j in 3.0: self.assertRaises(ValueError, pow, F(-1), F(1, 2)) def testMixedArithmetic(self): self.assertTypedEquals(F(11, 10), F(1, 10) + 1) self.assertTypedEquals(1.1, F(1, 10) + 1.0) self.assertTypedEquals(1.1 + 0j, F(1, 10) + (1.0 + 0j)) self.assertTypedEquals(F(11, 10), 1 + F(1, 10)) self.assertTypedEquals(1.1, 1.0 + F(1, 10)) self.assertTypedEquals(1.1 + 0j, (1.0 + 0j) + F(1, 10)) self.assertTypedEquals(F(-9, 10), F(1, 10) - 1) self.assertTypedEquals(-0.9, F(1, 10) - 1.0) self.assertTypedEquals(-0.9 + 0j, F(1, 10) - (1.0 + 0j)) self.assertTypedEquals(F(9, 10), 1 - F(1, 10)) self.assertTypedEquals(0.9, 1.0 - F(1, 10)) self.assertTypedEquals(0.9 + 0j, (1.0 + 0j) - F(1, 10)) self.assertTypedEquals(F(1, 10), F(1, 10) * 1) self.assertTypedEquals(0.1, F(1, 10) * 1.0) self.assertTypedEquals(0.1 + 0j, F(1, 10) * (1.0 + 0j)) self.assertTypedEquals(F(1, 10), 1 * F(1, 10)) self.assertTypedEquals(0.1, 1.0 * F(1, 10)) self.assertTypedEquals(0.1 + 0j, (1.0 + 0j) * F(1, 10)) self.assertTypedEquals(F(1, 10), F(1, 10) / 1) self.assertTypedEquals(0.1, F(1, 10) / 1.0) self.assertTypedEquals(0.1 + 0j, F(1, 10) / (1.0 + 0j)) self.assertTypedEquals(F(10, 1), 1 / F(1, 10)) self.assertTypedEquals(10.0, 1.0 / F(1, 10)) self.assertTypedEquals(10.0 + 0j, (1.0 + 0j) / F(1, 
10)) self.assertTypedEquals(0, F(1, 10) // 1) self.assertTypedEquals(0.0, F(1, 10) // 1.0) self.assertTypedEquals(10, 1 // F(1, 10)) self.assertTypedEquals(10**23, 10**22 // F(1, 10)) self.assertTypedEquals(10.0, 1.0 // F(1, 10)) self.assertTypedEquals(F(1, 10), F(1, 10) % 1) self.assertTypedEquals(0.1, F(1, 10) % 1.0) self.assertTypedEquals(F(0, 1), 1 % F(1, 10)) self.assertTypedEquals(0.0, 1.0 % F(1, 10)) # No need for divmod since we don't override it. # ** has more interesting conversion rules. self.assertTypedEquals(F(100, 1), F(1, 10) ** -2) self.assertTypedEquals(F(100, 1), F(10, 1) ** 2) self.assertTypedEquals(0.1, F(1, 10) ** 1.0) self.assertTypedEquals(0.1 + 0j, F(1, 10) ** (1.0 + 0j)) self.assertTypedEquals(4 , 2 ** F(2, 1)) # Will return 1j in 3.0: self.assertRaises(ValueError, pow, (-1), F(1, 2)) self.assertTypedEquals(F(1, 4) , 2 ** F(-2, 1)) self.assertTypedEquals(2.0 , 4 ** F(1, 2)) self.assertTypedEquals(0.25, 2.0 ** F(-2, 1)) self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10)) def testMixingWithDecimal(self): # Decimal refuses mixed comparisons. 
self.assertRaisesMessage( TypeError, "unsupported operand type(s) for +: 'Fraction' and 'Decimal'", operator.add, F(3,11), Decimal('3.1415926')) self.assertRaisesMessage( TypeError, "unsupported operand type(s) for +: 'Decimal' and 'Fraction'", operator.add, Decimal('3.1415926'), F(3,11)) self.assertNotEqual(F(5, 2), Decimal('2.5')) def testComparisons(self): self.assertTrue(F(1, 2) < F(2, 3)) self.assertFalse(F(1, 2) < F(1, 2)) self.assertTrue(F(1, 2) <= F(2, 3)) self.assertTrue(F(1, 2) <= F(1, 2)) self.assertFalse(F(2, 3) <= F(1, 2)) self.assertTrue(F(1, 2) == F(1, 2)) self.assertFalse(F(1, 2) == F(1, 3)) self.assertFalse(F(1, 2) != F(1, 2)) self.assertTrue(F(1, 2) != F(1, 3)) def testComparisonsDummyRational(self): self.assertTrue(F(1, 2) == DummyRational(1, 2)) self.assertTrue(DummyRational(1, 2) == F(1, 2)) self.assertFalse(F(1, 2) == DummyRational(3, 4)) self.assertFalse(DummyRational(3, 4) == F(1, 2)) self.assertTrue(F(1, 2) < DummyRational(3, 4)) self.assertFalse(F(1, 2) < DummyRational(1, 2)) self.assertFalse(F(1, 2) < DummyRational(1, 7)) self.assertFalse(F(1, 2) > DummyRational(3, 4)) self.assertFalse(F(1, 2) > DummyRational(1, 2)) self.assertTrue(F(1, 2) > DummyRational(1, 7)) self.assertTrue(F(1, 2) <= DummyRational(3, 4)) self.assertTrue(F(1, 2) <= DummyRational(1, 2)) self.assertFalse(F(1, 2) <= DummyRational(1, 7)) self.assertFalse(F(1, 2) >= DummyRational(3, 4)) self.assertTrue(F(1, 2) >= DummyRational(1, 2)) self.assertTrue(F(1, 2) >= DummyRational(1, 7)) self.assertTrue(DummyRational(1, 2) < F(3, 4)) self.assertFalse(DummyRational(1, 2) < F(1, 2)) self.assertFalse(DummyRational(1, 2) < F(1, 7)) self.assertFalse(DummyRational(1, 2) > F(3, 4)) self.assertFalse(DummyRational(1, 2) > F(1, 2)) self.assertTrue(DummyRational(1, 2) > F(1, 7)) self.assertTrue(DummyRational(1, 2) <= F(3, 4)) self.assertTrue(DummyRational(1, 2) <= F(1, 2)) self.assertFalse(DummyRational(1, 2) <= F(1, 7)) self.assertFalse(DummyRational(1, 2) >= F(3, 4)) 
self.assertTrue(DummyRational(1, 2) >= F(1, 2)) self.assertTrue(DummyRational(1, 2) >= F(1, 7)) def testComparisonsDummyFloat(self): x = DummyFloat(1./3.) y = F(1, 3) self.assertTrue(x != y) self.assertTrue(x < y or x > y) self.assertFalse(x == y) self.assertFalse(x <= y and x >= y) self.assertTrue(y != x) self.assertTrue(y < x or y > x) self.assertFalse(y == x) self.assertFalse(y <= x and y >= x) def testMixedLess(self): self.assertTrue(2 < F(5, 2)) self.assertFalse(2 < F(4, 2)) self.assertTrue(F(5, 2) < 3) self.assertFalse(F(4, 2) < 2) self.assertTrue(F(1, 2) < 0.6) self.assertFalse(F(1, 2) < 0.4) self.assertTrue(0.4 < F(1, 2)) self.assertFalse(0.5 < F(1, 2)) self.assertFalse(float('inf') < F(1, 2)) self.assertTrue(float('-inf') < F(0, 10)) self.assertFalse(float('nan') < F(-3, 7)) self.assertTrue(F(1, 2) < float('inf')) self.assertFalse(F(17, 12) < float('-inf')) self.assertFalse(F(144, -89) < float('nan')) def testMixedLessEqual(self): self.assertTrue(0.5 <= F(1, 2)) self.assertFalse(0.6 <= F(1, 2)) self.assertTrue(F(1, 2) <= 0.5) self.assertFalse(F(1, 2) <= 0.4) self.assertTrue(2 <= F(4, 2)) self.assertFalse(2 <= F(3, 2)) self.assertTrue(F(4, 2) <= 2) self.assertFalse(F(5, 2) <= 2) self.assertFalse(float('inf') <= F(1, 2)) self.assertTrue(float('-inf') <= F(0, 10)) self.assertFalse(float('nan') <= F(-3, 7)) self.assertTrue(F(1, 2) <= float('inf')) self.assertFalse(F(17, 12) <= float('-inf')) self.assertFalse(F(144, -89) <= float('nan')) def testBigFloatComparisons(self): # Because 10**23 can't be represented exactly as a float: self.assertFalse(F(10**23) == float(10**23)) # The first test demonstrates why these are important. 
self.assertFalse(1e23 < float(F(math.trunc(1e23) + 1))) self.assertTrue(1e23 < F(math.trunc(1e23) + 1)) self.assertFalse(1e23 <= F(math.trunc(1e23) - 1)) self.assertTrue(1e23 > F(math.trunc(1e23) - 1)) self.assertFalse(1e23 >= F(math.trunc(1e23) + 1)) def testBigComplexComparisons(self): self.assertFalse(F(10**23) == complex(10**23)) self.assertRaises(TypeError, operator.gt, F(10**23), complex(10**23)) self.assertRaises(TypeError, operator.le, F(10**23), complex(10**23)) x = F(3, 8) z = complex(0.375, 0.0) w = complex(0.375, 0.2) self.assertTrue(x == z) self.assertFalse(x != z) self.assertFalse(x == w) self.assertTrue(x != w) for op in operator.lt, operator.le, operator.gt, operator.ge: self.assertRaises(TypeError, op, x, z) self.assertRaises(TypeError, op, z, x) self.assertRaises(TypeError, op, x, w) self.assertRaises(TypeError, op, w, x) def testMixedEqual(self): self.assertTrue(0.5 == F(1, 2)) self.assertFalse(0.6 == F(1, 2)) self.assertTrue(F(1, 2) == 0.5) self.assertFalse(F(1, 2) == 0.4) self.assertTrue(2 == F(4, 2)) self.assertFalse(2 == F(3, 2)) self.assertTrue(F(4, 2) == 2) self.assertFalse(F(5, 2) == 2) self.assertFalse(F(5, 2) == float('nan')) self.assertFalse(float('nan') == F(3, 7)) self.assertFalse(F(5, 2) == float('inf')) self.assertFalse(float('-inf') == F(2, 5)) def testStringification(self): self.assertEqual("Fraction(7, 3)", repr(F(7, 3))) self.assertEqual("Fraction(6283185307, 2000000000)", repr(F('3.1415926535'))) self.assertEqual("Fraction(-1, 100000000000000000000)", repr(F(1, -10**20))) self.assertEqual("7/3", str(F(7, 3))) self.assertEqual("7", str(F(7, 1))) def testHash(self): self.assertEqual(hash(2.5), hash(F(5, 2))) self.assertEqual(hash(10**50), hash(F(10**50))) self.assertNotEqual(hash(float(10**23)), hash(F(10**23))) def testApproximatePi(self): # Algorithm borrowed from # http://docs.python.org/lib/decimal-recipes.html three = F(3) lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24 while abs(s - lasts) > F(1, 10**9): lasts = s n, 
na = n+na, na+8 d, da = d+da, da+32 t = (t * n) / d s += t self.assertAlmostEqual(math.pi, s) def testApproximateCos1(self): # Algorithm borrowed from # http://docs.python.org/lib/decimal-recipes.html x = F(1) i, lasts, s, fact, num, sign = 0, 0, F(1), 1, 1, 1 while abs(s - lasts) > F(1, 10**9): lasts = s i += 2 fact *= i * (i-1) num *= x * x sign *= -1 s += num / fact * sign self.assertAlmostEqual(math.cos(1), s) def test_copy_deepcopy_pickle(self): r = F(13, 7) dr = DummyFraction(13, 7) self.assertEqual(r, loads(dumps(r))) self.assertEqual(id(r), id(copy(r))) self.assertEqual(id(r), id(deepcopy(r))) self.assertNotEqual(id(dr), id(copy(dr))) self.assertNotEqual(id(dr), id(deepcopy(dr))) self.assertTypedEquals(dr, copy(dr)) self.assertTypedEquals(dr, deepcopy(dr)) def test_slots(self): # Issue 4998 r = F(13, 7) self.assertRaises(AttributeError, setattr, r, 'a', 10) def test_main(): run_unittest(FractionTest, GcdTest) if __name__ == '__main__': test_main()
mit
ahmadRagheb/goldenHR
erpnext/hr/doctype/job_applicant/job_applicant.py
23
1515
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt # For license information, please see license.txt from __future__ import unicode_literals from frappe.model.document import Document import frappe from frappe import _ from frappe.utils import comma_and, validate_email_add sender_field = "email_id" class DuplicationError(frappe.ValidationError): pass class JobApplicant(Document): def onload(self): offer_letter = frappe.get_all("Offer Letter", filters={"job_applicant": self.name}) if offer_letter: self.get("__onload").offer_letter = offer_letter[0].name def autoname(self): keys = filter(None, (self.applicant_name, self.email_id, self.job_title)) if not keys: frappe.throw(_("Name or Email is mandatory"), frappe.NameError) self.name = " - ".join(keys) def validate(self): self.check_email_id_is_unique() if self.email_id: validate_email_add(self.email_id, True) if not self.applicant_name and self.email_id: guess = self.email_id.split('@')[0] self.applicant_name = ' '.join([p.capitalize() for p in guess.split('.')]) def check_email_id_is_unique(self): if self.email_id: names = frappe.db.sql_list("""select name from `tabJob Applicant` where email_id=%s and name!=%s and job_title=%s""", (self.email_id, self.name, self.job_title)) if names: frappe.throw(_("Email Address must be unique, already exists for {0}").format(comma_and(names)), frappe.DuplicateEntryError)
gpl-3.0
carljm/django
django/contrib/gis/utils/layermapping.py
137
27371
# LayerMapping -- A Django Model/OGR Layer Mapping Utility """ The LayerMapping class provides a way to map the contents of OGR vector files (e.g. SHP files) to Geographic-enabled Django models. For more information, please consult the GeoDjango documentation: https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/ """ import sys from decimal import Decimal, InvalidOperation as DecimalInvalidOperation from django.contrib.gis.db.models import GeometryField from django.contrib.gis.gdal import ( CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType, SpatialReference, ) from django.contrib.gis.gdal.field import ( OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString, OFTTime, ) from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist from django.db import connections, models, router, transaction from django.utils import six from django.utils.encoding import force_text # LayerMapping exceptions. class LayerMapError(Exception): pass class InvalidString(LayerMapError): pass class InvalidDecimal(LayerMapError): pass class InvalidInteger(LayerMapError): pass class MissingForeignKey(LayerMapError): pass class LayerMapping(object): "A class that maps OGR Layers to GeoDjango Models." # Acceptable 'base' types for a multi-geometry type. MULTI_TYPES = {1: OGRGeomType('MultiPoint'), 2: OGRGeomType('MultiLineString'), 3: OGRGeomType('MultiPolygon'), OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'), OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'), OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'), } # Acceptable Django field types and corresponding acceptable OGR # counterparts. 
FIELD_TYPES = { models.AutoField: OFTInteger, models.BigAutoField: OFTInteger64, models.IntegerField: (OFTInteger, OFTReal, OFTString), models.FloatField: (OFTInteger, OFTReal), models.DateField: OFTDate, models.DateTimeField: OFTDateTime, models.EmailField: OFTString, models.TimeField: OFTTime, models.DecimalField: (OFTInteger, OFTReal), models.CharField: OFTString, models.SlugField: OFTString, models.TextField: OFTString, models.URLField: OFTString, models.BigIntegerField: (OFTInteger, OFTReal, OFTString), models.SmallIntegerField: (OFTInteger, OFTReal, OFTString), models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString), } def __init__(self, model, data, mapping, layer=0, source_srs=None, encoding='utf-8', transaction_mode='commit_on_success', transform=True, unique=None, using=None): """ A LayerMapping object is initialized using the given Model (not an instance), a DataSource (or string path to an OGR-supported data file), and a mapping dictionary. See the module level docstring for more details and keyword argument usage. """ # Getting the DataSource and the associated Layer. if isinstance(data, six.string_types): self.ds = DataSource(data, encoding=encoding) else: self.ds = data self.layer = self.ds[layer] self.using = using if using is not None else router.db_for_write(model) self.spatial_backend = connections[self.using].ops # Setting the mapping & model attributes. self.mapping = mapping self.model = model # Checking the layer -- initialization of the object will fail if # things don't check out before hand. self.check_layer() # Getting the geometry column associated with the model (an # exception will be raised if there is no geometry column). 
if connections[self.using].features.supports_transform: self.geo_field = self.geometry_field() else: transform = False # Checking the source spatial reference system, and getting # the coordinate transformation object (unless the `transform` # keyword is set to False) if transform: self.source_srs = self.check_srs(source_srs) self.transform = self.coord_transform() else: self.transform = transform # Setting the encoding for OFTString fields, if specified. if encoding: # Making sure the encoding exists, if not a LookupError # exception will be thrown. from codecs import lookup lookup(encoding) self.encoding = encoding else: self.encoding = None if unique: self.check_unique(unique) transaction_mode = 'autocommit' # Has to be set to autocommit. self.unique = unique else: self.unique = None # Setting the transaction decorator with the function in the # transaction modes dictionary. self.transaction_mode = transaction_mode if transaction_mode == 'autocommit': self.transaction_decorator = None elif transaction_mode == 'commit_on_success': self.transaction_decorator = transaction.atomic else: raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode) # #### Checking routines used during initialization #### def check_fid_range(self, fid_range): "This checks the `fid_range` keyword." if fid_range: if isinstance(fid_range, (tuple, list)): return slice(*fid_range) elif isinstance(fid_range, slice): return fid_range else: raise TypeError else: return None def check_layer(self): """ This checks the Layer metadata, and ensures that it is compatible with the mapping information and model. Unlike previous revisions, there is no need to increment through each feature in the Layer. """ # The geometry field of the model is set here. # TODO: Support more than one geometry field / model. However, this # depends on the GDAL Driver in use. self.geom_field = False self.fields = {} # Getting lists of the field names and the field types available in # the OGR Layer. 
ogr_fields = self.layer.fields ogr_field_types = self.layer.field_types # Function for determining if the OGR mapping field is in the Layer. def check_ogr_fld(ogr_map_fld): try: idx = ogr_fields.index(ogr_map_fld) except ValueError: raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld) return idx # No need to increment through each feature in the model, simply check # the Layer metadata against what was given in the mapping dictionary. for field_name, ogr_name in self.mapping.items(): # Ensuring that a corresponding field exists in the model # for the given field name in the mapping. try: model_field = self.model._meta.get_field(field_name) except FieldDoesNotExist: raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name) # Getting the string name for the Django field class (e.g., 'PointField'). fld_name = model_field.__class__.__name__ if isinstance(model_field, GeometryField): if self.geom_field: raise LayerMapError('LayerMapping does not support more than one GeometryField per model.') # Getting the coordinate dimension of the geometry field. coord_dim = model_field.dim try: if coord_dim == 3: gtype = OGRGeomType(ogr_name + '25D') else: gtype = OGRGeomType(ogr_name) except GDALException: raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name) # Making sure that the OGR Layer's Geometry is compatible. ltype = self.layer.geom_type if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)): raise LayerMapError('Invalid mapping geometry; model has %s%s, ' 'layer geometry type is %s.' % (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype)) # Setting the `geom_field` attribute w/the name of the model field # that is a Geometry. Also setting the coordinate dimension # attribute. 
self.geom_field = field_name self.coord_dim = coord_dim fields_val = model_field elif isinstance(model_field, models.ForeignKey): if isinstance(ogr_name, dict): # Is every given related model mapping field in the Layer? rel_model = model_field.remote_field.model for rel_name, ogr_field in ogr_name.items(): idx = check_ogr_fld(ogr_field) try: rel_model._meta.get_field(rel_name) except FieldDoesNotExist: raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' % (rel_name, rel_model.__class__.__name__)) fields_val = rel_model else: raise TypeError('ForeignKey mapping must be of dictionary type.') else: # Is the model field type supported by LayerMapping? if model_field.__class__ not in self.FIELD_TYPES: raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name) # Is the OGR field in the Layer? idx = check_ogr_fld(ogr_name) ogr_field = ogr_field_types[idx] # Can the OGR field type be mapped to the Django field type? if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]): raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' % (ogr_field, ogr_field.__name__, fld_name)) fields_val = model_field self.fields[field_name] = fields_val def check_srs(self, source_srs): "Checks the compatibility of the given spatial reference object." if isinstance(source_srs, SpatialReference): sr = source_srs elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()): sr = source_srs.srs elif isinstance(source_srs, (int, six.string_types)): sr = SpatialReference(source_srs) else: # Otherwise just pulling the SpatialReference from the layer sr = self.layer.srs if not sr: raise LayerMapError('No source reference system defined.') else: return sr def check_unique(self, unique): "Checks the `unique` keyword parameter -- may be a sequence or string." 
if isinstance(unique, (list, tuple)): # List of fields to determine uniqueness with for attr in unique: if attr not in self.mapping: raise ValueError elif isinstance(unique, six.string_types): # Only a single field passed in. if unique not in self.mapping: raise ValueError else: raise TypeError('Unique keyword argument must be set with a tuple, list, or string.') # Keyword argument retrieval routines #### def feature_kwargs(self, feat): """ Given an OGR Feature, this will return a dictionary of keyword arguments for constructing the mapped model. """ # The keyword arguments for model construction. kwargs = {} # Incrementing through each model field and OGR field in the # dictionary mapping. for field_name, ogr_name in self.mapping.items(): model_field = self.fields[field_name] if isinstance(model_field, GeometryField): # Verify OGR geometry. try: val = self.verify_geom(feat.geom, model_field) except GDALException: raise LayerMapError('Could not retrieve geometry from feature.') elif isinstance(model_field, models.base.ModelBase): # The related _model_, not a field was passed in -- indicating # another mapping for the related Model. val = self.verify_fk(feat, model_field, ogr_name) else: # Otherwise, verify OGR Field type. val = self.verify_ogr_field(feat[ogr_name], model_field) # Setting the keyword arguments for the field name with the # value obtained above. kwargs[field_name] = val return kwargs def unique_kwargs(self, kwargs): """ Given the feature keyword arguments (from `feature_kwargs`) this routine will construct and return the uniqueness keyword arguments -- a subset of the feature kwargs. """ if isinstance(self.unique, six.string_types): return {self.unique: kwargs[self.unique]} else: return {fld: kwargs[fld] for fld in self.unique} # #### Verification routines used in constructing model keyword arguments. #### def verify_ogr_field(self, ogr_field, model_field): """ Verifies if the OGR Field contents are acceptable to the Django model field. 
If they are, the verified value is returned, otherwise the proper exception is raised. """ if (isinstance(ogr_field, OFTString) and isinstance(model_field, (models.CharField, models.TextField))): if self.encoding: # The encoding for OGR data sources may be specified here # (e.g., 'cp437' for Census Bureau boundary files). val = force_text(ogr_field.value, self.encoding) else: val = ogr_field.value if model_field.max_length and len(val) > model_field.max_length: raise InvalidString('%s model field maximum string length is %s, given %s characters.' % (model_field.name, model_field.max_length, len(val))) elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField): try: # Creating an instance of the Decimal value to use. d = Decimal(str(ogr_field.value)) except DecimalInvalidOperation: raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value) # Getting the decimal value as a tuple. dtup = d.as_tuple() digits = dtup[1] d_idx = dtup[2] # index where the decimal is # Maximum amount of precision, or digits to the left of the decimal. max_prec = model_field.max_digits - model_field.decimal_places # Getting the digits to the left of the decimal place for the # given decimal. if d_idx < 0: n_prec = len(digits[:d_idx]) else: n_prec = len(digits) + d_idx # If we have more than the maximum digits allowed, then throw an # InvalidDecimal exception. if n_prec > max_prec: raise InvalidDecimal( 'A DecimalField with max_digits %d, decimal_places %d must ' 'round to an absolute value less than 10^%d.' % (model_field.max_digits, model_field.decimal_places, max_prec) ) val = d elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField): # Attempt to convert any OFTReal and OFTString value to an OFTInteger. 
try: val = int(ogr_field.value) except ValueError: raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value) else: val = ogr_field.value return val def verify_fk(self, feat, rel_model, rel_mapping): """ Given an OGR Feature, the related model and its dictionary mapping, this routine will retrieve the related model for the ForeignKey mapping. """ # TODO: It is expensive to retrieve a model for every record -- # explore if an efficient mechanism exists for caching related # ForeignKey models. # Constructing and verifying the related model keyword arguments. fk_kwargs = {} for field_name, ogr_name in rel_mapping.items(): fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name)) # Attempting to retrieve and return the related model. try: return rel_model.objects.using(self.using).get(**fk_kwargs) except ObjectDoesNotExist: raise MissingForeignKey( 'No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs) ) def verify_geom(self, geom, model_field): """ Verifies the geometry -- will construct and return a GeometryCollection if necessary (for example if the model field is MultiPolygonField while the mapped shapefile only contains Polygons). """ # Downgrade a 3D geom to a 2D one, if necessary. if self.coord_dim != geom.coord_dim: geom.coord_dim = self.coord_dim if self.make_multi(geom.geom_type, model_field): # Constructing a multi-geometry type to contain the single geometry multi_type = self.MULTI_TYPES[geom.geom_type.num] g = OGRGeometry(multi_type) g.add(geom) else: g = geom # Transforming the geometry with our Coordinate Transformation object, # but only if the class variable `transform` is set w/a CoordTransform # object. if self.transform: g.transform(self.transform) # Returning the WKT of the geometry. return g.wkt # #### Other model methods #### def coord_transform(self): "Returns the coordinate transformation object." 
SpatialRefSys = self.spatial_backend.spatial_ref_sys() try: # Getting the target spatial reference system target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs # Creating the CoordTransform object return CoordTransform(self.source_srs, target_srs) except Exception as msg: new_msg = 'Could not translate between the data source and model geometry: %s' % msg six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2]) def geometry_field(self): "Returns the GeometryField instance associated with the geographic column." # Use `get_field()` on the model's options so that we # get the correct field instance if there's model inheritance. opts = self.model._meta return opts.get_field(self.geom_field) def make_multi(self, geom_type, model_field): """ Given the OGRGeomType for a geometry and its associated GeometryField, determine whether the geometry should be turned into a GeometryCollection. """ return (geom_type.num in self.MULTI_TYPES and model_field.__class__.__name__ == 'Multi%s' % geom_type.django) def save(self, verbose=False, fid_range=False, step=False, progress=False, silent=False, stream=sys.stdout, strict=False): """ Saves the contents from the OGR DataSource Layer into the database according to the mapping dictionary given at initialization. Keyword Parameters: verbose: If set, information will be printed subsequent to each model save executed on the database. fid_range: May be set with a slice or tuple of (begin, end) feature ID's to map from the data source. In other words, this keyword enables the user to selectively import a subset range of features in the geographic data source. step: If set with an integer, transactions will occur at every step interval. For example, if step=1000, a commit would occur after the 1,000th feature, the 2,000th feature etc. progress: When this keyword is set, status information will be printed giving the number of features processed and successfully saved. 
By default, progress information will pe printed every 1000 features processed, however, this default may be overridden by setting this keyword with an integer for the desired interval. stream: Status information will be written to this file handle. Defaults to using `sys.stdout`, but any object with a `write` method is supported. silent: By default, non-fatal error notifications are printed to stdout, but this keyword may be set to disable these notifications. strict: Execution of the model mapping will cease upon the first error encountered. The default behavior is to attempt to continue. """ # Getting the default Feature ID range. default_range = self.check_fid_range(fid_range) # Setting the progress interval, if requested. if progress: if progress is True or not isinstance(progress, int): progress_interval = 1000 else: progress_interval = progress def _save(feat_range=default_range, num_feat=0, num_saved=0): if feat_range: layer_iter = self.layer[feat_range] else: layer_iter = self.layer for feat in layer_iter: num_feat += 1 # Getting the keyword arguments try: kwargs = self.feature_kwargs(feat) except LayerMapError as msg: # Something borked the validation if strict: raise elif not silent: stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg)) else: # Constructing the model using the keyword args is_update = False if self.unique: # If we want unique models on a particular field, handle the # geometry appropriately. try: # Getting the keyword arguments and retrieving # the unique model. u_kwargs = self.unique_kwargs(kwargs) m = self.model.objects.using(self.using).get(**u_kwargs) is_update = True # Getting the geometry (in OGR form), creating # one from the kwargs WKT, adding in additional # geometries, and update the attribute with the # just-updated geometry WKT. 
geom = getattr(m, self.geom_field).ogr new = OGRGeometry(kwargs[self.geom_field]) for g in new: geom.add(g) setattr(m, self.geom_field, geom.wkt) except ObjectDoesNotExist: # No unique model exists yet, create. m = self.model(**kwargs) else: m = self.model(**kwargs) try: # Attempting to save. m.save(using=self.using) num_saved += 1 if verbose: stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m)) except Exception as msg: if strict: # Bailing out if the `strict` keyword is set. if not silent: stream.write( 'Failed to save the feature (id: %s) into the ' 'model with the keyword arguments:\n' % feat.fid ) stream.write('%s\n' % kwargs) raise elif not silent: stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg)) # Printing progress information, if requested. if progress and num_feat % progress_interval == 0: stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved)) # Only used for status output purposes -- incremental saving uses the # values returned here. return num_saved, num_feat if self.transaction_decorator is not None: _save = self.transaction_decorator(_save) nfeat = self.layer.num_feat if step and isinstance(step, int) and step < nfeat: # Incremental saving is requested at the given interval (step) if default_range: raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.') beg, num_feat, num_saved = (0, 0, 0) indices = range(step, nfeat, step) n_i = len(indices) for i, end in enumerate(indices): # Constructing the slice to use for this step; the last slice is # special (e.g, [100:] instead of [90:100]). if i + 1 == n_i: step_slice = slice(beg, None) else: step_slice = slice(beg, end) try: num_feat, num_saved = _save(step_slice, num_feat, num_saved) beg = end except Exception: # Deliberately catch everything stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice)) raise else: # Otherwise, just calling the previously defined _save() function. _save()
bsd-3-clause