# -*- coding: utf-8 -*-
from datetime import date


class CVList():
    def __init__(self, **kwargs):
        self.items = {}
        self.order = {}
        for (k, v) in kwargs.items():
            self.add_item(k, v)

    def add_item(self, item_name, item_value):
        v = self.__prepare_value(item_value)
        if v:
            l = self.items.get(item_name)
            if l:
                l.extend(v)
            else:
                self.order[item_name] = len(self.items)
                self.items[item_name] = v

    def insert_item(self, item_name, item_value, position):
        self.add_item(item_name, item_value)
        self.replace_item(item_name, position)

    def replace_item(self, item_name, position):
        # shift every item at or after `position` down by one slot and put
        # `item_name` at `position`
        for (k, v) in [(k, v) for (k, v) in self.order.items()
                       if v >= position]:
            self.order[k] = position if k == item_name else v + 1

    @staticmethod
    def __prepare_value(value):
        if not value:
            return None
        if isinstance(value, list):
            return value
        else:
            return [value]

    def __getitem__(self, key):
        # exact (case-insensitive) match wins; otherwise fall back to the
        # last substring match
        res = None
        for k in self.items:
            if key.lower() == k.lower():
                return k, self.items[k]
            if key.lower() in k.lower():
                res = k, self.items[k]
        if res:
            return res
        raise KeyError(key)

    def __iter__(self):
        return CVListIter(self)

    def __str__(self):
        itms = []
        for (k, v) in self:
            itms.append('%s\n' % k.upper())
            for i in v:
                itms.append(str(i))
        return '\n'.join(itms)

    def get(self, key):
        try:
            return self.__getitem__(key)
        except KeyError:
            return None


class CVListIter():
    def __init__(self, lo):
        self.order = sorted(lo.order.items(), key=lambda x: x[1])
        self.lo = lo
        self.idx = -1
        self.N = len(self.order)

    def __iter__(self):
        return self

    def __next__(self):
        self.idx += 1
        if self.idx < self.N:
            k = self.order[self.idx][0]
            return k, self.lo.items[k]
        else:
            raise StopIteration

    def next(self):
        return self.__next__()


class CVPerson(CVList):
    def __init__(self, aim, name, lname=None, pname=None, **kwargs):
        self.name = name
        self.lname = lname
        self.pname = pname
        self._aim = aim
        super().__init__(**kwargs)

    @property
    def aim(self):
        return self._aim

    @aim.setter
    def aim(self, value):
        if isinstance(value, str):
            self._aim = [value]
        else:
            self._aim = list(value)

    def add_section(self, name, section):
        self.add_item(name, section)

    @property
    def full_name(self):
        fname = [self.name]
        if self.pname:
            fname.append(self.pname)
        if self.lname:
            fname.append(self.lname)
        return ' '.join(fname)

    def __str__(self):
        frmt = '\n{0}\n{1}\n{2}\n'
        return frmt.format(self.full_name, self.aim, self.__items_str())

    def __items_str(self):
        itms = []
        for (k, v) in self:
            itms.append('\n%s' % k)
            for i in v:
                if isinstance(i, tuple):
                    itms.append('* {0}|{1}'.format(*i))
                else:
                    itms.append(str(i))
        return '\n'.join(itms)


class CVWorkExperience(CVList):
    def __init__(self, company, position, start, end=None, resp=None,
                 tech=None, **kwargs):
        if not (company and position and start):
            txt = 'Wrong work parameters: {0}, {1}'.format(company, position)
            raise CVCVWorkExperienceException(txt)
        self._end = None
        self._start = None
        self.company = company
        self.position = position
        self.start = start
        self.end = end
        super().__init__(**kwargs)
        if resp:
            self.insert_item(
                'Responsibilities and Key Accomplishments', resp, 0)
        if tech:
            self.insert_item('Technologies', tech, 1)

    @property
    def start(self):
        return self._start

    @property
    def end(self):
        return self._end

    @start.setter
    def start(self, value):
        v = value if isinstance(value, PartialDate) else PartialDate(value)
        if self._end and v > self._end:
            txt = 'Start date can not be later than end date'
            raise CVCVWorkExperienceException(txt)
        self._start = v

    @end.setter
    def end(self, value):
        if value:
            v = value if isinstance(value, PartialDate) else PartialDate(value)
            if v < self._start:
                txt = 'Start date can not be later than end date'
                raise CVCVWorkExperienceException(txt)
            self._end = v
        else:
            self._end = None

    def __str__(self):
        frm = '{0}\n{1}\n{2}\n{3}\n{4}'
        return frm.format(self.start, self.end, self.company,
                          self.position, self.__items_str())

    def __items_str(self):
        itms = []
        for (k, v) in self:
            itms.append(k)
            itms.extend(['* %s' % i for i in v])
        return '\n'.join(itms)


class CVEducation():
    def __init__(self, start, end, name, dep=None, grad=None):
        self._start = None
        self._end = None
        self.start = start
        self.end = end
        self.name = name if name and name.lower() != 'none' else None
        self.dep = dep if dep and dep.lower() != 'none' else None
        self.grad = grad if grad and grad.lower() != 'none' else None

    @property
    def start(self):
        return self._start

    @property
    def end(self):
        return self._end

    @start.setter
    def start(self, value):
        v = value if isinstance(value, PartialDate) else PartialDate(value)
        if self._end and v > self._end:
            txt = 'Start date can not be later than end date'
            raise CVCVWorkExperienceException(txt)
        self._start = v

    @end.setter
    def end(self, value):
        if value:
            if isinstance(value, str) and value.lower() == 'none':
                self._end = None
            else:
                v = (value if isinstance(value, PartialDate)
                     else PartialDate(value))
                if v < self._start:
                    txt = 'Start date can not be later than end date'
                    raise CVCVWorkExperienceException(txt)
                self._end = v
        else:
            self._end = None

    def __str__(self):
        res = (str(self.start),
               str(self.end) if self.end else 'None',
               self.name if self.name else 'None',
               self.dep if self.dep else 'None',
               self.grad if self.grad else 'None')
        return '{0}\n{1}\n{2}\n{3}\n{4}'.format(*res)


class PartialDate(date):
    def __new__(cls, year_or_date, month=None, day=None):
        if isinstance(year_or_date, date):
            res = super().__new__(cls, year_or_date.year,
                                  year_or_date.month, year_or_date.day)
        elif isinstance(year_or_date, str):
            # 'YYYY', 'YYYY MM' or 'YYYY MM DD'; missing parts default to 1
            ymd = [int(itm) for itm in year_or_date.split()]
            ymd += [1] * (3 - len(ymd))
            res = super().__new__(cls, *ymd)
        else:
            __year = int(year_or_date)
            if __year < 1000:
                __year += 2000
            if day and month:
                res = super().__new__(cls, __year, int(month), int(day))
            elif month:
                res = super().__new__(cls, __year, int(month), 1)
            else:
                res = super().__new__(cls, __year, 1, 1)
        return res

    def __init__(self, year_or_date, month=None, day=None):
        # remember how much of the date was actually supplied
        if isinstance(year_or_date, str):
            ymd = year_or_date.split()
            self.period = ['y', 'm', 'd'][len(ymd) - 1]
        else:
            isd = isinstance(year_or_date, date) or (day and month)
            self.period = 'd' if isd else 'm' if month else 'y'

    def __str__(self):
        if self.period == 'd':
            return self.strftime('%Y %m %d')
        elif self.period == 'm':
            m = date(self.year, self.month, 1)
            return m.strftime('%Y %m')
        else:
            return str(self.year)

    def strf(self):
        if self.period == 'd':
            return self.strftime('%x')
        elif self.period == 'm':
            m = date(1900, self.month, 1).strftime('%B')
            return '%s %s' % (m, self.year)
        else:
            return str(self.year)


class CVException(Exception):
    pass


class CVCVWorkExperienceException(CVException):
    pass


class CVPersonException(CVException):
    pass
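# --- A minimal usage sketch (an addition, not part of the module above). ---
# It only exercises behaviour visible in the classes as written; the sample
# company/person names are made up for illustration.
if __name__ == '__main__':
    # PartialDate remembers how precise the input was.
    d = PartialDate('2015 06')          # year + month
    print(d)                            # -> '2015 06'
    print(PartialDate(2014))            # -> '2014'

    work = CVWorkExperience(
        company='Acme', position='Engineer',
        start='2014', end='2015 06',
        resp=['Built the CV generator'], tech=['Python'])
    print(work)

    person = CVPerson(aim=['Python developer'], name='Jane', lname='Doe')
    person.add_section('Work Experience', work)
    print(person.full_name)             # -> 'Jane Doe'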
import numpy as np
from numcodecs import (BZ2, AsType, Blosc, Categorize, Delta,
                       FixedScaleOffset, PackBits, Quantize, Zlib)
from numpy.testing import assert_array_almost_equal, assert_array_equal

from zarr.creation import array

compressors = [
    None,
    Zlib(),
    BZ2(),
    Blosc(),
]

try:
    from numcodecs import LZMA
except ImportError:  # pragma: no cover
    LZMA = None
else:
    compressors.append(LZMA())


def test_array_with_delta_filter():
    # setup
    astype = 'u1'
    dtype = 'i8'
    filters = [Delta(astype=astype, dtype=dtype)]
    data = np.arange(100, dtype=dtype)

    for compressor in compressors:
        a = array(data, chunks=10, compressor=compressor, filters=filters)

        # check round-trip
        assert_array_equal(data, a[:])

        # check chunks
        for i in range(10):
            cdata = a.store[str(i)]
            if compressor:
                chunk = compressor.decode(cdata)
            else:
                chunk = cdata
            actual = np.frombuffer(chunk, dtype=astype)
            expect = np.array([i * 10] + ([1] * 9), dtype=astype)
            assert_array_equal(expect, actual)


def test_array_with_astype_filter():
    # setup
    encode_dtype = 'i1'
    decode_dtype = 'i8'
    filters = [AsType(encode_dtype=encode_dtype, decode_dtype=decode_dtype)]
    chunks = 10
    chunk_size = 10
    shape = chunks * chunk_size
    data = np.arange(shape, dtype=decode_dtype)

    for compressor in compressors:
        a = array(data, chunks=chunks, compressor=compressor,
                  filters=filters)

        # check round-trip
        assert data.dtype == a.dtype
        assert_array_equal(data, a[:])

        # check chunks
        for i in range(chunks):
            cdata = a.store[str(i)]
            if compressor:
                chunk = compressor.decode(cdata)
            else:
                chunk = cdata
            actual = np.frombuffer(chunk, dtype=encode_dtype)
            expect = data.astype(encode_dtype)[i*chunk_size:(i+1)*chunk_size]
            assert_array_equal(expect, actual)


def test_array_with_scaleoffset_filter():
    # setup
    astype = 'u1'
    dtype = 'f8'
    flt = FixedScaleOffset(scale=10, offset=1000, astype=astype, dtype=dtype)
    filters = [flt]
    data = np.linspace(1000, 1001, 34, dtype='f8')

    for compressor in compressors:
        a = array(data, chunks=5, compressor=compressor, filters=filters)

        # check round-trip
        assert_array_almost_equal(data, a[:], decimal=1)

        # check chunks
        for i in range(6):
            cdata = a.store[str(i)]
            if compressor:
                chunk = compressor.decode(cdata)
            else:
                chunk = cdata
            actual = np.frombuffer(chunk, dtype=astype)
            expect = flt.encode(data[i*5:(i*5)+5])
            assert_array_equal(expect, actual)


def test_array_with_quantize_filter():
    # setup
    dtype = 'f8'
    digits = 3
    flt = Quantize(digits=digits, dtype=dtype)
    filters = [flt]
    data = np.linspace(0, 1, 34, dtype=dtype)

    for compressor in compressors:
        a = array(data, chunks=5, compressor=compressor, filters=filters)

        # check round-trip
        assert_array_almost_equal(data, a[:], decimal=digits)

        # check chunks
        for i in range(6):
            cdata = a.store[str(i)]
            if compressor:
                chunk = compressor.decode(cdata)
            else:
                chunk = cdata
            actual = np.frombuffer(chunk, dtype=dtype)
            expect = flt.encode(data[i*5:(i*5)+5])
            assert_array_equal(expect, actual)


def test_array_with_packbits_filter():
    # setup
    flt = PackBits()
    filters = [flt]
    data = np.random.randint(0, 2, size=100, dtype=bool)

    for compressor in compressors:
        a = array(data, chunks=5, compressor=compressor, filters=filters)

        # check round-trip
        assert_array_equal(data, a[:])

        # check chunks
        for i in range(20):
            cdata = a.store[str(i)]
            if compressor:
                chunk = compressor.decode(cdata)
            else:
                chunk = cdata
            actual = np.frombuffer(chunk, dtype='u1')
            expect = flt.encode(data[i*5:(i*5)+5])
            assert_array_equal(expect, actual)


def test_array_with_categorize_filter():
    # setup
    data = np.random.choice(['foo', 'bar', 'baz'], size=100)
    flt = Categorize(dtype=data.dtype, labels=['foo', 'bar', 'baz'])
    filters = [flt]

    for compressor in compressors:
        a = array(data, chunks=5, compressor=compressor, filters=filters)

        # check round-trip
        assert_array_equal(data, a[:])

        # check chunks
        for i in range(20):
            cdata = a.store[str(i)]
            if a.compressor:
                chunk = a.compressor.decode(cdata)
            else:
                chunk = cdata
            actual = np.frombuffer(chunk, dtype='u1')
            expect = flt.encode(data[i*5:(i*5)+5])
            assert_array_equal(expect, actual)


def test_compressor_as_filter():
    for compressor in compressors:
        if compressor is None:
            # skip
            continue

        # setup filters
        dtype = 'i8'
        filters = [
            Delta(dtype=dtype),
            compressor
        ]

        # setup data and arrays
        data = np.arange(10000, dtype=dtype)
        a1 = array(data, chunks=1000, compressor=None, filters=filters)
        a2 = array(data, chunks=1000, compressor=compressor,
                   filters=filters[:1])

        # check storage
        for i in range(10):
            x = bytes(a1.store[str(i)])
            y = bytes(a2.store[str(i)])
            assert x == y

        # check data
        assert_array_equal(data, a1[:])
        assert_array_equal(a1[:], a2[:])
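# --- An illustrative sketch (an addition, not one of the original tests). ---
# It shows by hand what the per-chunk checks above rely on: a stored chunk
# is the filter pipeline applied in order, then the compressor, so decoding
# a raw chunk means decompressing and then undoing each filter in reverse.
def _manual_chunk_decode_sketch():
    dtype = 'i8'
    flt = Delta(dtype=dtype)
    compressor = Zlib()
    data = np.arange(10, dtype=dtype)
    a = array(data, chunks=10, compressor=compressor, filters=[flt])
    # undo the pipeline by hand: decompress, then undo the filter
    raw = compressor.decode(a.store['0'])
    decoded = flt.decode(np.frombuffer(raw, dtype=dtype))
    assert_array_equal(data, decoded)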
import json

import numpy as np
import pandas as pd


class TrafficServer:
    """Emulates the data plane of a communication system.

    TrafficServer provides service for the traffic generated by a
    TrafficEmulator. It is essentially the data plane of a communication
    system, and decides on service under the instruction of the control
    plane, i.e. the Controller. To use it, first feed it some traffic and
    wait for it to update its internal state, then make an observation from
    it and propagate that to the controller, which will issue specific
    control commands. Using these control commands, one can get service for
    the traffic fed in, and also receive an operational cost at the same
    time.

    Note that not all functions are necessarily fully controlled by the
    controller; the TrafficServer may keep some sovereignty over
    miscellaneous functions and only leave the most important parts for the
    Controller to decide. In this regard, the TrafficServer can have some
    hidden state that is not observable by the controller.

    An important implementation feature in our current project is to give
    the TrafficServer the ability to sleep and to queue requests. These
    operational states are reflected in both the service generated and the
    cost emitted.
    """

    def __init__(self, verbose=0, cost=None):
        self.epoch = 0
        self.q = {}
        self.verbose = verbose  # verbosity level
        self.last_traffic_ob = 0
        self.last_q_ob = 0
        self.last_sleep_flag = True
        if cost is None:
            self.OP_COST, self.SW_COST = -1, -0.1
        else:
            self.OP_COST, self.SW_COST = cost
        if verbose > 0:
            print(" " * 4 + "TrafficServer.__init__():", end=' ')
            print("new TrafficServer with params:")
            print(" " * 8 + "Op cost: {}, Sw cost: {}".format(
                self.OP_COST, self.SW_COST))

    # Public Methods
    def observe(self, traffic_df):
        """Observe from traffic and queue, then compile a summary as
        information.
        """
        if traffic_df is None:
            raise ValueError("Please feed traffic.")
        # Compile observation
        last_traffic_ob = self.last_traffic_ob
        last_q_ob = self.last_q_ob
        new_q_ob = self.observe_q_(self.q)
        self.last_traffic_ob = self.observe_traffic_(traffic_df)
        self.last_q_ob = new_q_ob
        # Enqueue traffic
        traffic_df_cp = traffic_df.copy()
        traffic_df_cp['arriveTime_epoch'] = self.epoch
        if len(traffic_df_cp) > 0:
            self.q[self.epoch] = traffic_df_cp
        return last_q_ob, last_traffic_ob, new_q_ob

    def get_service_and_cost(self, control):
        """Generate service based on the control commands."""
        sleep_flag, control_req = control  # extract control commands
        cost = 0
        if sleep_flag:
            service = pd.DataFrame(
                columns=['sessionID', 'service_per_request_domain'])
        else:
            service = self.serve_requests_(control_req)
            cost += self.OP_COST
        # switching cost is charged whenever the sleep state flips
        cost += self.SW_COST if self.last_sleep_flag != sleep_flag else 0
        self.last_sleep_flag = sleep_flag
        self.epoch += 1
        return service, cost

    def reset(self):
        self.epoch = 0
        self.q = {}
        self.last_traffic_ob = 0
        self.last_q_ob = 0
        self.last_sleep_flag = True

    # Private Methods
    @staticmethod
    def observe_traffic_(traffic_df):
        """Summarize current traffic.

        Currently the summary is the total number of requests (bytes are
        tallied as well but not returned).
        """
        num_req = 0
        num_bytes = 0
        for idx in traffic_df.index:
            bytesSent_req_domain = json.loads(
                traffic_df.at[idx, 'bytesSent_per_request_domain'])
            num_req += sum(len(bytesSent_req_domain[domain])
                           for domain in bytesSent_req_domain)
            num_bytes += sum(bytesSent_req_domain[domain][reqID]
                             for domain in bytesSent_req_domain
                             for reqID in bytesSent_req_domain[domain])
        return num_req

    @staticmethod
    def observe_q_(q):
        """Summarize queue state for observation.

        Currently the summary is the number of requests left in the queue.

        :return: queue observation
        """
        q_len = 0
        for epoch, df in q.items():
            for idx in df.index:
                bytesSent_req_domain = json.loads(
                    df.at[idx, 'bytesSent_per_request_domain'])
                q_len += sum(len(bytesSent_req_domain[domain])
                             for domain in bytesSent_req_domain)
        return q_len

    def dequeue_all_traffic_(self):
        q = self.q
        self.q = {}
        return q

    def serve_requests_(self, control_req):
        """Serve queued requests.

        :param control_req: control command ('serve_all', 'queue_all' or
            'random_serve_and_queue')
        :return: service DataFrame
        """
        service_df = pd.DataFrame(
            columns=['sessionID', 'service_per_request_domain'])
        num_req_serve = 0
        num_req_queue = 0
        num_drops = 0
        # iterate through q, append to service_df, merge duplicated session
        # IDs, update q
        drop_df_keys = []
        for epoch_key, df in self.q.items():
            drop_indices = []
            for idx in df.index:
                sessionID = int(df.at[idx, 'sessionID'])
                bytesSent_req_domain = json.loads(
                    df.at[idx, 'bytesSent_per_request_domain'])
                (service_req_domain, bytesSent_req_domain_updated,
                 num_req_serve_row, num_req_queue_row) = self.serve_row_(
                    control_req=control_req,
                    bytesSent_req_domain=bytesSent_req_domain)
                # append to service_df (with dedup)
                service_df = self.append_service_row(
                    service_df=service_df, sessionID=sessionID,
                    service_req_domain=service_req_domain)
                # update queue: write the still-queued requests back into
                # the entry, or mark fully served entries for removal
                if num_req_queue_row == 0:
                    drop_indices.append(idx)
                else:
                    df.at[idx, 'bytesSent_per_request_domain'] = \
                        json.dumps(bytesSent_req_domain_updated)
                # update counters
                num_req_serve += num_req_serve_row
                num_req_queue += num_req_queue_row
            df.drop(drop_indices, inplace=True)
            num_drops += len(drop_indices)
            if len(df) == 0:
                drop_df_keys.append(epoch_key)
        for epoch_key in drop_df_keys:
            del self.q[epoch_key]
        # Verbose message
        if self.verbose > 0:
            print(" " * 8 + "TrafficServer.serve_requests_():", end=' ')
            if control_req == 'serve_all':
                print("Serving all {} requests in queue.".format(
                    num_req_serve))
            elif control_req == 'queue_all':
                print("Queuing all {} requests in queue.".format(
                    num_req_queue))
            elif control_req == 'random_serve_and_queue':
                print("Serving {} requests and queuing {} requests.".format(
                    num_req_serve, num_req_queue))
            else:
                print("Control command not understood, "
                      "returning empty service_df.")
            print(" " * 8 + "TrafficServer.serve_requests_():", end=' ')
            print("Dropped {} q entries to deduplicate".format(num_drops))
        return service_df

    @staticmethod
    def serve_row_(control_req, bytesSent_req_domain):
        service_req_domain = {}
        bytesSent_req_domain_updated = {}
        num_req_serve = 0
        num_req_queue = 0
        for domain in bytesSent_req_domain:  # each domain in the entry
            for reqID in bytesSent_req_domain[domain]:  # each request
                if domain not in service_req_domain:
                    service_req_domain[domain] = {}
                r = np.random.rand()
                if control_req == 'serve_all' or (
                        control_req == 'random_serve_and_queue' and r < 0.5):
                    service_req_domain[domain][reqID] = 'serve'
                    num_req_serve += 1
                else:
                    service_req_domain[domain][reqID] = 'queue'
                    num_req_queue += 1
                    # for queued requests, copy to the bytesSent dict as
                    # the updated queue entry
                    if domain not in bytesSent_req_domain_updated:
                        bytesSent_req_domain_updated[domain] = {}
                    bytesSent_req_domain_updated[domain][reqID] = \
                        bytesSent_req_domain[domain][reqID]
        return (service_req_domain, bytesSent_req_domain_updated,
                num_req_serve, num_req_queue)

    @staticmethod
    def append_service_row(service_df, sessionID, service_req_domain):
        flags = (int(sessionID) == service_df['sessionID'])
        matches = np.flatnonzero(flags)
        if len(matches) == 0:
            # no duplication: append a new row
            new_row = pd.DataFrame([{
                'sessionID': sessionID,
                'service_per_request_domain':
                    json.dumps(service_req_domain)}])
            service_df = pd.concat([service_df, new_row], ignore_index=True)
        elif len(matches) == 1:
            # merge with the existing row
            idx_old = matches[0]
            service_req_domain_old = json.loads(
                service_df.at[idx_old, 'service_per_request_domain'])
            for domain in service_req_domain:
                for reqID in service_req_domain[domain]:
                    if domain not in service_req_domain_old:
                        service_req_domain_old[domain] = {}
                    service_req_domain_old[domain][reqID] = \
                        service_req_domain[domain][reqID]
            service_df.at[idx_old, 'service_per_request_domain'] = \
                json.dumps(service_req_domain_old)
        else:
            raise ValueError(
                "More than one existing entry found in service_df!")
        return service_df
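# --- A minimal driving-loop sketch (an addition, not part of the class). ---
# The traffic schema is assumed from the column names used above: a
# 'sessionID' column and a 'bytesSent_per_request_domain' column holding a
# JSON {domain: {reqID: bytes}} map. The controller here is a stand-in that
# always stays awake and serves everything.
if __name__ == '__main__':
    server = TrafficServer(verbose=1)
    traffic = pd.DataFrame([{
        'sessionID': 1,
        'bytesSent_per_request_domain':
            json.dumps({'example.com': {'0': 100}}),
    }])
    for epoch in range(3):
        last_q_ob, last_traffic_ob, new_q_ob = server.observe(traffic)
        # control = (sleep_flag, control_req)
        service, cost = server.get_service_and_cost((False, 'serve_all'))
        print(epoch, new_q_ob, len(service), cost)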
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Test class for iSCSI deploy mechanism."""

import os
import tempfile

from ironic_lib import disk_utils
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
from oslo_utils import fileutils

from ironic.common import dhcp_factory
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import keystone
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils

CONF = cfg.CONF

INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()


class IscsiDeployPrivateMethodsTestCase(db_base.DbTestCase):

    def setUp(self):
        super(IscsiDeployPrivateMethodsTestCase, self).setUp()
        n = {
            'driver': 'fake_pxe',
            'instance_info': INST_INFO_DICT,
            'driver_info': DRV_INFO_DICT,
            'driver_internal_info': DRV_INTERNAL_INFO_DICT,
        }
        mgr_utils.mock_the_extension_manager(driver="fake_pxe")
        self.node = obj_utils.create_test_node(self.context, **n)

    def test__save_disk_layout(self):
        info = dict(INST_INFO_DICT)
        info['ephemeral_gb'] = 10
        info['swap_mb'] = 0
        info['root_gb'] = 10
        info['preserve_ephemeral'] = False
        self.node.instance_info = info
        iscsi_deploy._save_disk_layout(self.node, info)
        self.node.refresh()
        for param in ('ephemeral_gb', 'swap_mb', 'root_gb'):
            self.assertEqual(
                info[param],
                self.node.driver_internal_info['instance'][param]
            )

    def test__get_image_dir_path(self):
        self.assertEqual(os.path.join(CONF.pxe.images_path,
                                      self.node.uuid),
                         iscsi_deploy._get_image_dir_path(self.node.uuid))

    def test__get_image_file_path(self):
        self.assertEqual(os.path.join(CONF.pxe.images_path,
                                      self.node.uuid, 'disk'),
                         iscsi_deploy._get_image_file_path(self.node.uuid))


class IscsiDeployMethodsTestCase(db_base.DbTestCase):

    def setUp(self):
        super(IscsiDeployMethodsTestCase, self).setUp()
        instance_info = dict(INST_INFO_DICT)
        instance_info['deploy_key'] = 'fake-56789'
        n = {
            'driver': 'fake_pxe',
            'instance_info': instance_info,
            'driver_info': DRV_INFO_DICT,
            'driver_internal_info': DRV_INTERNAL_INFO_DICT,
        }
        mgr_utils.mock_the_extension_manager(driver="fake_pxe")
        self.node = obj_utils.create_test_node(self.context, **n)

    @mock.patch.object(disk_utils, 'get_image_mb', autospec=True)
    def test_check_image_size(self, get_image_mb_mock):
        get_image_mb_mock.return_value = 1000
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.instance_info['root_gb'] = 1
            iscsi_deploy.check_image_size(task)
            get_image_mb_mock.assert_called_once_with(
                iscsi_deploy._get_image_file_path(task.node.uuid))

    @mock.patch.object(disk_utils, 'get_image_mb', autospec=True)
    def test_check_image_size_fails(self, get_image_mb_mock):
        get_image_mb_mock.return_value = 1025
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.instance_info['root_gb'] = 1
            self.assertRaises(exception.InstanceDeployFailure,
                              iscsi_deploy.check_image_size,
                              task)
            get_image_mb_mock.assert_called_once_with(
                iscsi_deploy._get_image_file_path(task.node.uuid))

    @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
    def test_cache_instance_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(images_path=temp_dir, group='pxe')
        self.config(instance_master_path=os.path.join(
            temp_dir, 'instance_master_path'), group='pxe')
        fileutils.ensure_tree(CONF.pxe.instance_master_path)

        (uuid, image_path) = iscsi_deploy.cache_instance_image(None,
                                                               self.node)
        mock_fetch_image.assert_called_once_with(None,
                                                 mock.ANY,
                                                 [(uuid, image_path)],
                                                 True)
        self.assertEqual('glance://image_uuid', uuid)
        self.assertEqual(os.path.join(temp_dir,
                                      self.node.uuid,
                                      'disk'),
                         image_path)

    @mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
    @mock.patch.object(utils, 'rmtree_without_raise', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    def test_destroy_images(self, mock_cache, mock_rmtree, mock_unlink):
        self.config(images_path='/path', group='pxe')

        iscsi_deploy.destroy_images('uuid')

        mock_cache.return_value.clean_up.assert_called_once_with()
        mock_unlink.assert_called_once_with('/path/uuid/disk')
        mock_rmtree.assert_called_once_with('/path/uuid')

    @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy_fail(self, deploy_mock, power_mock,
                                  mock_image_cache, mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        deploy_mock.side_effect = exception.InstanceDeployFailure(
            "test deploy error")
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            params = iscsi_deploy.get_deploy_info(task.node, **kwargs)
            self.assertRaises(exception.InstanceDeployFailure,
                              iscsi_deploy.continue_deploy,
                              task, **kwargs)
            self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
            self.assertEqual(states.ACTIVE, task.node.target_provision_state)
            self.assertIsNotNone(task.node.last_error)
            deploy_mock.assert_called_once_with(**params)
            power_mock.assert_called_once_with(task, states.POWER_OFF)
            mock_image_cache.assert_called_once_with()
            mock_image_cache.return_value.clean_up.assert_called_once_with()
            self.assertFalse(mock_disk_layout.called)

    @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy_fail_no_root_uuid_or_disk_id(
            self, deploy_mock, power_mock, mock_image_cache,
            mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        deploy_mock.return_value = {}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            params = iscsi_deploy.get_deploy_info(task.node, **kwargs)
            self.assertRaises(exception.InstanceDeployFailure,
                              iscsi_deploy.continue_deploy,
                              task, **kwargs)
            self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
            self.assertEqual(states.ACTIVE, task.node.target_provision_state)
            self.assertIsNotNone(task.node.last_error)
            deploy_mock.assert_called_once_with(**params)
            power_mock.assert_called_once_with(task, states.POWER_OFF)
            mock_image_cache.assert_called_once_with()
            mock_image_cache.return_value.clean_up.assert_called_once_with()
            self.assertFalse(mock_disk_layout.called)

    @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy_fail_empty_root_uuid(
            self, deploy_mock, power_mock, mock_image_cache,
            mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        deploy_mock.return_value = {'root uuid': ''}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            params = iscsi_deploy.get_deploy_info(task.node, **kwargs)
            self.assertRaises(exception.InstanceDeployFailure,
                              iscsi_deploy.continue_deploy,
                              task, **kwargs)
            self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
            self.assertEqual(states.ACTIVE, task.node.target_provision_state)
            self.assertIsNotNone(task.node.last_error)
            deploy_mock.assert_called_once_with(**params)
            power_mock.assert_called_once_with(task, states.POWER_OFF)
            mock_image_cache.assert_called_once_with()
            mock_image_cache.return_value.clean_up.assert_called_once_with()
            self.assertFalse(mock_disk_layout.called)

    @mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
    @mock.patch.object(iscsi_deploy, 'LOG', autospec=True)
    @mock.patch.object(iscsi_deploy, 'get_deploy_info', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
    def test_continue_deploy(self, deploy_mock, power_mock, mock_image_cache,
                             mock_deploy_info, mock_log, mock_disk_layout):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

        mock_deploy_info.return_value = {
            'address': '123456',
            'boot_option': 'netboot',
            'configdrive': "I've got the power",
            'ephemeral_format': None,
            'ephemeral_mb': 0,
            'image_path': (u'/var/lib/ironic/images/1be26c0b-03f2-4d2e-ae87-'
                           u'c02d7f33c123/disk'),
            'iqn': 'aaa-bbb',
            'lun': '1',
            'node_uuid': u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
            'port': '3260',
            'preserve_ephemeral': True,
            'root_mb': 102400,
            'swap_mb': 0,
        }
        log_params = mock_deploy_info.return_value.copy()
        # Make sure we don't log the full content of the configdrive
        log_params['configdrive'] = '***'
        expected_dict = {
            'node': self.node.uuid,
            'params': log_params,
        }
        uuid_dict_returned = {'root uuid': '12345678-87654321'}
        deploy_mock.return_value = uuid_dict_returned

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            mock_log.isEnabledFor.return_value = True
            retval = iscsi_deploy.continue_deploy(task, **kwargs)
            mock_log.debug.assert_called_once_with(
                mock.ANY, expected_dict)
            self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
            self.assertEqual(states.ACTIVE, task.node.target_provision_state)
            self.assertIsNone(task.node.last_error)
            mock_image_cache.assert_called_once_with()
            mock_image_cache.return_value.clean_up.assert_called_once_with()
            self.assertEqual(uuid_dict_returned, retval)
            mock_disk_layout.assert_called_once_with(task.node, mock.ANY)

    @mock.patch.object(iscsi_deploy, 'LOG', autospec=True)
    @mock.patch.object(iscsi_deploy, 'get_deploy_info', autospec=True)
    @mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(deploy_utils, 'deploy_disk_image', autospec=True)
    def test_continue_deploy_whole_disk_image(
            self, deploy_mock, power_mock, mock_image_cache,
            mock_deploy_info, mock_log):
        kwargs = {'address': '123456', 'iqn': 'aaa-bbb'}
        self.node.provision_state = states.DEPLOYWAIT
        self.node.target_provision_state = states.ACTIVE
        self.node.save()

        mock_deploy_info.return_value = {
            'address': '123456',
            'image_path': (u'/var/lib/ironic/images/1be26c0b-03f2-4d2e-ae87-'
                           u'c02d7f33c123/disk'),
            'iqn': 'aaa-bbb',
            'lun': '1',
            'node_uuid': u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
            'port': '3260',
        }
        log_params = mock_deploy_info.return_value.copy()
        expected_dict = {
            'node': self.node.uuid,
            'params': log_params,
        }
        uuid_dict_returned = {'disk identifier': '87654321'}
        deploy_mock.return_value = uuid_dict_returned

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.driver_internal_info['is_whole_disk_image'] = True
            mock_log.isEnabledFor.return_value = True
            retval = iscsi_deploy.continue_deploy(task, **kwargs)
            mock_log.debug.assert_called_once_with(
                mock.ANY, expected_dict)
            self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
            self.assertEqual(states.ACTIVE, task.node.target_provision_state)
            self.assertIsNone(task.node.last_error)
            mock_image_cache.assert_called_once_with()
            mock_image_cache.return_value.clean_up.assert_called_once_with()
            self.assertEqual(uuid_dict_returned, retval)

    def _test_get_deploy_info(self, extra_instance_info=None):
        if extra_instance_info is None:
            extra_instance_info = {}

        instance_info = self.node.instance_info
        instance_info.update(extra_instance_info)
        self.node.instance_info = instance_info
        kwargs = {'address': '1.1.1.1', 'iqn': 'target-iqn'}
        ret_val = iscsi_deploy.get_deploy_info(self.node, **kwargs)
        self.assertEqual('1.1.1.1', ret_val['address'])
        self.assertEqual('target-iqn', ret_val['iqn'])
        return ret_val

    def test_get_deploy_info_boot_option_default(self):
        ret_val = self._test_get_deploy_info()
        self.assertEqual('netboot', ret_val['boot_option'])

    def test_get_deploy_info_netboot_specified(self):
        capabilities = {'capabilities': {'boot_option': 'netboot'}}
        ret_val = self._test_get_deploy_info(
            extra_instance_info=capabilities)
        self.assertEqual('netboot', ret_val['boot_option'])

    def test_get_deploy_info_localboot(self):
        capabilities = {'capabilities': {'boot_option': 'local'}}
        ret_val = self._test_get_deploy_info(
            extra_instance_info=capabilities)
        self.assertEqual('local', ret_val['boot_option'])

    def test_get_deploy_info_disk_label(self):
        capabilities = {'capabilities': {'disk_label': 'msdos'}}
        ret_val = self._test_get_deploy_info(
            extra_instance_info=capabilities)
        self.assertEqual('msdos', ret_val['disk_label'])

    def test_get_deploy_info_not_specified(self):
        ret_val = self._test_get_deploy_info()
        self.assertNotIn('disk_label', ret_val)

    def test_get_deploy_info_portal_port(self):
        self.config(portal_port=3266, group='iscsi')
        ret_val = self._test_get_deploy_info()
        self.assertEqual(3266, ret_val['port'])

    @mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
    def test_do_agent_iscsi_deploy_okay(self, continue_deploy_mock):
        agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
        agent_client_mock.start_iscsi_target.return_value = {
            'command_status': 'SUCCESS', 'command_error': None}
        driver_internal_info = {'agent_url': 'http://1.2.3.4:1234'}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        uuid_dict_returned = {'root uuid': 'some-root-uuid'}
        continue_deploy_mock.return_value = uuid_dict_returned
        expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            ret_val = iscsi_deploy.do_agent_iscsi_deploy(
                task, agent_client_mock)
            agent_client_mock.start_iscsi_target.assert_called_once_with(
                task.node, expected_iqn, 3260, wipe_disk_metadata=True)
            continue_deploy_mock.assert_called_once_with(
                task, iqn=expected_iqn, address='1.2.3.4')
            self.assertEqual(
                'some-root-uuid',
                task.node.driver_internal_info['root_uuid_or_disk_id'])
            self.assertEqual(ret_val, uuid_dict_returned)

    @mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
    def test_do_agent_iscsi_deploy_preserve_ephemeral(
            self, continue_deploy_mock):
        """Ensure the disk is not wiped if preserve_ephemeral is True."""
        agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
        agent_client_mock.start_iscsi_target.return_value = {
            'command_status': 'SUCCESS', 'command_error': None}
        driver_internal_info = {
            'agent_url': 'http://1.2.3.4:1234'}
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        uuid_dict_returned = {'root uuid': 'some-root-uuid'}
        continue_deploy_mock.return_value = uuid_dict_returned
        expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.instance_info['preserve_ephemeral'] = True
            iscsi_deploy.do_agent_iscsi_deploy(
                task, agent_client_mock)
            agent_client_mock.start_iscsi_target.assert_called_once_with(
                task.node, expected_iqn, 3260, wipe_disk_metadata=False)

    def test_do_agent_iscsi_deploy_start_iscsi_failure(self):
        agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
        agent_client_mock.start_iscsi_target.return_value = {
            'command_status': 'FAILED', 'command_error': 'booom'}
        self.node.provision_state = states.DEPLOYING
        self.node.target_provision_state = states.ACTIVE
        self.node.save()
        expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InstanceDeployFailure,
                              iscsi_deploy.do_agent_iscsi_deploy,
                              task, agent_client_mock)
            agent_client_mock.start_iscsi_target.assert_called_once_with(
                task.node, expected_iqn, 3260, wipe_disk_metadata=True)
        self.node.refresh()
        self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
        self.assertEqual(states.ACTIVE, self.node.target_provision_state)
        self.assertIsNotNone(self.node.last_error)

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    def test_validate_good_api_url_from_config_file(self, mock_ks):
        # not present in the keystone catalog
        mock_ks.side_effect = exception.KeystoneFailure
        self.config(group='conductor', api_url='http://foo')

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            iscsi_deploy.validate(task)
        self.assertFalse(mock_ks.called)

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    def test_validate_good_api_url_from_keystone(self, mock_ks):
        # present in the keystone catalog
        mock_ks.return_value = 'http://127.0.0.1:1234'
        # not present in the config file
        self.config(group='conductor', api_url=None)

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            iscsi_deploy.validate(task)
        mock_ks.assert_called_once_with()

    @mock.patch.object(keystone, 'get_service_url', autospec=True)
    def test_validate_fail_no_api_url(self, mock_ks):
        # not present in the keystone catalog
        mock_ks.side_effect = exception.KeystoneFailure
        # not present in the config file
        self.config(group='conductor', api_url=None)

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              iscsi_deploy.validate, task)
        mock_ks.assert_called_once_with()

    def test_validate_invalid_root_device_hints(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.properties['root_device'] = {'size': 'not-int'}
            self.assertRaises(exception.InvalidParameterValue,
                              iscsi_deploy.validate, task)


class ISCSIDeployTestCase(db_base.DbTestCase):

    def setUp(self):
        super(ISCSIDeployTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_pxe")
        self.driver = driver_factory.get_driver("fake_pxe")
        self.driver.vendor = iscsi_deploy.VendorPassthru()
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_pxe',
            instance_info=INST_INFO_DICT,
            driver_info=DRV_INFO_DICT,
            driver_internal_info=DRV_INTERNAL_INFO_DICT,
        )
        self.node.driver_internal_info['agent_url'] = 'http://1.2.3.4:1234'
        self.task = mock.MagicMock(spec=task_manager.TaskManager)
        self.task.shared = False
        self.task.node = self.node
        self.task.driver = self.driver
        self.task.context = self.context
        dhcp_factory.DHCPFactory._dhcp_provider = None

    def test_get_properties(self):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual({}, task.driver.deploy.get_properties())

    @mock.patch.object(iscsi_deploy, 'validate', autospec=True)
    @mock.patch.object(deploy_utils, 'validate_capabilities', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
    def test_validate(self, pxe_validate_mock,
                      validate_capabilities_mock, validate_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.deploy.validate(task)

            pxe_validate_mock.assert_called_once_with(task.driver.boot, task)
            validate_capabilities_mock.assert_called_once_with(task.node)
            validate_mock.assert_called_once_with(task)

    @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
                'add_provisioning_network', spec_set=True, autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
    def test_prepare_node_active(self, prepare_instance_mock,
                                 add_provisioning_net_mock):
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.provision_state = states.ACTIVE

            task.driver.deploy.prepare(task)

            prepare_instance_mock.assert_called_once_with(
                task.driver.boot, task)
            self.assertEqual(0, add_provisioning_net_mock.call_count)

    @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
    @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
                'add_provisioning_network', spec_set=True, autospec=True)
    def test_prepare_node_deploying(self, add_provisioning_net_mock,
                                    mock_prepare_ramdisk,
                                    mock_agent_options):
        mock_agent_options.return_value = {'c': 'd'}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.provision_state = states.DEPLOYING

            task.driver.deploy.prepare(task)

            mock_agent_options.assert_called_once_with(task.node)
            mock_prepare_ramdisk.assert_called_once_with(
                task.driver.boot, task, {'c': 'd'})
            add_provisioning_net_mock.assert_called_once_with(mock.ANY, task)

    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
    @mock.patch.object(iscsi_deploy, 'cache_instance_image', autospec=True)
    def test_deploy(self, mock_cache_instance_image,
                    mock_check_image_size, mock_node_power_action):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            state = task.driver.deploy.deploy(task)
            self.assertEqual(state, states.DEPLOYWAIT)
            mock_cache_instance_image.assert_called_once_with(
                self.context, task.node)
            mock_check_image_size.assert_called_once_with(task)
            mock_node_power_action.assert_called_once_with(task,
                                                           states.REBOOT)

    @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
                'unconfigure_tenant_networks', autospec=True)
    @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
    def test_tear_down(self, node_power_action_mock,
                       unconfigure_tenant_nets_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            state = task.driver.deploy.tear_down(task)
            self.assertEqual(state, states.DELETED)
            node_power_action_mock.assert_called_once_with(task,
                                                           states.POWER_OFF)
            unconfigure_tenant_nets_mock.assert_called_once_with(mock.ANY,
                                                                 task)

    @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
    @mock.patch.object(pxe.PXEBoot, 'clean_up_instance', autospec=True)
    @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
    @mock.patch.object(iscsi_deploy, 'destroy_images', autospec=True)
    def test_clean_up(self, destroy_images_mock, clean_up_ramdisk_mock,
                      clean_up_instance_mock, clean_dhcp_mock,
                      set_dhcp_provider_mock):
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.deploy.clean_up(task)
            destroy_images_mock.assert_called_once_with(task.node.uuid)
            clean_up_ramdisk_mock.assert_called_once_with(
                task.driver.boot, task)
            clean_up_instance_mock.assert_called_once_with(
                task.driver.boot, task)
            set_dhcp_provider_mock.assert_called_once_with()
            clean_dhcp_mock.assert_called_once_with(task)

    @mock.patch.object(deploy_utils, 'prepare_inband_cleaning',
                       autospec=True)
    def test_prepare_cleaning(self, prepare_inband_cleaning_mock):
        prepare_inband_cleaning_mock.return_value = states.CLEANWAIT
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(
                states.CLEANWAIT, task.driver.deploy.prepare_cleaning(task))
            prepare_inband_cleaning_mock.assert_called_once_with(
                task, manage_boot=True)

    @mock.patch.object(deploy_utils, 'tear_down_inband_cleaning',
                       autospec=True)
    def test_tear_down_cleaning(self, tear_down_cleaning_mock):
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.deploy.tear_down_cleaning(task)
            tear_down_cleaning_mock.assert_called_once_with(
                task, manage_boot=True)

    @mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
                autospec=True)
    def test_get_clean_steps(self, mock_get_clean_steps):
        # Test getting clean steps
        self.config(group='deploy', erase_devices_priority=10)
        mock_steps = [{'priority': 10, 'interface': 'deploy',
                       'step': 'erase_devices'}]
        self.node.driver_internal_info = {'agent_url': 'foo'}
        self.node.save()
        mock_get_clean_steps.return_value = mock_steps
        with task_manager.acquire(self.context, self.node.uuid) as task:
            steps = task.driver.deploy.get_clean_steps(task)
            mock_get_clean_steps.assert_called_once_with(
                task, interface='deploy',
                override_priorities={'erase_devices': 10})
        self.assertEqual(mock_steps, steps)

    @mock.patch.object(deploy_utils, 'agent_execute_clean_step',
                       autospec=True)
    def test_execute_clean_step(self, agent_execute_clean_step_mock):
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.deploy.execute_clean_step(
                task, {'some-step': 'step-info'})
            agent_execute_clean_step_mock.assert_called_once_with(
                task, {'some-step': 'step-info'})


class TestVendorPassthru(db_base.DbTestCase):

    def setUp(self):
        super(TestVendorPassthru, self).setUp()
        mgr_utils.mock_the_extension_manager()
        self.driver = driver_factory.get_driver("fake")
        self.driver.vendor = iscsi_deploy.VendorPassthru()
        self.node = obj_utils.create_test_node(
            self.context, driver='fake',
            instance_info=INST_INFO_DICT,
            driver_info=DRV_INFO_DICT,
            driver_internal_info=DRV_INTERNAL_INFO_DICT,
        )
        self.node.driver_internal_info['agent_url'] = 'http://1.2.3.4:1234'
        self.task = mock.MagicMock(spec=task_manager.TaskManager)
        self.task.shared = False
        self.task.node = self.node
        self.task.driver = self.driver
        self.task.context = self.context

    def test_vendor_routes(self):
        expected = ['heartbeat']
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            vendor_routes = task.driver.vendor.vendor_routes
            self.assertIsInstance(vendor_routes, dict)
            self.assertEqual(sorted(expected), sorted(list(vendor_routes)))

    def test_driver_routes(self):
        expected = ['lookup']
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            driver_routes = task.driver.vendor.driver_routes
            self.assertIsInstance(driver_routes, dict)
            self.assertEqual(sorted(expected), sorted(list(driver_routes)))

    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'reboot_and_finish_deploy', autospec=True)
    @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
    def test_continue_deploy_netboot(self, do_agent_iscsi_deploy_mock,
                                     reboot_and_finish_deploy_mock):
        uuid_dict_returned = {'root uuid': 'some-root-uuid'}
        do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
        self.driver.vendor.continue_deploy(self.task)
        do_agent_iscsi_deploy_mock.assert_called_once_with(
            self.task, self.driver.vendor._client)
        reboot_and_finish_deploy_mock.assert_called_once_with(
            mock.ANY, self.task)

    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'reboot_and_finish_deploy', autospec=True)
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'configure_local_boot', autospec=True)
    @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
    def test_continue_deploy_localboot(self, do_agent_iscsi_deploy_mock,
                                       configure_local_boot_mock,
                                       reboot_and_finish_deploy_mock):
        self.node.instance_info = {
            'capabilities': {'boot_option': 'local'}}
        self.node.save()
        uuid_dict_returned = {'root uuid': 'some-root-uuid'}
        do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned

        self.driver.vendor.continue_deploy(self.task)

        do_agent_iscsi_deploy_mock.assert_called_once_with(
            self.task, self.driver.vendor._client)
        configure_local_boot_mock.assert_called_once_with(
            self.task.driver.vendor, self.task,
            root_uuid='some-root-uuid', efi_system_part_uuid=None)
        reboot_and_finish_deploy_mock.assert_called_once_with(
            self.task.driver.vendor, self.task)

    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'reboot_and_finish_deploy', autospec=True)
    @mock.patch.object(agent_base_vendor.BaseAgentVendor,
                       'configure_local_boot', autospec=True)
    @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
    def test_continue_deploy_localboot_uefi(self, do_agent_iscsi_deploy_mock,
                                            configure_local_boot_mock,
                                            reboot_and_finish_deploy_mock):
        self.node.instance_info = {
            'capabilities': {'boot_option': 'local'}}
        self.node.save()
        uuid_dict_returned = {'root uuid': 'some-root-uuid',
                              'efi system partition uuid': 'efi-part-uuid'}
        do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned

        self.driver.vendor.continue_deploy(self.task)

        do_agent_iscsi_deploy_mock.assert_called_once_with(
            self.task, self.driver.vendor._client)
        configure_local_boot_mock.assert_called_once_with(
            self.task.driver.vendor, self.task,
            root_uuid='some-root-uuid',
            efi_system_part_uuid='efi-part-uuid')
        reboot_and_finish_deploy_mock.assert_called_once_with(
            self.task.driver.vendor, self.task)


# Cleanup of iscsi_deploy with pxe boot interface
class CleanUpFullFlowTestCase(db_base.DbTestCase):

    def setUp(self):
        super(CleanUpFullFlowTestCase, self).setUp()
        self.config(image_cache_size=0, group='pxe')

        # Configure node
        mgr_utils.mock_the_extension_manager(driver="fake_pxe")
        instance_info = INST_INFO_DICT
        instance_info['deploy_key'] = 'fake-56789'
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_pxe',
            instance_info=instance_info,
            driver_info=DRV_INFO_DICT,
            driver_internal_info=DRV_INTERNAL_INFO_DICT,
        )
        self.port = obj_utils.create_test_port(self.context,
                                               node_id=self.node.id)

        # Configure temporary directories
        pxe_temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=pxe_temp_dir, group='pxe')
        tftp_master_dir = os.path.join(CONF.pxe.tftp_root,
                                       'tftp_master')
        self.config(tftp_master_path=tftp_master_dir, group='pxe')
        os.makedirs(tftp_master_dir)

        instance_temp_dir = tempfile.mkdtemp()
        self.config(images_path=instance_temp_dir, group='pxe')
        instance_master_dir = os.path.join(CONF.pxe.images_path,
                                           'instance_master')
        self.config(instance_master_path=instance_master_dir, group='pxe')
        os.makedirs(instance_master_dir)
        self.pxe_config_dir = os.path.join(CONF.pxe.tftp_root,
                                           'pxelinux.cfg')
        os.makedirs(self.pxe_config_dir)

        # Populate some file names
        self.master_kernel_path = os.path.join(CONF.pxe.tftp_master_path,
                                               'kernel')
        self.master_instance_path = os.path.join(
            CONF.pxe.instance_master_path, 'image_uuid')
        self.node_tftp_dir = os.path.join(CONF.pxe.tftp_root,
                                          self.node.uuid)
        os.makedirs(self.node_tftp_dir)
        self.kernel_path = os.path.join(self.node_tftp_dir,
                                        'kernel')
        self.node_image_dir = iscsi_deploy._get_image_dir_path(
            self.node.uuid)
        os.makedirs(self.node_image_dir)
        self.image_path = iscsi_deploy._get_image_file_path(self.node.uuid)
        self.config_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        self.mac_path = pxe_utils._get_pxe_mac_path(self.port.address)

        # Create files
        self.files = [self.config_path, self.master_kernel_path,
                      self.master_instance_path]
        for fname in self.files:
            # NOTE(dtantsur): files with 0 size won't be cleaned up
            with open(fname, 'w') as fp:
                fp.write('test')

        os.link(self.config_path, self.mac_path)
        os.link(self.master_kernel_path, self.kernel_path)
        os.link(self.master_instance_path, self.image_path)
        dhcp_factory.DHCPFactory._dhcp_provider = None

    @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
    @mock.patch.object(pxe, '_get_instance_image_info', autospec=True)
    @mock.patch.object(pxe, '_get_deploy_image_info', autospec=True)
    def test_clean_up_with_master(self, mock_get_deploy_image_info,
                                  mock_get_instance_image_info,
                                  clean_dhcp_mock, set_dhcp_provider_mock):
        image_info = {'kernel': ('kernel_uuid', self.kernel_path)}
        mock_get_instance_image_info.return_value = image_info
        mock_get_deploy_image_info.return_value = {}

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.deploy.clean_up(task)
            mock_get_instance_image_info.assert_called_with(task.node,
                                                            task.context)
            mock_get_deploy_image_info.assert_called_with(task.node)
            set_dhcp_provider_mock.assert_called_once_with()
            clean_dhcp_mock.assert_called_once_with(task)
        for path in ([self.kernel_path, self.image_path, self.config_path]
                     + self.files):
            self.assertFalse(os.path.exists(path),
                             '%s is not expected to exist' % path)
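# --- A stand-alone sketch (an addition, independent of ironic) of the test
# pattern used throughout this file: patch a collaborator with
# autospec=True so the mock enforces the real call signature, run the code
# under test, then assert on the recorded call. The _Disk helper and
# _check_size function below are hypothetical stand-ins; the sketch uses
# the stdlib unittest.mock rather than the external `mock` package.
import unittest
from unittest import mock as umock


class _Disk(object):
    @staticmethod
    def get_image_mb(path):
        raise RuntimeError('should be patched in tests')


def _check_size(path, limit_mb):
    # code under test: reject images larger than the partition
    if _Disk.get_image_mb(path) > limit_mb:
        raise ValueError('image too large')


class AutospecSketchTestCase(unittest.TestCase):
    @umock.patch.object(_Disk, 'get_image_mb', autospec=True)
    def test_check_size(self, get_image_mb_mock):
        get_image_mb_mock.return_value = 1000
        _check_size('/tmp/disk', limit_mb=1024)
        # autospec recorded the call with the real signature
        get_image_mb_mock.assert_called_once_with('/tmp/disk')


if __name__ == '__main__':
    unittest.main()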
############################################################################### ## ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the University of Utah nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### """ File for the window used when opening VisTrails objects from the database QOpenDBWindow QDBConnectionList QDBConnectionListItem QDBObjectList QDBObjectListItem QConnectionDBSetupWindow """ from PyQt4 import QtCore, QtGui from db import VistrailsDBException import db.services.io from core.external_connection import ExtConnectionList, DBConnection from core.db.locator import DBLocator from core.system import default_connections_file from gui.theme import CurrentTheme from gui.utils import show_info, show_warning, show_question, \ NO_BUTTON, YES_BUTTON from core import debug class QOpenDBWindow(QtGui.QDialog): """ QOpenDBWindow is a dialog containing two panels. the left panel shows all the stored database connections and the right paanel shows the vistrails available on the selected database connection. 
""" _instance = None def __init__(self, parent=None): """ __init__(parent: QWidget) -> QOpenDBWindow Construct the dialog with the two panels """ QtGui.QDialog.__init__(self,parent) self.setWindowTitle("Choose a vistrail") self.save = False mainLayout = QtGui.QVBoxLayout() panelsLayout = QtGui.QGridLayout() self.createActions() self.saveasLayout = QtGui.QHBoxLayout() self.saveasLabel = QtGui.QLabel("Save As:") self.saveasEdt = QtGui.QLineEdit("") self.saveasEdt.setFixedWidth(200) self.saveasEdt.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed) self.saveasLayout.addWidget(self.saveasLabel) self.saveasLayout.addWidget(self.saveasEdt) self.saveasLabel.setVisible(False) self.saveasEdt.setVisible(False) self.connectionList = QDBConnectionList(self) self.objectList = QDBObjectList(self) dbLabel = QtGui.QLabel("Databases:") self.vtLabel = QtGui.QLabel("Vistrails: ") panelsLayout.addWidget(dbLabel,0,0,1,1) panelsLayout.setColumnMinimumWidth(1,10) panelsLayout.addWidget(self.vtLabel,0,2,1,2) panelsLayout.addWidget(self.connectionList,1,0,1,1) panelsLayout.addWidget(self.objectList,1,2,1,2) self.addButton = QtGui.QToolButton() self.addButton.setDefaultAction(self.addAct) self.addButton.setAutoRaise(True) self.removeButton = QtGui.QToolButton() self.removeButton.setDefaultAction(self.removeAct) self.removeButton.setAutoRaise(True) self.removeButton.setEnabled(False) panelButtonsLayout = QtGui.QHBoxLayout() panelButtonsLayout.setMargin(0) panelButtonsLayout.setSpacing(0) panelButtonsLayout.addWidget(self.addButton) panelButtonsLayout.addWidget(self.removeButton) panelsLayout.addLayout(panelButtonsLayout,2,0,1,1, QtCore.Qt.AlignLeft) buttonsLayout = QtGui.QHBoxLayout() self.cancelButton = QtGui.QPushButton('Cancel') self.cancelButton.setAutoDefault(False) self.openButton = QtGui.QPushButton('Open') self.openButton.setEnabled(False) self.openButton.setAutoDefault(True) buttonsLayout.addStretch(1) buttonsLayout.addWidget(self.cancelButton) buttonsLayout.addWidget(self.openButton) mainLayout.addLayout(self.saveasLayout) mainLayout.addLayout(panelsLayout) mainLayout.addLayout(buttonsLayout) mainLayout.setAlignment(self.saveasLayout, QtCore.Qt.AlignHCenter) self.setLayout(mainLayout) self.connectSignals() QOpenDBWindow._instance = self def createActions(self): """ createActions() -> None Create actions related to context menu """ self.addAct = QtGui.QAction("+", self) self.addAct.setStatusTip("Create a new connection") self.removeAct = QtGui.QAction("-", self) self.removeAct.setStatusTip("Remove the selected connection from list") def showEvent(self, e): """showEvent(e: QShowEvent) -> None If the use doesn't have any connection set up, we will ask him to create one. """ if self.connectionList.count() == 0: text = "You don't seem to have any connection available. \ Would you like to create one?" 
res = show_question('Vistrails', text, [NO_BUTTON, YES_BUTTON], NO_BUTTON) if res == YES_BUTTON: self.showConnConfig() else: self.updateDBObjectsList() def connectSignals(self): """ connectSignals() -> None Map signals between GUI components """ self.connect(self.cancelButton, QtCore.SIGNAL('clicked()'), self.reject) self.connect(self.openButton, QtCore.SIGNAL('clicked()'), self.accept) self.connect(self.addAct, QtCore.SIGNAL('triggered()'), self.showConnConfig) self.connect(self.removeAct, QtCore.SIGNAL('triggered()'), self.connectionList.removeConnection) self.connect(self.connectionList, QtCore.SIGNAL('itemSelectionChanged()'), self.updateDBObjectsList) self.connect(self.connectionList, QtCore.SIGNAL('itemSelectionChanged()'), self.updateButtons) self.connect(self.connectionList, QtCore.SIGNAL("reloadConnections"), self.updateDBObjectsList) self.connect(self.objectList, QtCore.SIGNAL('itemSelectionChanged()'), self.updateButtons) self.connect(self.saveasEdt, QtCore.SIGNAL('textChanged(QString)'), self.updateButtons) self.connect(self.objectList, QtCore.SIGNAL('itemDoubleClicked(QListWidgetItem *)'), self.accept) def updateDBObjectsList(self): """ updateDBObjectsList() -> None It reloads the vistrails object list for the selected connection. If nothing is selected, it will clear the list. """ conn = self.connectionList.getCurrentItemId() self.objectList.updateContents(conn) self.updateEditButtons(conn) def updateButtons(self): """updateButtons() -> None It will enable the open button if a vistrail is selected or in case of saving a vistrail, if a connection is selected and the name is valid """ vtlist = self.objectList if not self.save: if len(vtlist.selectedItems()) > 0: self.openButton.setEnabled(True) else: self.openButton.setEnabled(False) else: if (len(self.connectionList.selectedItems()) > 0 and self.saveasEdt.text() != '' and len(vtlist.findItems(self.saveasEdt.text(), QtCore.Qt.MatchFixedString)) == 0): self.openButton.setEnabled(True) else: self.openButton.setEnabled(False) def updateEditButtons(self, id): """updateEditButtons(id: int) -> None It will enable/disable the connections buttons according to the selection """ if id != -1: self.removeButton.setEnabled(True) else: self.removeButton.setEnabled(False) def showConnConfig(self, *args, **keywords): """showConnConfig(*args, **keywords) -> None shows a window to configure the connection. 
The valid keywords are defined in QConnectionDBSetupWindow.__init__()

        """
        keywords["parent"] = self
        dialog = QConnectionDBSetupWindow(**keywords)
        if dialog.exec_() == QtGui.QDialog.Accepted:
            config = {'id': int(dialog.id),
                      'name': str(dialog.nameEdt.text()),
                      'host': str(dialog.hostEdt.text()),
                      'port': int(dialog.portEdt.value()),
                      'user': str(dialog.userEdt.text()),
                      'passwd': str(dialog.passwdEdt.text()),
                      'db': str(dialog.databaseEdt.text())}
            id = self.connectionList.setConnectionInfo(**config)
            self.connectionList.setCurrentId(id)
            return True
        else:
            return False

    def prepareForOpening(self, obj_type):
        """prepareForOpening(obj_type: str) -> None
        It will prepare the dialog to be an Open dialog

        """
        self.setWindowTitle("Choose a %s" % obj_type.capitalize())
        if obj_type != 'vistrail':
            self.vtLabel.setText('VisTrails %ss' % obj_type.capitalize())
        else:
            self.vtLabel.setText('Vistrails')
        self.save = False
        self.objectList.obj_type = obj_type
        self.objectList.setEnabled(True)
        self.saveasLabel.setVisible(False)
        self.saveasEdt.setVisible(False)
        self.openButton.setEnabled(False)
        self.openButton.setText("Open")

    def prepareForSaving(self, obj_type):
        """prepareForSaving(obj_type: str) -> None
        It will prepare the dialog to be a Save As dialog

        """
        self.setWindowTitle("Save %s..." % obj_type.capitalize())
        if obj_type != 'vistrail':
            self.vtLabel.setText('VisTrails %ss' % obj_type.capitalize())
        else:
            self.vtLabel.setText('Vistrails')
        self.save = True
        self.objectList.obj_type = obj_type
        self.objectList.setEnabled(False)
        self.saveasLabel.setVisible(True)
        self.saveasEdt.setVisible(True)
        self.openButton.setText("Save")
        self.openButton.setEnabled(False)

    @staticmethod
    def getInstance():
        """getInstance() -> QOpenDBWindow
        Returns the current instance of the QOpenDBWindow

        """
        if QOpenDBWindow._instance is None:
            QOpenDBWindow._instance = QOpenDBWindow()
        return QOpenDBWindow._instance

    @staticmethod
    def getOpenDBObject(obj_type):
        """getOpenDBObject(obj_type : str) -> (dict, int, str)
        Creates a dialog for opening a vistrails object from the database.
        It will return the selected connection configuration information,
        the object id and the object name.

        """
        if QOpenDBWindow._instance:
            dlg = QOpenDBWindow._instance
        else:
            dlg = QOpenDBWindow()
        dlg.prepareForOpening(obj_type)
        if dlg.exec_() == QtGui.QDialog.Accepted:
            return (dlg.connectionList.getCurrentConnConfig(),
                    dlg.objectList.currentItem().id,
                    dlg.objectList.currentItem().name)
        else:
            return ({}, -1, '')

    @staticmethod
    def getSaveDBObject(obj_type):
        """getSaveDBObject(obj_type : str) -> (dict, str)
        Creates a dialog for saving a vistrails object to the database.
It will return the selected connection configuration information and the object name """ if QOpenDBWindow._instance: dlg = QOpenDBWindow._instance else: dlg = QOpenDBWindow() dlg.prepareForSaving(obj_type) if dlg.exec_() == QtGui.QDialog.Accepted: return (dlg.connectionList.getCurrentConnConfig(), str(dlg.saveasEdt.text()).strip(' \n\t')) else: return({},'') ################################################################################ class QDBConnectionList(QtGui.QListWidget): """ QDBConnection list is a widget to show the available databases """ def __init__(self, parent=None): QtGui.QListWidget.__init__(self,parent) self.__list = ExtConnectionList.getInstance(default_connections_file()) self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection) self.setIconSize(QtCore.QSize(32,32)) self.loadConnections() self.editAct = QtGui.QAction("Edit", self) self.editAct.setStatusTip("Edit the selected connection") self.connect(self.editAct, QtCore.SIGNAL("triggered()"), self.editConnection) def getCurrentItemId(self): """getCurrentItemId() -> int Returns the id of the selected item. If there is no item selected, it will return -1. """ item = None if len(self.selectedItems()) > 0: item = self.selectedItems()[0] if item != None: return int(item.id) else: return -1 def contextMenuEvent(self, e): """contextMenuEvent(e: QContextMenuEvent) -> None Shows a popup menu for the connection """ item = self.currentItem() if item: menu = QtGui.QMenu() menu.addAction(self.editAct) menu.exec_(e.globalPos()) def editConnection(self): """editConnection() -> None Method called to edit a connection. It will get the information from the selected connection and show the dialog so the user can update the fields """ conn_id = self.getCurrentItemId() config = self.getConnectionInfo(conn_id) if config != None: config["create"] = False self.parent().showConnConfig(**config) def updateGUI(self): """updateGUI() -> None Update GUI list to be consistent with the the list of connections """ self.clear() for (id, c) in self.__list.items(): cItem = QDBConnectionListItem(CurrentTheme.DB_ICON, int(id), str(c.name)) self.addItem(cItem) self.emit(QtCore.SIGNAL("reloadConnections")) def loadConnections(self): """loadConnections() -> None Loads the internal connections and updates the GUI """ self.__list.clear() self.__list.load_connections() self.updateGUI() def getConnectionInfo(self, id): """getConnectionInfo(id: int) -> dict Returns info of ExtConnection """ conn = self.__list.get_connection(id) key = str(conn.id) + "." + conn.name + "." 
+ conn.host passwd = DBLocator.keyChain.get_key(key) if conn != None: config = {'id': conn.id, 'name': conn.name, 'host': conn.host, 'port': conn.port, 'user': conn.user, 'passwd': passwd, 'db': conn.database} else: config = None return config def findConnectionInfo(self, host, port, db): """findConnection(host:str, port: int, db: str) -> dict Returns complete info of a connection with the given parameters """ id = self.__list.find_db_connection(host,port,db) if id != -1: return self.getConnectionInfo(id) else: return None def removeConnection(self): """removeConnection() -> None Removes the selected connection """ id = self.getCurrentItemId() self.takeItem(self.currentRow()) self.__list.remove_connection(id) def get_connection(self, id): """get_connection() -> None Returns the selected connection """ return self.__list.get_connection(id) def setConnectionInfo(self, *args, **kwargs): """setConnectionInfo(id: int, name: str, host: str, port:int, user:str, passwd:str, db:str) -> None If the connection exists it will update it, else it will add it """ if kwargs.has_key("id"): id = kwargs["id"] if kwargs.has_key("name"): name = kwargs["name"] if kwargs.has_key("host"): host = kwargs["host"] if kwargs.has_key("port"): port = kwargs["port"] if kwargs.has_key("user"): user = kwargs["user"] if kwargs.has_key("passwd"): passwd = kwargs["passwd"] if kwargs.has_key("db"): db = kwargs["db"] conn = DBConnection(id=id, name=name, host=host, port=port, user=user, passwd='', database=db, dbtype='MySQL') if self.__list.has_connection(id): self.__list.set_connection(id,conn) else: if conn.id == -1: conn.id = self.__list.get_fresh_id() self.__list.add_connection(conn) self.updateGUI() key = str(conn.id) + "." + conn.name + "." + conn.host DBLocator.keyChain.set_key(key,passwd) return conn.id def setCurrentId(self, id): """setCurrentId(id: int) -> None set the connection with id 'id' to be the current selected connection """ conn = self.__list.get_connection(id) for i in self.findItems(conn.name, QtCore.Qt.MatchFixedString): if i.id == id: self.setCurrentItem(i) break self.emit(QtCore.SIGNAL("reloadConnections"), id) def getCurrentConnConfig(self): """getCurrentConnConfig() -> dict Return dictionary of parameters of the current connection to pass to MySQLdb """ conn_id = self.currentItem().id conn = self.__list.get_connection(conn_id) config = self.getConnectionInfo(conn_id) if conn.dbtype == 'MySQL': #removing extra keyword arguments for MySQldb del config['name'] return config def getDBObjectList(self, conn_id, obj_type): """getDBObjectList(conn_id: int, obj_type : str) -> list Returns list of vistrails objects """ conn = self.__list.get_connection(conn_id) config = self.getConnectionInfo(conn_id) if conn.dbtype == 'MySQL': #removing extra keyword arguments for MySQldb config_name = config['name'] del config['name'] config_id = config['id'] del config['id'] vt_list = db.services.io.get_db_object_list(config, obj_type) if conn.dbtype == 'MySQL': config['name'] = config_name config['id'] = config_id return vt_list ################################################################################ class QDBConnectionListItem(QtGui.QListWidgetItem): def __init__(self, icon, id, text, parent=None): """__init__(icon: QIcon, id: int, text: QString, parent: QListWidget) -> QDBConnectionListItem Creates an item with id """ QtGui.QListWidgetItem.__init__(self,icon, text, parent) self.id = id ################################################################################ class QDBObjectList(QtGui.QListWidget): """ 
QDBObjectList is a widget to show the vistrails available in the selected database """ def __init__(self, parent=None, obj_type='vistrail'): QtGui.QListWidget.__init__(self, parent) self.obj_type = obj_type def updateContents(self, conn_id=-1): """updateContents(connection_id: int) -> None Reloads vistrails from the given connection """ self.clear() if conn_id != -1: parent = self.parent() try: objs = parent.connectionList.getDBObjectList(int(conn_id), self.obj_type) for (id,obj,date) in objs: item = QDBObjectListItem(CurrentTheme.FILE_ICON, int(id), str(obj), str(date)) self.addItem(item) except VistrailsDBException, e: #show connection setup config = parent.connectionList.getConnectionInfo(int(conn_id)) if config != None: config["create"] = False parent.showConnConfig(**config) else: raise e ################################################################################ class QDBObjectListItem(QtGui.QListWidgetItem): def __init__(self, icon, id, name, date, parent=None): """__init__(icon: QIcon, id: int, name: QString, date: QString, user: QString, parent: QListWidget) -> QDBObjectListItem Creates an item with id """ QtGui.QListWidgetItem.__init__(self, icon, name, parent) self.id = id self.name = name self.date = date self.setToolTip("Last Modified on %s" % date) ################################################################################ class QConnectionDBSetupWindow(QtGui.QDialog): """ QConnectionDBSetupWindow is a dialog for creating a DB connection. """ def __init__(self, parent=None, id=-1, name ='', host="", port=3306, user="", passwd="", db="", create=True): """ __init__(parent: QWidget, id: int, name: str, host:str, port:int, user:str, passwd:str, db:str, create:Boolean) -> QConnectionDBSetupWindow Construct the dialog with the information provided create tells if the caption of the button is Create or Update """ QtGui.QDialog.__init__(self,parent) if create: self.setWindowTitle("Create a new connection") else: self.setWindowTitle("Update a connection") mainLayout = QtGui.QVBoxLayout() infoLayout = QtGui.QGridLayout() self.id = id nameLabel = QtGui.QLabel("Save as Connection Name:", self) self.nameEdt = QtGui.QLineEdit(name, self) hostLabel = QtGui.QLabel("Server Hostname:", self) self.hostEdt = QtGui.QLineEdit(host, self) portLabel = QtGui.QLabel("Port:", self) self.portEdt = QtGui.QSpinBox(self) self.portEdt.setMaximum(65535) self.portEdt.setValue(port) userLabel = QtGui.QLabel("Username:", self) self.userEdt = QtGui.QLineEdit(user, self) passwdLabel = QtGui.QLabel("Password:", self) self.passwdEdt = QtGui.QLineEdit(passwd,self) self.passwdEdt.setEchoMode(QtGui.QLineEdit.Password) self.passwdEdt.setToolTip("For your protection, your " "password will not be saved.") databaseLabel = QtGui.QLabel("Database:", self) self.databaseEdt = QtGui.QLineEdit(db,self) mainLayout.addLayout(infoLayout) infoLayout.addWidget(nameLabel,0,0,1,1) infoLayout.addWidget(self.nameEdt,0,1,1,1) infoLayout.addWidget(hostLabel,1,0,1,1) infoLayout.addWidget(self.hostEdt,1,1,1,1) infoLayout.addWidget(portLabel,1,2,1,1) infoLayout.addWidget(self.portEdt,1,3,1,1) infoLayout.addWidget(userLabel,2,0,1,1) infoLayout.addWidget(self.userEdt,2,1,1,3) infoLayout.addWidget(passwdLabel,3,0,1,1) infoLayout.addWidget(self.passwdEdt,3,1,1,3) infoLayout.addWidget(databaseLabel,4,0,1,1) infoLayout.addWidget(self.databaseEdt,4,1,1,3) buttonsLayout = QtGui.QHBoxLayout() if create: caption = 'Create' else: caption = 'Update' self.createButton = QtGui.QPushButton(caption, self) self.createButton.setDefault(True) 
self.cancelButton = QtGui.QPushButton('Cancel', self) self.testButton = QtGui.QPushButton('Test', self) buttonsLayout.addStretch(1) buttonsLayout.addWidget(self.cancelButton) buttonsLayout.addWidget(self.testButton) buttonsLayout.addWidget(self.createButton) mainLayout.addLayout(buttonsLayout) self.setLayout(mainLayout) self.connectSignals() self.updateButtons() def connectSignals(self): """ connectSignals() -> None Map signals between GUI components """ self.connect(self.cancelButton, QtCore.SIGNAL('clicked()'), self.reject) self.connect(self.createButton, QtCore.SIGNAL('clicked()'), self.accept) self.connect(self.testButton, QtCore.SIGNAL('clicked()'), self.testConnection) self.connect(self.nameEdt, QtCore.SIGNAL('textChanged(QString)'), self.updateButtons) self.connect(self.hostEdt, QtCore.SIGNAL('textChanged(QString)'), self.updateButtons) self.connect(self.userEdt, QtCore.SIGNAL('textChanged(QString)'), self.updateButtons) self.connect(self.passwdEdt, QtCore.SIGNAL('textChanged(QString)'), self.updateButtons) self.connect(self.databaseEdt, QtCore.SIGNAL('textChanged(QString)'), self.updateButtons) self.connect(self.portEdt, QtCore.SIGNAL('valueChanged(int)'), self.updateButtons) def testConnection(self): """testConnection() -> None """ config = {'host': str(self.hostEdt.text()), 'port': int(self.portEdt.value()), 'user': str(self.userEdt.text()), 'passwd': str(self.passwdEdt.text()), 'db': str(self.databaseEdt.text())} try: db.services.io.test_db_connection(config) show_info('Vistrails',"Connection succeeded!") except Exception, e: debug.critical('An error has occurred', str(e)) def updateButtons(self): """updateButtons() -> None enables button if there's enough information in the dialog """ if (self.nameEdt.text() != "" and self.hostEdt.text() != "" and self.portEdt.value() != 0 and self.userEdt.text() != "" and self.databaseEdt.text() != ""): self.createButton.setEnabled(True) else: self.createButton.setEnabled(False)
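
# A minimal usage sketch for the two static helpers above, assuming a
# standalone QApplication; inside VisTrails itself the application object
# already exists, so callers only invoke the helpers.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    # Pick a vistrail to open; returns ({}, -1, '') if the user cancels.
    conn_config, obj_id, obj_name = QOpenDBWindow.getOpenDBObject('vistrail')
    # Pick a connection and a name to save under; returns ({}, '') on cancel.
    conn_config, save_name = QOpenDBWindow.getSaveDBObject('vistrail')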
# -*- coding: utf-8 -*- import os from threading import Thread, Lock from time import sleep from urllib import quote_plus import xbmc, xbmcgui, xbmcaddon from kodi65.actionhandler import ActionHandler from .customhomemenu import CUSTOM_MENU from .image_cache import ImageCache from .pylms.callbackserver import CallbackServer from .simplelms.artworkresolver import ArtworkResolver from .simplelms.simplelms import LMSServer from .simplelms.menu import LMSMenuHandler from .simplelms.menuitems import menu_type as lms_menu_type # Debugging level DEBUG_LEVEL = xbmc.LOGDEBUG # Initialise addon and get some basic info _A_ = xbmcaddon.Addon() _S_ = _A_.getSetting ADDON_PATH = _A_.getAddonInfo("path") ADDON_ID = _A_.getAddonInfo("id") ADDON_PROFILE = xbmc.translatePath(_A_.getAddonInfo('profile')).decode('utf-8') # Get key settings LMS_SERVER = _S_("server_ip") LMS_TELNET = int(_S_("telnet_port")) LMS_WEB = int(_S_("web_port")) # Define some paths and variable names for our images CACHE_PATH = os.path.join(ADDON_PROFILE, "cache") IMG_BACKGROUND = "backgrounds" IMG_ICON = "icons" IMG_PROGRESS = "swatch" CONTROL_DEFAULT = 10 CONTROL_PLAYLIST = 50 CONTROL_MENU_GROUP = 100 CONTROL_MENU = 101 CONTROL_MENU_PLAY = 414141 CONTROL_AUDIO_SUBMENU = 410100 CONTROL_SEARCH_SUBMENU = 410101 CONTROL_PLAYER_CONTROL = 410500 SKIN_CONTROLS = [CONTROL_PLAYLIST, CONTROL_MENU, CONTROL_MENU_GROUP, CONTROL_MENU_PLAY, 1011, 1012, 1013] SUBMENU_AUDIO_TYPES = ["audio", "playlist"] SUBMENU_SEARCH_TYPES = ["search"] SUBMENUS = {CONTROL_AUDIO_SUBMENU: [("Play", "play"), ("Play Next", "playnext"), ("Queue", "add")], CONTROL_SEARCH_SUBMENU: [("Search", "search")] } PLAYER_CONTROLS = [("previous", "Previous Track"), ("playpause", "Play/Pause"), ("stop", "Stop"), ("next", "Next Track"), ("volume", "Adjust Volume")] # Initialise the action handler ch = ActionHandler() def debug(message, level=DEBUG_LEVEL): """Basic debug function for outputting info to the log. Should not use anything higher than debug to avoid spamming the logfile. """ # Make sure encoding is ok for log file if isinstance (message, str): message = message.decode("utf-8") # Format the message and send to the logfile message = u"{}: {}".format(ADDON_ID, message) xbmc.log(msg=message.encode("utf-8"), level=level) class SqueezeInfo(xbmcgui.WindowXML): """Window class definition for showing now playing information from a Logitech Media Server. 
""" def __init__(self,strXMLname, strFallbackPath, strDefaultName, forceFallback): # Define some basic variables debug("Initialising screen and defining variables...") self.player = None self.players = None self.cur_player = None self.lock = Lock() self.duration = 0 self.elapsed = 0 self.playing = False self.connected = False self.abort = False self.show_playlist = False self.show_menu = False self.menu_history = [] self.server_connected = False self.has_playlist = False self.has_player = False # Set the location of the server self.hostname = LMS_SERVER self.telnet_port = LMS_TELNET self.web_port = LMS_WEB # Get a basic server object for retrieving data self.cmdserver = LMSServer(host=self.hostname, port=self.web_port) # Get the artwwork resolver to create urls for now playing tracks # The ImageCache processes images and saves to userdata folder debug("Creating artwork resolver and cache") self.awr = ArtworkResolver(host=self.hostname, port=self.web_port) self.cache = ImageCache() # Create a callback server to receive asynchronous announcements from # the server debug("Creating callback server") self.cbserver = CallbackServer(hostname=self.hostname, port=self.telnet_port) self.cbserver.daemon = True # Define the events that we want to listen for and assign callbacks debug("Adding callbacks") self.cbserver.add_callback(CallbackServer.PLAYLIST_CHANGED, callback=self.track_changed) self.cbserver.add_callback(CallbackServer.PLAYLIST_CHANGE_TRACK, callback=self.track_changed) self.cbserver.add_callback(CallbackServer.SERVER_ERROR, callback=self.no_server) self.cbserver.add_callback(CallbackServer.SERVER_CONNECT, callback=self.server_connect) self.cbserver.add_callback(CallbackServer.VOLUME_CHANGE, callback=self.vol_change) self.cbserver.add_callback(CallbackServer.PLAY_PAUSE, callback=self.play_pause) self.cbserver.add_callback(CallbackServer.CLIENT_ALL, callback=self.client_change) def onInit(self): # Once window has been initialised we can start setting properties debug("OnInit called") debug("OnInit - Setting windowID and properties") self.windowID = xbmcgui.getCurrentWindowId() self.setProperty("SQUEEZEINFO_SERVER_CONNECTED", "true") self.setProperty("SQUEEZEINFO_HAS_PLAYER", "false") self.setProperty("SQUEEZEINFO_HAS_NEXT_TRACK", "false") self.setProperty("SQUEEZEINFO_SHOW_MENU", "false") self.setProperty("SQUEEZEINFO_SHOW_CONTROLS", "false") self.setProperty("SQUEEZE_IMAGE_FOLDER", os.path.join(CACHE_PATH, IMG_ICON)) self.playlistbox = self.getControl(CONTROL_PLAYLIST) self.playlistbox.setVisibleCondition("String.IsEqual(Window.Property(SQUEEZEINFO_SHOW_PLAYLIST),true)") self.menubox = self.getControl(CONTROL_MENU_GROUP) self.menubox.setVisibleCondition("String.IsEqual(Window.Property(SQUEEZEINFO_SHOW_MENU),true)") self.playercontrols = self.getControl(410400) self.playercontrols.setVisibleCondition("String.IsEqual(Window.Property(SQUEEZEINFO_SHOW_CONTROLS),true)") self.audiosubmenu = self.getControl(CONTROL_AUDIO_SUBMENU) self.searchsubmenu = self.getControl(CONTROL_SEARCH_SUBMENU) self.build_submenus() self.build_player_controls() # self.submenubox.setVisibleCondition("String.IsEqual(Container(101).ListItem(0).Property(showsubmenu),true)") # Let's see if the server is working debug("OnInit - Getting server") self.get_server() # If the server's available then we need to see if there are any # squeezeplayers if self.server_connected: debug("OnInit - getting players") self.get_squeeze_players() else: debug("OnInit - no players") # ...and if we've got a player then we should get the 
metadata if self.player: debug("Player found: get track info.") self.get_info() # Check if track is playing debug("OnInit - getting player state") try: self.playing = self.player.get_mode() == "play" debug("OnInit - player status: {}".format(self.playing)) except: debug("OnInit - player state error - setting as False") self.playing = False # Get the progress bar control reference self.progress = self.getControl(41) debug("OnInit - Progress control: {}".format(self.progress)) debug("OnInit - starting progress bar thread") # Start a thread to update the progress bar track_progress = Thread(target=self.show_progress) track_progress.daemon = True track_progress.start() # Start the callback server to listen for events debug("OnInit - starting callback server") self.cbserver.start() def onAction(self, action): # Let the action handler deal with this using decorators on methods ch.serve_action(action, self.getFocusId(), self) def onClick(self, controlID): pass def onFocus(self, controlID): pass def setProperty(self, propname, value): """Simple method for setting window properties.""" debug(u"Setting property {} = {}".format(propname, value)) xbmcgui.Window(self.windowID).setProperty(propname, value) def clearProperty(self, propname): """Simple method for clearing window properties.""" debug("Clearing property {}".format(propname)) xbmcgui.Window(self.windowID).clearProperty(propname) def set_default_focus(self): self.setFocusId(CONTROL_DEFAULT) def get_server(self): """Method to check whether Logitech Media Server is online. Returns True if online and False if not. """ debug("Checking server connection...") connected = self.cmdserver.ping() debug("Server is {}connected".format("" if connected else "not ")) state = "true" if connected else "false" self.setProperty("SQUEEZEINFO_SERVER_CONNECTED", state) self.server_connected = connected def get_cur_player(self): """Method to get the current player. If the last used player is no longer available then a new player is assigned to be the current player. Returns a player object. """ debug("Getting current player.") if self.cur_player in self.players: debug("Current player found in list of players") return self.players[self.players.index(self.cur_player)] else: debug("Current player not found. Setting new default player") pl = self.players[0] self.cur_player = str(pl.ref) return pl def get_squeeze_players(self): """Method to obtain list of squeezeplayers connected to the server. Method assigns result to self.players and does not return any output. """ debug("Getting list of available players") # Server needs to be online. If it's not, try connecting one last time. if not self.server_connected: self.get_server() # If the server is online then get the players if self.server_connected: self.players = self.cmdserver.get_players() debug("{} available players: {}".format(len(self.players), self.players)) if self.players: # Setting this property will cause the "Now Playing" bar to be # displayed on the skin. self.setProperty("SQUEEZEINFO_HAS_PLAYER", "true") self.has_player = True self.player = self.get_cur_player() self.setProperty("SQUEEZEINFO_PLAYER_NAME", self.player.name) self.set_vol_label() self.get_sync_groups() else: # No players so we need to hide the "Now Playing" bar self.setProperty("SQUEEZEINFO_HAS_PLAYER", "false") self.has_player = False else: debug("Can't connect to server. 
No players.") self.players = [] def get_info(self): """Method to get track information from the current player.""" debug("Getting track info.") try: debug("Retrieving playlist info for player {}...".format(self.player)) # Get the current and next track info # (hard coded 2 tracks for now) track = self.player.playlist_get_current_detail(amount=2) debug("{} track(s) found.\n{}".format(len(track), track)) # If we can't get track info then we need to exit this method except AttributeError: debug("get_info: AttributeError - exiting.") return # If there's at least one track then we can display the Now Playing info if len(track) > 0: self.set_now_playing(track[0]) else: self.setProperty("SQUEEZEINFO_HAS_PLAYLIST", "false") self.has_playlist = False # If there are two tracks then we should display the next track too if len(track) == 2: self.setProperty("SQUEEZEINFO_HAS_NEXT_TRACK", "true") self.set_next_up(track[1]) # or hide the next track info if there's no track. else: self.setProperty("SQUEEZEINFO_HAS_NEXT_TRACK", "false") # Get track progress information try: e, d = self.player.get_track_elapsed_and_duration() except: e = d = 0.0 # These variables can be written in other places so let's make sure it # only happens at one time with a Lock with self.lock: self.elapsed = e self.duration = d # If the now playing bar is currently hidden, we only want to show it # after the data has been populated. if track: self.setProperty("SQUEEZEINFO_HAS_PLAYLIST", "true") self.has_playlist = True def get_metadata(self, track, process_image=True): """Method to output the track metadata.""" title = track.get("title", "Unknown Track") album = track.get("album", "Unknown Album") artist = track.get("artist", "Unknown Artist") debug("Metadata: {}".format(track)) # Use the ArtworkResolver to get the URL for the track. debug("Getting artwork url...") try: url = self.awr.getURL(track) except: debug("Error in artwork resolver") url = "" # By default, we'll use the URL for the image (but we'll see if we can # process it below) img_icon = url img_bg = url debug("Artwork url: {}".format(url)) if process_image: # Despite the name this image cache also handles the image processing. try: debug("Getting cached image paths.") img_bg = self.cache.getCachedImage(url, IMG_BACKGROUND) # For some reason, the cache isn't loading for the icon so we'll # use the url for now... 
# img_icon = self.cache.getCachedImage(url, IMG_ICON) except: debug("Error retrieving cache paths") # Return the necessary metadata return title, album, artist, img_icon, img_bg def set_now_playing(self, track): """Method to set window properties for the current track.""" debug("Setting now playing track info") title, album, artist, icon, bg = self.get_metadata(track) pos = track.get("playlist index", -1) self.setProperty("SQUEEZEINFO_NP_TITLE", title) self.setProperty("SQUEEZEINFO_NP_ARTIST", artist) self.setProperty("SQUEEZEINFO_NP_ALBUM", album) self.setProperty("SQUEEZEINFO_NP_BACKGROUND", bg) self.setProperty("SQUEEZEINFO_NP_ICON", icon) self.setProperty("SQUEEZEINFO_CURRENT_TRACK", str(pos + 1)) def set_next_up(self, track): """Method to set window properties for the next track.""" debug("Setting next track info") title, album, artist, icon, _ = self.get_metadata(track) self.setProperty("SQUEEZEINFO_NEXT_TITLE", title) self.setProperty("SQUEEZEINFO_NEXT_ARTIST", artist) self.setProperty("SQUEEZEINFO_NEXT_ALBUM", album) self.setProperty("SQUEEZEINFO_NEXT_ICON", icon) def getCallbackPlayer(self, event): """Return the player reference from the callback event.""" player = self.cur_player if event is None else event.split(" ")[0] debug("Callback player ref: {}".format(player)) return player def cur_or_sync(self, ref): """Method to determine if the event player is our player or in a sync group with our player. """ if ref == self.cur_player: debug("Event matches current player") return True else: debug("Checking event in sync groups") for gr in self.sync_groups: if ref in gr and self.cur_player in gr: debug("Event from player in sync group") return True debug("Event doesn't match player or sync group") return False def get_sync_groups(self): """Method to retrieve sync groups defined on the server.""" try: self.sync_groups = self.cmdserver.get_sync_groups() except: self.sync_groups = [] debug("Sync groups: {}".format(self.sync_groups)) def track_changed(self, event=None): """Method to trigger actions when a new track is triggered on server.""" debug("track_changed") debug(event) if self.cur_or_sync(self.getCallbackPlayer(event)): self.get_info() def no_server(self, event=None): """Method to trigger actions when server becomes unavailable.""" debug("no_server: {}".format(event)) self.setProperty("SQUEEZEINFO_SERVER_CONNECTED", "false") def server_connect(self, event=None): """Method to trigger actions when server becomes available.""" debug("server_connect: {}".format(event)) self.setProperty("SQUEEZEINFO_SERVER_CONNECTED", "true") self.get_squeeze_players() def play_pause(self, event=None): """Method to trigger actions when player state changes.""" debug("play_pause: {}".format(event)) if self.cur_or_sync(self.getCallbackPlayer(event)): if event.split()[3] == "1": self.playing = False else: self.playing = True debug("Player playing state now: {}".format(self.playing)) def client_change(self, event=None): """Method to trigger actions when client connects or disconnects.""" debug("client_change: {}".format(event)) self.get_squeeze_players() if self.players: self.get_info() def vol_change(self, event=None): """Method to trigger actions when volume changes.""" if self.getCallbackPlayer(event) == self.cur_player: self.set_vol_label() def change_player(self, step): self.setProperty("SQUEEZEINFO_CHANGE_PLAYER", "false") sleep(0.05) debug("Changing player. 
Index step {}".format(step)) index = self.players.index(self.cur_player) index = (index + step) % len(self.players) self.player = self.players[index] self.cur_player = str(self.player.ref) debug("New player: {}".format(self.player.name)) debug("Updating screen...") self.setProperty("SQUEEZEINFO_PLAYER_NAME", self.player.name) self.setProperty("SQUEEZEINFO_CHANGE_PLAYER", "true") self.get_info() def set_vol_label(self): label = "{}%".format(self.player.get_volume()) self.setProperty("SQUEEZEINFO_PLAYER_VOLUME", label) def vol_up(self): """Method to increase current player's volume.""" try: self.player.volume_up() self.set_vol_label() except: pass def vol_down(self): """Method to decrease current player's volume.""" try: self.player.volume_down() self.set_vol_label() except: pass def get_text(self, heading): text = xbmcgui.Dialog().input(heading, type=xbmcgui.INPUT_ALPHANUM) return text def display_playlist(self, hide=False): if hide: self.show_playlist = False self.setProperty("SQUEEZEINFO_SHOW_PLAYLIST", "false") self.set_default_focus() else: if not self.has_playlist: return self.set_playlist() self.show_playlist = True self.setProperty("SQUEEZEINFO_SHOW_PLAYLIST", "true") listbox = self.getControl(50) debug("Listbox control: {}".format(listbox)) sleep(0.6) self.setFocus(listbox) def display_menu(self, hide=False): if hide: self.show_menu = False current_item = self.getControl(CONTROL_MENU).getSelectedItem() current_item.setProperty("showaudiosubmenu", "false") current_item.setProperty("showsearchsubmenu", "false") self.setProperty("SQUEEZEINFO_SHOW_MENU", "false") self.setFocusId(CONTROL_DEFAULT) else: if not self.has_player: return self.set_menu() self.show_menu = True self.setProperty("SQUEEZEINFO_SHOW_MENU", "true") sleep(0.6) self.setFocusId(CONTROL_MENU) def menu_back(self): menucmd = None if len(self.menu_history) > 1: self.menu_history.pop() menucmd = self.menu_history.pop() else: self.menu_history = [] self.set_menu(menucmd) sleep(0.1) self.setFocusId(CONTROL_MENU) def menu_action(self): menu = self.getControl(CONTROL_MENU) item = menu.getSelectedItem() if item.getProperty("Type") in ["menu", "playlist"]: self.set_menu(item.getProperty("cmd")) sleep(0.1) self.setFocusId(CONTROL_MENU) def submenu_action(self, controlid): menu = self.getControl(CONTROL_MENU) menuitem = menu.getSelectedItem() submenu = self.getControl(controlid) item = submenu.getSelectedItem() action = item.getProperty("action") if controlid == CONTROL_AUDIO_SUBMENU: cmd = menuitem.getProperty(action) self.player.request(cmd) elif controlid == CONTROL_SEARCH_SUBMENU: text = self.get_text("Enter search terms...") if text: text = quote_plus(text) cmd = menuitem.getProperty("search") cmd = cmd.replace("__TAGGEDINPUT__", text) self.set_menu(menucmd=cmd) sleep(0.5) self.setFocusId(CONTROL_MENU) self.getControl(CONTROL_MENU).selectItem(0) def display_submenu(self): menu = self.getControl(CONTROL_MENU) item = menu.getSelectedItem() it_type = item.getProperty("Type") if it_type in ["audio", "playlist"]: self.setFocusId(CONTROL_AUDIO_SUBMENU) elif it_type in ["search"]: self.setFocusId(CONTROL_SEARCH_SUBMENU) def set_playlist(self): listbox = self.getControl(CONTROL_PLAYLIST) listbox.reset() pl_items = self.player.playlist_get_detail() items = [] for i, plitm in enumerate(pl_items): item = xbmcgui.ListItem() title, _, artist, icon, _ = self.get_metadata(plitm, process_image=False) item.setInfo("music", {"tracknumber": i + 1, "Title": title, "Artist": artist}) item.setIconImage(icon) items.append(item) 
listbox.addItems(items) pos = self.player.playlist_get_position() listbox.selectItem(pos) def set_menu(self, menucmd=None): handle = LMSMenuHandler(self.player) menubox = self.getControl(CONTROL_MENU) if not menucmd: menu = handle.getCustomMenu(CUSTOM_MENU) else: self.menu_history.append(menucmd) menu = handle.getMenu(menucmd=menucmd) menubox.reset() items = [] for item in menu: l_item = xbmcgui.ListItem() m_type = lms_menu_type(item) play = getattr(item, "cmd_play", "") playnext = getattr(item, "cmd_play_next", "") add = getattr(item, "cmd_add", "") search = getattr(item, "cmd_search", "") if m_type == "playlist": cmd = item.show_items_cmd else: cmd = item.cmdstring if m_type in SUBMENU_AUDIO_TYPES: showaudiosubmenu = "true" else: showaudiosubmenu = "false" if m_type in SUBMENU_SEARCH_TYPES: showsearchsubmenu = "true" else: showsearchsubmenu = "false" lines = item.text.split("\n") if len(lines) > 1: l_item.setProperty("multiline", "true") l_item.setLabel(lines[0]) l_item.setLabel2(lines[1]) else: l_item.setProperty("multiline", "false") l_item.setLabel(lines[0]) l_item.setInfo("music", {"Title": item.text}) l_item.setProperty("Type", m_type) l_item.setProperty("cmd", cmd) l_item.setProperty("play", play) l_item.setProperty("playnext", playnext) l_item.setProperty("add", add) l_item.setProperty("search", search) l_item.setProperty("showaudiosubmenu", showaudiosubmenu) l_item.setProperty("showsearchsubmenu", showsearchsubmenu) l_item.setIconImage(item.icon) items.append(l_item) menubox.addItems(items) def _build_submenu(self, control, items): submenu = self.getControl(control) submenu.reset() for item in items: l = xbmcgui.ListItem() l.setLabel(item[0]) l.setProperty("action", item[1]) submenu.addItem(l) def build_submenus(self): for control, items in SUBMENUS.iteritems(): self._build_submenu(control, items) def build_player_controls(self): control = self.getControl(CONTROL_PLAYER_CONTROL) control.reset() for item_name, item_label in PLAYER_CONTROLS: l = xbmcgui.ListItem() l.setProperty("action", item_name) l.setLabel(item_label) l.setIconImage("squeezeinfo-{}.png".format(item_name)) control.addItem(l) control.setVisibleCondition("true") def show_progress(self): """Method to increase progress bar state. Should be run as a thread to prevent blocking. """ # No debugs here to avoid massive spamming i = 0 # start a loop which stops when Kodi exits while not (xbmc.abortRequested or self.abort): # Every 20 cycles we make request to the server to check.. if not (i % 20): try: # ... playing status self.playing = self.player.get_mode() == "play" except: self.playing = False try: # ... track position e, d = self.player.get_track_elapsed_and_duration() except: e = d = 0.0 with self.lock: self.elapsed = e self.duration = d # Otherwise, just manually increase progress bar else: if self.playing: with self.lock: self.elapsed += 0.25 try: percent = float(self.elapsed/self.duration) * 100 except: percent = 0 # Draw the new progress bar state self.progress.setPercent(percent) # Increment our counter i = (i + 1) % 20 # And sleep... 
            sleep(0.25)

        if not self.abort:
            self.exit("*")

    ############ ACTIONS #######################################################

    ## Now Playing #############################################################

    @ch.action("parentdir", CONTROL_DEFAULT)
    @ch.action("previousmenu", CONTROL_DEFAULT)
    def exit(self, controlid):
        self.cbserver.abort = True
        self.cbserver.join()
        if not self.abort:
            self.abort = True
        del self.cbserver
        del self.cmdserver
        del self.awr
        del self.cache
        del self.player
        self.close()

    @ch.action("left", CONTROL_DEFAULT)
    def on_left(self, controlid):
        self.change_player(-1)

    @ch.action("right", CONTROL_DEFAULT)
    def on_right(self, controlid):
        self.change_player(1)

    @ch.action("number0", CONTROL_DEFAULT)
    @ch.action("number0", CONTROL_PLAYLIST)
    def toggle_playlist(self, controlid):
        if self.show_playlist:
            self.display_playlist(hide=True)
        else:
            self.display_playlist()

    @ch.action("number9", CONTROL_DEFAULT)
    def toggle_menu(self, controlid):
        if self.show_menu:
            self.show_menu = False
            self.setProperty("SQUEEZEINFO_SHOW_MENU", "false")
            self.setFocusId(CONTROL_DEFAULT)
        else:
            self.set_menu()
            self.show_menu = True
            self.setProperty("SQUEEZEINFO_SHOW_MENU", "true")
            sleep(0.6)
            self.setFocusId(CONTROL_MENU)

    @ch.action("number8", CONTROL_DEFAULT)
    def show_player_controls(self, controlid):
        self.setProperty("SQUEEZEINFO_SHOW_CONTROLS", "true")
        sleep(0.1)
        self.setFocusId(CONTROL_PLAYER_CONTROL)

    ## Playlist ################################################################

    @ch.action("parentdir", CONTROL_PLAYLIST)
    @ch.action("previousmenu", CONTROL_PLAYLIST)
    def close_playlist(self, controlid):
        self.display_playlist(hide=True)

    @ch.action("select", CONTROL_PLAYLIST)
    def click_playlist(self, controlid):
        listbox = self.getControl(controlid)
        index = listbox.getSelectedPosition()
        self.player.playlist_play_index(index)

    ## Squeezemenu #############################################################

    @ch.action("right", CONTROL_MENU)
    def show_submenu(self, controlid):
        self.display_submenu()

    @ch.action("previousmenu", CONTROL_MENU)
    def close_menu(self, controlid):
        self.menu_history = []
        self.display_menu(hide=True)

    @ch.action("parentdir", CONTROL_MENU)
    def previous_menu(self, controlid):
        if self.menu_history:
            self.menu_back()
        else:
            self.close_menu(controlid)

    @ch.action("select", CONTROL_MENU)
    def menu_select(self, controlid):
        self.menu_action()

    ## Squeezemenu - submenu ###################################################

    @ch.action("parentdir", CONTROL_AUDIO_SUBMENU)
    @ch.action("parentdir", CONTROL_SEARCH_SUBMENU)
    def submenu_back(self, controlid):
        if self.menu_history:
            self.menu_back()
            self.setFocusId(CONTROL_MENU)
        else:
            self.close_menu(controlid)

    @ch.action("previousmenu", CONTROL_AUDIO_SUBMENU)
    @ch.action("previousmenu", CONTROL_SEARCH_SUBMENU)
    def close_submenu(self, controlid):
        self.menu_history = []
        self.display_menu(hide=True)

    @ch.action("select", CONTROL_AUDIO_SUBMENU)
    @ch.action("select", CONTROL_SEARCH_SUBMENU)
    def clickaction(self, controlid=None):
        self.submenu_action(controlid)

    ## Player controls #########################################################

    @ch.action("up", CONTROL_PLAYER_CONTROL)
    def pl_vol_up(self, controlid):
        control = self.getControl(controlid)
        action = control.getSelectedItem().getProperty("action")
        if action == "volume":
            self.vol_up()

    @ch.action("down", CONTROL_PLAYER_CONTROL)
    def pl_vol_down(self, controlid):
        control = self.getControl(controlid)
        action = control.getSelectedItem().getProperty("action")
        if action == "volume":
            self.vol_down()

    @ch.action("select", CONTROL_PLAYER_CONTROL)
    def player_control_select(self, controlid):
        control = self.getControl(controlid)
        action = control.getSelectedItem().getProperty("action")
        if not self.has_player:
            return
        if action == "previous":
            self.player.prev()
        elif action == "playpause":
            self.player.toggle()
        elif action == "stop":
            self.player.stop()
        elif action == "next":
            self.player.next()

    @ch.action("parentdir", CONTROL_PLAYER_CONTROL)
    @ch.action("previousmenu", CONTROL_PLAYER_CONTROL)
    def close_player_controls(self, controlid):
        self.setProperty("SQUEEZEINFO_SHOW_CONTROLS", "false")
        sleep(0.1)
        self.setFocusId(CONTROL_DEFAULT)
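
# A minimal launch sketch for the window class above, assuming it is run from
# the add-on's entry-point script. The skin XML filename is an assumption;
# the real add-on ships its own layout file, which may be named differently.
if __name__ == '__main__':
    window = SqueezeInfo("script-squeezeinfo.xml",  # assumed skin filename
                         ADDON_PATH,
                         "default",
                         False)
    window.doModal()
    del window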
import os import json from contextlib import asynccontextmanager from urllib.parse import urlencode from .http import connect_unix, connect_tcp from .auth import read_config, server_name, resolve_auth, encode_header from .utils import cached_property CHUNK_SIZE = 65535 _TCP_PROTO = 'tcp://' _UNIX_PROTO = 'unix://' _DOCKER_HOST = os.environ.get('DOCKER_HOST', 'unix:///var/run/docker.sock') if _DOCKER_HOST.startswith(_TCP_PROTO): _HOST, _, _PORT_STR = _DOCKER_HOST[len(_TCP_PROTO):].partition(':') _PORT = int(_PORT_STR) def connect_docker(**kwargs): return connect_tcp(_HOST, _PORT, **kwargs) elif _DOCKER_HOST.startswith(_UNIX_PROTO): _PATH = _DOCKER_HOST[len(_UNIX_PROTO):] def connect_docker(**kwargs): return connect_unix(_PATH, **kwargs) else: raise RuntimeError(f'Invalid DOCKER_HOST environ variable: {_DOCKER_HOST}') async def _recv_json(stream, response): content_type = response.headers.get(b'content-type') assert content_type == b'application/json', response if b'content-length' in response.headers: content_length = response.headers[b'content-length'] data = await stream.recv_data(content_length=int(content_length)) elif b'transfer-encoding' in response.headers: transfer_encoding = response.headers[b'transfer-encoding'] assert transfer_encoding == b'chunked', response chunks = [c async for c in stream.recv_data_chunked()] data = b''.join(chunks) else: assert False, response return json.loads(data.decode('utf-8')) async def _request_json(method, path, data=None, *, _ok_statuses=None): if _ok_statuses is None: _ok_statuses = frozenset({200, 201, 204}) async with connect_docker() as stream: headers = [ ('Host', 'localhost'), ('Connection', 'close'), ] if data is not None: json_data = json.dumps(data).encode('utf-8') headers.append(('Content-Type', 'application/json')) headers.append(('Content-Length', str(len(json_data)))) await stream.send_request(method, path, headers, end_stream=(data is None)) if data is not None: await stream.send_data(json_data) response = await stream.recv_response() if response.status_code == 204: return None if response.status_code in _ok_statuses: return await _recv_json(stream, response) else: raise response.error() async def _get_json(path, *, _ok_statuses=None): return await _request_json('GET', path, _ok_statuses=_ok_statuses) async def _post_json(path, data=None, *, _ok_statuses=None): return await _request_json('POST', path, data=data, _ok_statuses=_ok_statuses) async def _delete_json(path, *, _ok_statuses=None): return await _request_json('DELETE', path, _ok_statuses=_ok_statuses) class Docker: @cached_property def _docker_config(self): return read_config() async def _auth_header(self, image_name): registry = server_name(image_name) auth = await resolve_auth(self._docker_config, registry) if auth is not None: auth_header = encode_header(auth) return auth_header else: return None async def images(self): return await _get_json('/images/json') async def create_container(self, spec, *, params=None): uri = '/containers/create' if params: uri += '?' + urlencode(params) return await _post_json(uri, spec) async def resize(self, id_, *, params=None): assert isinstance(id_, str), id_ uri = '/containers/{id}/resize'.format(id=id_) if params: uri += '?' 
+ urlencode(params) async with connect_docker() as stream: await stream.send_request('POST', uri, [ ('Host', 'localhost'), ]) response = await stream.recv_response() if response.status_code == 200: pass else: raise response.error() async def start(self, id_, *, params=None): assert isinstance(id_, str), id_ uri = '/containers/{id}/start'.format(id=id_) if params: uri += '?' + urlencode(params) async with connect_docker() as stream: await stream.send_request('POST', uri, [ ('Host', 'localhost'), ]) response = await stream.recv_response() if response.status_code == 204: pass elif response.status_code == 304: pass else: raise response.error() async def exec_create(self, id_, spec): assert isinstance(id_, str), id_ uri = '/containers/{id}/exec'.format(id=id_) return await _post_json(uri, spec) @asynccontextmanager async def exec_start(self, id_, spec, stdin_proto, stdout_proto): assert isinstance(id_, str), id_ uri = '/exec/{id}/start'.format(id=id_) async with connect_docker( stdin_proto=stdin_proto, stdout_proto=stdout_proto ) as stream: json_data = json.dumps(spec).encode('utf-8') await stream.send_request('POST', uri, [ ('Host', 'localhost'), ('Content-Type', 'application/json'), ('Content-Length', str(len(json_data))), ('Connection', 'Upgrade'), ('Upgrade', 'tcp'), ], end_stream=False) await stream.send_data(json_data) response = await stream.recv_response() if response.status_code == 101: yield stream.protocol else: raise response.error() async def exec_inspect(self, id_): assert isinstance(id_, str), id_ uri = '/exec/{id}/json'.format(id=id_) return await _get_json(uri) @asynccontextmanager async def attach(self, id_, stdin_proto, stdout_proto, *, params=None): assert isinstance(id_, str), id_ uri = '/containers/{id}/attach'.format(id=id_) if params: uri += '?' + urlencode(params) async with connect_docker( stdin_proto=stdin_proto, stdout_proto=stdout_proto ) as stream: await stream.send_request('POST', uri, [ ('Host', 'localhost'), ('Connection', 'Upgrade'), ('Upgrade', 'tcp'), ]) response = await stream.recv_response() if response.status_code == 101: yield stream.protocol else: raise response.error() async def remove_container(self, id_, *, params=None): assert isinstance(id_, str), id_ uri = '/containers/{id}'.format(id=id_) if params: uri += '?' + urlencode(params) async with connect_docker() as stream: await stream.send_request('DELETE', uri, [ ('Host', 'localhost'), ]) response = await stream.recv_response() if response.status_code == 204: pass else: raise response.error() async def create_image(self, *, params=None): uri = '/images/create' if params: uri += '?' + urlencode(params) headers = [('Host', 'localhost')] if 'fromImage' in params: auth_header = await self._auth_header(params['fromImage']) if auth_header: headers.append(('X-Registry-Auth', auth_header)) async with connect_docker() as stream: await stream.send_request('POST', uri, headers) response = await stream.recv_response() if response.status_code == 200: async for chunk in stream.recv_data_chunked(): yield chunk else: raise response.error() async def push(self, name, *, params): uri = '/images/{name}/push'.format(name=name) if params: uri += '?' 
+ urlencode(params) headers = [('Host', 'localhost')] auth_header = await self._auth_header(name) if auth_header: headers.append(('X-Registry-Auth', auth_header)) async with connect_docker() as stream: await stream.send_request('POST', uri, headers) response = await stream.recv_response() if response.status_code == 200: async for chunk in stream.recv_data_chunked(): yield chunk else: raise response.error() async def containers(self, *, params): uri = '/containers/json' if params: uri += '?' + urlencode(params) return await _get_json(uri) async def remove_image(self, name): uri = '/images/{name}'.format(name=name) return await _delete_json(uri) async def wait(self, id_): assert isinstance(id_, str), id_ uri = '/containers/{id}/wait'.format(id=id_) return await _post_json(uri) async def stop(self, id_, *, params): assert isinstance(id_, str), id_ uri = '/containers/{id}/stop'.format(id=id_) if params: uri += '?' + urlencode(params) await _post_json(uri) async def pause(self, id_): assert isinstance(id_, str), id_ uri = '/containers/{id}/pause'.format(id=id_) await _post_json(uri) async def commit(self, *, params): uri = '/commit' if params: uri += '?' + urlencode(params) return await _post_json(uri) async def unpause(self, id_): assert isinstance(id_, str), id_ uri = '/containers/{id}/unpause'.format(id=id_) await _post_json(uri) async def create_network(self, *, data): uri = '/networks/create' return await _post_json(uri, data=data) async def put_archive(self, id_, arch, *, params): uri = '/containers/{id}/archive'.format(id=id_) if params: uri += '?' + urlencode(params) headers = [ ('Host', 'localhost'), ('transfer-encoding', 'chunked'), ] async with connect_docker() as stream: await stream.send_request('PUT', uri, headers, end_stream=False) while True: chunk = arch.read(CHUNK_SIZE) if len(chunk) == CHUNK_SIZE: await stream.send_data(chunk, end_stream=False) else: if chunk: await stream.send_data(chunk) else: await stream.end() break response = await stream.recv_response() if response.status_code != 200: raise response.error()
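
# A minimal usage sketch for the client above, assuming the underlying
# connect_tcp/connect_unix transports run on an asyncio-compatible event loop
# (the loop used by this package is not shown in this module); 'busybox' is
# an arbitrary example image assumed to be present locally.
async def _example():
    docker = Docker()
    # GET /images/json - list the images known to the daemon.
    for image in await docker.images():
        print(image['Id'])
    # Create, start, wait for, and remove a short-lived container.
    created = await docker.create_container({'Image': 'busybox',
                                             'Cmd': ['true']})
    await docker.start(created['Id'])
    await docker.wait(created['Id'])
    await docker.remove_container(created['Id'], params={'force': 'true'})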
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from page_sets.system_health import platforms
from page_sets.system_health import story_tags
from page_sets.system_health import system_health_story

from page_sets.login_helpers import dropbox_login
from page_sets.login_helpers import google_login

from page_sets.helpers import override_online

from telemetry.util import js_template


class _LoadingStory(system_health_story.SystemHealthStory):
  """Abstract base class for single-page System Health user stories."""
  ABSTRACT_STORY = True

  def __init__(self, story_set, take_memory_measurement,
               extra_browser_args=None):
    super(_LoadingStory, self).__init__(story_set, take_memory_measurement,
                                        extra_browser_args)
    self.script_to_evaluate_on_commit = override_online.ALWAYS_ONLINE

  @classmethod
  def GenerateStoryDescription(cls):
    return 'Load %s' % cls.URL


################################################################################
# Search and e-commerce.
################################################################################
# TODO(petrcermak): Split these into 'portal' and 'shopping' stories.


class LoadGoogleStory2018(_LoadingStory):
  NAME = 'load:search:google:2018'
  URL = 'https://www.google.co.uk/search?q=pepper'
  TAGS = [story_tags.YEAR_2018]


class LoadBaiduStory2018(_LoadingStory):
  NAME = 'load:search:baidu:2018'
  URL = 'https://www.baidu.com/s?word=google'
  TAGS = [story_tags.INTERNATIONAL, story_tags.YEAR_2018]


class LoadYahooStory2018(_LoadingStory):
  NAME = 'load:search:yahoo:2018'
  # Use additional parameter to bypass consent screen.
  URL = 'https://search.yahoo.com/search;_ylt=?p=google&_guc_consent_skip=1541794498'
  TAGS = [story_tags.YEAR_2018]


class LoadAmazonDesktopStory2018(_LoadingStory):
  NAME = 'load:search:amazon:2018'
  URL = 'https://www.amazon.com/s/?field-keywords=pixel'
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadAmazonMobileStory2019(_LoadingStory):
  NAME = 'load:search:amazon:2019'
  URL = 'https://www.amazon.com/s/?field-keywords=pixel'
  TAGS = [story_tags.YEAR_2019]
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY


class LoadTaobaoDesktopStory2018(_LoadingStory):
  NAME = 'load:search:taobao:2018'
  URL = 'https://world.taobao.com/'
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  TAGS = [story_tags.INTERNATIONAL, story_tags.YEAR_2018]


class LoadFlipkartDesktop2018(_LoadingStory):
  NAME = 'load:search:flipkart:2018'
  URL = 'https://www.flipkart.com/search?q=sneakers'
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  TAGS = [story_tags.INTERNATIONAL, story_tags.YEAR_2018]


class LoadTaobaoMobileStory2019(_LoadingStory):
  NAME = 'load:search:taobao:2019'
  URL = 'http://m.intl.taobao.com/'
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
  TAGS = [story_tags.INTERNATIONAL, story_tags.YEAR_2019]


class LoadYandexStory2018(_LoadingStory):
  NAME = 'load:search:yandex:2018'
  URL = 'https://yandex.ru/touchsearch?text=science'
  TAGS = [story_tags.INTERNATIONAL, story_tags.YEAR_2018]


class LoadEbayStory2018(_LoadingStory):
  NAME = 'load:search:ebay:2018'
  URL = 'https://www.ebay.com/sch/i.html?_nkw=headphones'
  TAGS = [story_tags.YEAR_2018]


################################################################################
# Social networks.
################################################################################


class LoadTwitterMobileStory2019(_LoadingStory):
  NAME = 'load:social:twitter:2019'
  URL = 'https://www.twitter.com/nasa'
  TAGS = [story_tags.YEAR_2019]
  # Desktop version is already covered by
  # 'browse:social:twitter_infinite_scroll'.
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY


class LoadVkDesktopStory2018(_LoadingStory):
  NAME = 'load:social:vk:2018'
  URL = 'https://vk.com/sbeatles'
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  TAGS = [
      story_tags.HEALTH_CHECK, story_tags.INTERNATIONAL, story_tags.YEAR_2018
  ]


class LoadInstagramDesktopStory2018(_LoadingStory):
  NAME = 'load:social:instagram:2018'
  URL = 'https://www.instagram.com/selenagomez/'
  TAGS = [story_tags.HEALTH_CHECK, story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadInstagramMobileStory2019(_LoadingStory):
  NAME = 'load:social:instagram:2019'
  URL = 'https://www.instagram.com/selenagomez/'
  TAGS = [story_tags.YEAR_2019]
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY


class LoadPinterestStory2019(_LoadingStory):
  NAME = 'load:social:pinterest:2019'
  URL = 'https://uk.pinterest.com/categories/popular/'
  TAGS = [story_tags.JAVASCRIPT_HEAVY, story_tags.YEAR_2019]


################################################################################
# News, discussion and knowledge portals and blogs.
################################################################################


class LoadBbcDesktopStory2018(_LoadingStory):
  NAME = 'load:news:bbc:2018'
  URL = 'https://www.bbc.co.uk/news'
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadBbcMobileStory2019(_LoadingStory):
  NAME = 'load:news:bbc:2019'
  URL = 'https://www.bbc.co.uk/news'
  TAGS = [story_tags.YEAR_2019]
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY


class LoadCnnStory2018(_LoadingStory):
  NAME = 'load:news:cnn:2018'
  URL = 'https://edition.cnn.com'
  TAGS = [
      story_tags.HEALTH_CHECK, story_tags.JAVASCRIPT_HEAVY,
      story_tags.YEAR_2018
  ]


class LoadFlipboardDesktopStory(_LoadingStory):
  NAME = 'load:news:flipboard'
  URL = 'https://flipboard.com/explore'
  TAGS = [story_tags.YEAR_2016]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadHackerNewsDesktopStory2018(_LoadingStory):
  NAME = 'load:news:hackernews:2018'
  URL = 'https://news.ycombinator.com'
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadNytimesDesktopStory2018(_LoadingStory):
  NAME = 'load:news:nytimes:2018'
  URL = 'http://www.nytimes.com'
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadNytimesMobileStory2019(_LoadingStory):
  NAME = 'load:news:nytimes:2019'
  URL = 'http://mobile.nytimes.com'
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
  TAGS = [story_tags.YEAR_2019]


class LoadQqMobileStory2019(_LoadingStory):
  NAME = 'load:news:qq:2019'
  URL = 'https://xw.qq.com/?f=c_news'
  TAGS = [story_tags.INTERNATIONAL, story_tags.YEAR_2019]
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY


class LoadQqDesktopStory2018(_LoadingStory):
  NAME = 'load:news:qq:2018'
  URL = 'https://news.qq.com'
  TAGS = [story_tags.INTERNATIONAL, story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadRedditDesktopStory2018(_LoadingStory):
  NAME = 'load:news:reddit:2018'
  URL = 'https://www.reddit.com/r/news/top/?sort=top&t=week'
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadRedditMobileStory2019(_LoadingStory):
  NAME = 'load:news:reddit:2019'
  URL = 'https://www.reddit.com/r/news/top/?sort=top&t=week'
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
  TAGS = [story_tags.YEAR_2019]


class LoadWashingtonPostMobileStory2019(_LoadingStory):
  NAME = 'load:news:washingtonpost:2019'
  URL = 'https://www.washingtonpost.com/pwa'
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
  TAGS = [story_tags.YEAR_2019]
  _CONTINUE_FREE_BUTTON_SELECTOR = '.continue-btn.button.free'
  _ACCEPT_GDPR_SELECTOR = '.agree-ckb'
  _CONTINUE_TO_SITE_SELECTOR = '.continue-btn.button.accept-consent'

  def _DidLoadDocument(self, action_runner):
    # Close the popup window. On Nexus 9 (and probably other tablets) the
    # popup window does not have a "Close" button, instead it has only a
    # "Send link to phone" button. So on tablets we run with the popup
    # window open. The popup is transparent, so this is mostly an aesthetic
    # issue.
    has_button = action_runner.EvaluateJavaScript(
        '!!document.querySelector({{ selector }})',
        selector=self._CONTINUE_FREE_BUTTON_SELECTOR)
    if has_button:
      action_runner.ClickElement(
          selector=self._CONTINUE_FREE_BUTTON_SELECTOR)
      action_runner.ScrollPageToElement(selector=self._ACCEPT_GDPR_SELECTOR)
      action_runner.ClickElement(selector=self._ACCEPT_GDPR_SELECTOR)
      element_function = js_template.Render(
          'document.querySelectorAll({{ selector }})[{{ index }}]',
          selector=self._CONTINUE_TO_SITE_SELECTOR, index=0)
      action_runner.ClickElement(element_function=element_function)


class LoadWikipediaStory2018(_LoadingStory):
  NAME = 'load:news:wikipedia:2018'
  URL = 'https://en.wikipedia.org/wiki/Science'
  TAGS = [story_tags.EMERGING_MARKET, story_tags.YEAR_2018]


class LoadIrctcStory2019(_LoadingStory):
  NAME = 'load:news:irctc:2019'
  URL = 'https://www.irctc.co.in'
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
  TAGS = [story_tags.EMERGING_MARKET, story_tags.YEAR_2019]

  def _Login(self, action_runner):
    # There is an error on replay that pops up the first time. If we
    # navigate again, the error disappears.
    action_runner.tab.WaitForDocumentReadyStateToBeComplete()
    action_runner.Navigate(self.URL)
    action_runner.tab.WaitForDocumentReadyStateToBeComplete()


################################################################################
# Audio, images, and video.
################################################################################


class LoadYouTubeStory2018(_LoadingStory):
  # No way to disable autoplay on desktop.
  NAME = 'load:media:youtube:2018'
  URL = 'https://www.youtube.com/watch?v=QGfhS1hfTWw&autoplay=false'
  TAGS = [
      story_tags.HEALTH_CHECK, story_tags.EMERGING_MARKET,
      story_tags.YEAR_2018
  ]


class LoadDailymotionStory2019(_LoadingStory):
  NAME = 'load:media:dailymotion:2019'
  URL = ('https://www.dailymotion.com/video/x7paozv')
  TAGS = [story_tags.YEAR_2019]


class LoadGoogleImagesStory2018(_LoadingStory):
  NAME = 'load:media:google_images:2018'
  URL = 'https://www.google.co.uk/search?tbm=isch&q=love'
  TAGS = [story_tags.YEAR_2018]


class LoadSoundCloudStory2018(_LoadingStory):
  # No way to disable autoplay on desktop. Album artwork doesn't load due to
  NAME = 'load:media:soundcloud:2018'
  URL = 'https://soundcloud.com/lifeofdesiigner/desiigner-panda'
  TAGS = [story_tags.YEAR_2018]


class Load9GagDesktopStory(_LoadingStory):
  NAME = 'load:media:9gag'
  URL = 'https://www.9gag.com/'
  TAGS = [story_tags.YEAR_2016]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


class LoadImgurStory2018(_LoadingStory):
  NAME = 'load:media:imgur:2018'
  URL = 'http://imgur.com/gallery/5UlBN'
  TAGS = [story_tags.YEAR_2018]


class LoadFlickrStory2018(_LoadingStory):
  NAME = 'load:media:flickr:2018'
  URL = 'https://www.flickr.com/photos/tags/noiretblanc'
  TAGS = [story_tags.YEAR_2018]


class LoadFacebookPhotosMobileStory2019(_LoadingStory):
  """Load a page of rihanna's facebook with a photo."""
  NAME = 'load:media:facebook_photos:2019'
  URL = (
      'https://m.facebook.com/rihanna/photos/a.207477806675/10156574885461676/?type=3&source=54&ref=page_internal')
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
  TAGS = [story_tags.EMERGING_MARKET, story_tags.YEAR_2019]


class LoadFacebookPhotosDesktopStory2018(_LoadingStory):
  """Load a page of rihanna's facebook with a photo."""
  NAME = 'load:media:facebook_photos:2018'
  URL = (
      'https://www.facebook.com/rihanna/photos/pb.10092511675.-2207520000.1541795576./10155941787036676/?type=3&theater')
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY


################################################################################
# Online tools (documents, emails, storage, ...).
################################################################################


class LoadDocsStory2019(_LoadingStory):
  """Load a typical google doc page (2019)."""
  NAME = 'load:tools:docs:2019'
  URL = (
      'https://docs.google.com/document/d/1GvzDP-tTLmJ0myRhUAfTYWs3ZUFilUICg8psNHyccwQ/edit?usp=sharing')
  TAGS = [story_tags.YEAR_2019]


class _LoadGmailBaseStory(_LoadingStory):
  NAME = 'load:tools:gmail'
  URL = 'https://mail.google.com/mail/'
  ABSTRACT_STORY = True

  def _Login(self, action_runner):
    google_login.LoginGoogleAccount(action_runner, 'googletest')

    # Navigating to https://mail.google.com immediately leads to an infinite
    # redirection loop due to a bug in WPR (see
    # https://github.com/chromium/web-page-replay/issues/70). We therefore
    # first navigate to a sub-URL to set up the session and hit the
    # resulting redirection loop. Afterwards, we can safely navigate to
    # https://mail.google.com.
    action_runner.tab.WaitForDocumentReadyStateToBeComplete()
    action_runner.Navigate(
        'https://mail.google.com/mail/mu/mp/872/trigger_redirection_loop')
    action_runner.tab.WaitForDocumentReadyStateToBeComplete()


class LoadGmailStory2019(_LoadingStory):
  NAME = 'load:tools:gmail:2019'
  # Needs to be http and not https.
  URL = 'http://mail.google.com/'
  TAGS = [story_tags.HEALTH_CHECK, story_tags.YEAR_2019]
  SKIP_LOGIN = False

  def _Login(self, action_runner):
    google_login.NewLoginGoogleAccount(action_runner, 'googletest')

    # Navigating to http://mail.google.com immediately leads to an infinite
    # redirection loop due to a bug in WPR (see
    # https://bugs.chromium.org/p/chromium/issues/detail?id=1036791). We
    # therefore first navigate to a dummy sub-URL to set up the session and
    # hit the resulting redirection loop. Afterwards, we can safely navigate
    # to http://mail.google.com.
    action_runner.tab.WaitForDocumentReadyStateToBeComplete()
    action_runner.Navigate(
        'https://mail.google.com/mail/mu/mp/872/trigger_redirection_loop')
    action_runner.tab.WaitForDocumentReadyStateToBeComplete()


class LoadChatStory2020(_LoadingStory):
  NAME = 'load:tools:chat:2020'
  # Needs to be http and not https.
  URL = 'http://chat.google.com/'
  TAGS = [story_tags.YEAR_2020]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  SKIP_LOGIN = False

  def _Login(self, action_runner):
    google_login.NewLoginGoogleAccount(action_runner, 'chatfeature')
    action_runner.tab.WaitForDocumentReadyStateToBeComplete()


class LoadStackOverflowStory2018(_LoadingStory):
  """Load a typical question & answer page of stackoverflow.com"""
  NAME = 'load:tools:stackoverflow:2018'
  URL = (
      'https://stackoverflow.com/questions/36827659/compiling-an-application-for-use-in-highly-radioactive-environments')
  TAGS = [story_tags.YEAR_2018]


class LoadDropboxStory2019(_LoadingStory):
  NAME = 'load:tools:dropbox:2019'
  URL = 'https://www.dropbox.com/'
  TAGS = [story_tags.YEAR_2019]
  # Desktop fails to log in.
  SUPPORTED_PLATFORMS = platforms.MOBILE_ONLY
  SKIP_LOGIN = False

  def _Login(self, action_runner):
    dropbox_login.LoginAccount(action_runner, 'dropbox')


class LoadWeatherStory2019(_LoadingStory):
  NAME = 'load:tools:weather:2019'
  URL = 'https://weather.com/en-GB/weather/today/l/USCA0286:1:US'
  TAGS = [
      story_tags.HEALTH_CHECK, story_tags.JAVASCRIPT_HEAVY,
      story_tags.YEAR_2019
  ]


class LoadDriveStory2019(_LoadingStory):
  NAME = 'load:tools:drive:2019'
  URL = 'https://drive.google.com/drive/my-drive'
  TAGS = [story_tags.JAVASCRIPT_HEAVY, story_tags.YEAR_2019]

  def _Login(self, action_runner):
    google_login.NewLoginGoogleAccount(action_runner, 'googletest')


################################################################################
# In-browser games (HTML5 and Flash).
################################################################################


class LoadBubblesStory2019(_LoadingStory):
  """Load "smarty bubbles" game on famobi.com"""
  NAME = 'load:games:bubbles:2019'
  URL = (
      'https://games.cdn.famobi.com/html5games/s/smarty-bubbles/v010/?fg_domain=play.famobi.com&fg_uid=d8f24956-dc91-4902-9096-a46cb1353b6f&fg_pid=4638e320-4444-4514-81c4-d80a8c662371&fg_beat=620')
  TAGS = [story_tags.YEAR_2019]


class LoadLazorsStory(_LoadingStory):
  NAME = 'load:games:lazors'
  # Using "https://" hangs and shows "This site can't be reached".
  URL = 'http://www8.games.mobi/games/html5/lazors/lazors.html'
  TAGS = [story_tags.YEAR_2016]


class LoadSpyChaseStory2018(_LoadingStory):
  NAME = 'load:games:spychase:2018'
  # Using "https://" shows "Your connection is not private".
  URL = 'http://playstar.mobi/games/spychase/index.php'
  TAGS = [story_tags.YEAR_2018]

  def _DidLoadDocument(self, action_runner):
    # The background of the game canvas is set when the "Tap screen to play"
    # caption is displayed.
    action_runner.WaitForJavaScriptCondition(
        'document.querySelector("#game canvas").style.background !== ""')


class LoadMiniclipStory2018(_LoadingStory):
  NAME = 'load:games:miniclip:2018'
  # Using "https://" causes "404 Not Found" during WPR recording.
  URL = 'http://www.miniclip.com/games/en/'
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY  # Requires Flash.


class LoadAlphabettyStory2018(_LoadingStory):
  NAME = 'load:games:alphabetty:2018'
  URL = 'https://king.com/play/alphabetty'
  TAGS = [story_tags.YEAR_2018]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY  # Requires Flash.
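# The stories above all compose the same few hooks. The following minimal,
# hypothetical sketch (not a real benchmark story; the URL is a placeholder
# and the 'googletest' credential is borrowed from the stories above) shows
# how the pieces fit together: NAME/URL/TAGS/SUPPORTED_PLATFORMS class
# attributes, a _Login hook that runs before the measured navigation, and a
# _DidLoadDocument hook that runs once the page has loaded.
class _LoadExampleStorySketch(_LoadingStory):
  NAME = 'load:tools:example:2020'
  URL = 'https://example.com/'
  TAGS = [story_tags.YEAR_2020]
  SUPPORTED_PLATFORMS = platforms.DESKTOP_ONLY
  SKIP_LOGIN = False

  def _Login(self, action_runner):
    # Runs before the story navigates to URL.
    google_login.NewLoginGoogleAccount(action_runner, 'googletest')

  def _DidLoadDocument(self, action_runner):
    # Runs after the navigation completes.
    action_runner.WaitForJavaScriptCondition(
        'document.readyState === "complete"')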
# coding: utf-8

# # Porting Tensorflow tutorial "Deep MNIST for Experts" to polygoggles
#
# based on https://www.tensorflow.org/versions/r0.7/tutorials/mnist/pros/index.html

# In[21]:

import math

import tensorflow as tf

import datasets
import make_polygon_pngs


def get_data_sets_and_params(use_MNIST_instead_of_our_data=False):
    if use_MNIST_instead_of_our_data:
        params = dict(
            width=28,
            height=28,
            num_training_steps=20000,
            batch_size=50,
        )
    else:
        params = dict(
            width=70,
            height=70,
            num_training_steps=1000,
            batch_size=50,
            training_images=5000,
            test_images=1000,
            allow_rotation=True,
        )
    if use_MNIST_instead_of_our_data:
        from tensorflow.examples.tutorials.mnist import input_data
        data_sets = input_data.read_data_sets('MNIST_data', one_hot=True)
    else:
        collection_dir = make_polygon_pngs.make_collection(
            params['width'], params['height'],
            params['training_images'], params['test_images'],
            allow_rotation=params['allow_rotation'])
        data_sets = datasets.read_data_sets(collection_dir)
    return data_sets, params


def run_regression(data_sets, params):
    # width and height were implicit globals in the original notebook; take
    # them from params so the function is self-contained.
    sess = tf.InteractiveSession()
    flat_size = params['width'] * params['height']
    num_labels = data_sets.train.labels.shape[1]

    x = tf.placeholder(tf.float32, shape=[None, flat_size])
    y_ = tf.placeholder(tf.float32, shape=[None, num_labels])

    W = tf.Variable(tf.zeros([flat_size, num_labels]))
    b = tf.Variable(tf.zeros([num_labels]))

    sess.run(tf.initialize_all_variables())

    # We can now implement our regression model. It only takes one line!
    # We multiply the vectorized input images x by the weight matrix W, add
    # the bias b, and compute the softmax probabilities that are assigned to
    # each class.
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # The cost function to be minimized during training can be specified
    # just as easily. Our cost function will be the cross-entropy between
    # the target and the model's prediction.
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

    # Now that we have defined our model and training cost function, it is
    # straightforward to train using TensorFlow. Because TensorFlow knows
    # the entire computation graph, it can use automatic differentiation to
    # find the gradients of the cost with respect to each of the variables.
    # TensorFlow has a variety of builtin optimization algorithms. For this
    # example, we will use steepest gradient descent, with a step length of
    # 0.01, to descend the cross entropy.
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

    # What TensorFlow actually did in that single line was to add new
    # operations to the computation graph. These operations included ones to
    # compute gradients, compute parameter update steps, and apply update
    # steps to the parameters.
    #
    # The returned operation train_step, when run, will apply the gradient
    # descent updates to the parameters. Training the model can therefore be
    # accomplished by repeatedly running train_step.
    for i in range(1000):
        batch = data_sets.train.next_batch(50)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

    # That gives us a list of booleans. To determine what fraction are
    # correct, we cast to floating point numbers and then take the mean. For
    # example, [True, False, True, True] would become [1,0,1,1] which would
    # become 0.75.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Finally, we can evaluate our accuracy on the test data. (On MNIST this
    # should be about 91% correct.)
    accuracy = accuracy.eval(feed_dict={x: data_sets.test.images,
                                        y_: data_sets.test.labels})
    print("Accuracy: %.5f" % accuracy)
    return accuracy


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def run_multilayer_convolutional_network(data_sets, params,
                                         use_MNIST_instead_of_our_data=False):
    """
    # Build a Multilayer Convolutional Network

    Getting .91 accuracy on MNIST is bad. It's almost embarrassingly bad. In
    this section, we'll fix that, jumping from a very simple model to
    something moderately sophisticated: a small convolutional neural
    network. This will get us to around 99.2% accuracy -- not state of the
    art, but respectable.

    ## Weight Initialization

    To create this model, we're going to need to create a lot of weights and
    biases. One should generally initialize weights with a small amount of
    noise for symmetry breaking, and to prevent 0 gradients. Since we're
    using ReLU neurons, it is also good practice to initialize them with a
    slightly positive initial bias to avoid "dead neurons." Instead of doing
    this repeatedly while we build the model, let's create two handy
    functions to do it for us.
    """
    # The original notebook relied on the session and placeholders created
    # by the regression cells; recreate them here so the function runs on
    # its own.
    sess = tf.InteractiveSession()
    width, height = params['width'], params['height']
    batch_size = params['batch_size']
    num_training_steps = params['num_training_steps']
    num_labels = data_sets.train.labels.shape[1]
    x = tf.placeholder(tf.float32, shape=[None, width * height])
    y_ = tf.placeholder(tf.float32, shape=[None, num_labels])

    # ## Convolution and Pooling
    #
    # TensorFlow also gives us a lot of flexibility in convolution and
    # pooling operations. How do we handle the boundaries? What is our
    # stride size? In this example, we're always going to choose the vanilla
    # version. Our convolutions use a stride of one and are zero padded so
    # that the output is the same size as the input. Our pooling is plain
    # old max pooling over 2x2 blocks. To keep our code cleaner, let's also
    # abstract those operations into functions.

    # ## First Convolutional Layer
    #
    # We can now implement our first layer. It will consist of convolution,
    # followed by max pooling. The convolution will compute 32 features for
    # each 5x5 patch. Its weight tensor will have a shape of [5, 5, 1, 32].
    # The first two dimensions are the patch size, the next is the number of
    # input channels, and the last is the number of output channels. We will
    # also have a bias vector with a component for each output channel.

    # In[16]:

    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])

    # To apply the layer, we first reshape x to a 4d tensor, with the second
    # and third dimensions corresponding to image width and height, and the
    # final dimension corresponding to the number of color channels.

    # In[17]:

    # XXX not sure which is width and which is height
    x_image = tf.reshape(x, [-1, width, height, 1])

    # In[18]:

    # We then convolve x_image with the weight tensor, add the bias, apply
    # the ReLU function, and finally max pool.
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # ## Second Convolutional Layer
    #
    # In order to build a deep network, we stack several layers of this
    # type. The second layer will have 64 features for each 5x5 patch.

    # In[19]:

    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # ## Densely Connected Layer
    #
    # Now that the image size has been reduced to 7x7, we add a
    # fully-connected layer with 1024 neurons to allow processing on the
    # entire image. We reshape the tensor from the pooling layer into a
    # batch of vectors, multiply by a weight matrix, add a bias, and apply a
    # ReLU.

    # XXX where is the 7x7 coming from?
    #
    # when bumping to width, height of 50 each:
    #
    # InvalidArgumentError: Input to reshape is a tensor with 540800 values,
    # but the requested shape requires a multiple of 3136
    #
    # 7 x 7 x 64 = 3136
    # 540800 / 64. = 8450
    # 13 x 13 x 50 x 64 = 540800
    #
    # On MNIST, if I change the densely connected layer to fail (change the
    # 7x7x64 to 7x7x65 in both W_fc1 and h_pool2_flat, for example), then I
    # get the following error as soon as we start to train:
    #
    # InvalidArgumentError: Input to reshape is a tensor with 156800 values,
    # but the requested shape requires a multiple of 3185
    #
    # note 3185 = 7x7x65
    # 156800 = 7 * 7 * 64 * 50
    # 50 is batch size
    #
    # ##### with width & height = 70:
    # Input to reshape is a tensor with 1036800 values, but the requested
    # shape requires a multiple of 10816
    #
    # ##### with width & height = 150:
    # Input to reshape is a tensor with 4620800 values, but the requested
    # shape requires a multiple of 20736

    # In[23]:

    def get_size_reduced_to_from_input_tensor_size(input_tensor_size):
        # last divide is 50., pretty sure it's batch size
        size_reduced_to_squared = input_tensor_size / 64. / batch_size
        return math.sqrt(size_reduced_to_squared)

    print(get_size_reduced_to_from_input_tensor_size(4620800))
    print(get_size_reduced_to_from_input_tensor_size(1036800))

    # In[24]:

    if use_MNIST_instead_of_our_data:
        size_reduced_to = 7
    else:
        # for width & height = 50, size_reduced_to seems to be 13
        # for width & height = 70, size_reduced_to seems to be 18
        # for width & height = 150, size_reduced_to seems to be 38
        size_reduced_to = 18

    # W_fc1 = weight_variable([7 * 7 * 64, 1024])
    W_fc1 = weight_variable([size_reduced_to * size_reduced_to * 64, 1024])
    b_fc1 = bias_variable([1024])

    # h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_pool2_flat = tf.reshape(h_pool2,
                              [-1, size_reduced_to * size_reduced_to * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # #### Dropout
    #
    # To reduce overfitting, we will apply dropout before the readout layer.
    # We create a placeholder for the probability that a neuron's output is
    # kept during dropout. This allows us to turn dropout on during
    # training, and turn it off during testing. TensorFlow's tf.nn.dropout
    # op automatically handles scaling neuron outputs in addition to masking
    # them, so dropout just works without any additional scaling.

    # In[25]:

    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # ## Readout Layer
    #
    # Finally, we add a softmax layer, just like for the one layer softmax
    # regression above.

    # In[26]:

    W_fc2 = weight_variable([1024, num_labels])
    b_fc2 = bias_variable([num_labels])

    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    # ## Train and Evaluate the Model
    #
    # How well does this model do? To train and evaluate it we will use code
    # that is nearly identical to that for the simple one layer SoftMax
    # network above. The differences are that: we will replace the steepest
    # gradient descent optimizer with the more sophisticated ADAM optimizer;
    # we will include the additional parameter keep_prob in feed_dict to
    # control the dropout rate; and we will add logging to every 100th
    # iteration in the training process.

    # In[27]:

    cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    sess.run(tf.initialize_all_variables())

    # In[ ]:

    for i in range(num_training_steps):
        batch = data_sets.train.next_batch(batch_size)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g" % (i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    print("test accuracy %g" % accuracy.eval(feed_dict={
        x: data_sets.test.images, y_: data_sets.test.labels, keep_prob: 1.0}))

    # In[ ]:
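# A small standalone sketch (not from the original notebook) answering the
# "XXX where is the 7x7 coming from?" question above: each max_pool_2x2
# layer halves the spatial side length, rounding up because padding='SAME'.
# Two pooling layers therefore take 28 -> 14 -> 7, 50 -> 25 -> 13,
# 70 -> 35 -> 18, and 150 -> 75 -> 38, matching the sizes observed
# empirically in the comments above.
def size_after_two_2x2_pools(side):
    """Spatial side length after two stride-2, SAME-padded 2x2 poolings."""
    for _ in range(2):
        side = (side + 1) // 2  # ceil(side / 2)
    return side


assert size_after_two_2x2_pools(28) == 7
assert size_after_two_2x2_pools(50) == 13
assert size_after_two_2x2_pools(70) == 18
assert size_after_two_2x2_pools(150) == 38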
from data.entities import Entity import random class Mob(Entity): def init(self): super(Mob,self).init() self.set_attributes('01100') self.char = '@' self.name = 'mob' self.fgcol = (0,255,255) self.dead = 0 def collide(self, id): ent = self.parent[id] if self.parent.is_instance(id, "mob"): hit = ent.deal_damage(10) def was_collided(self, id): return False def deal_damage(self, amount, target=None): self.die() return self.check_damage() def check_damage(self): if not self.dead: return False return True def die(self): """Verify if not already dead, and change into corpse of being.""" if not self.dead: self.dead = 1 id = self.parent.add_entity("item") self.parent.set_pos(id,self.parent.get_pos(self.id)) self.parent[id].name = self.name + " corpse" self.parent.set_parent(self.id,0) def update(self): if not self.check_damage(): dx, dy = random.randint(-1,1), random.randint(-1,1) self.move(dx, dy) class Humanoid(Mob): def init(self): super(Humanoid,self).init() self.name = "humanoid" self.nodes = { } self.bodyparts = ['head', 'neck', 'chest', 'back', 'left hand', 'right hand', 'left leg', 'right leg'] for id in range(len(self.bodyparts)): self.add_node(self.bodyparts[id]) def was_collided(self, id): success = super(Humanoid,self).was_collided(id) ent = self.parent[id] if self.parent.is_instance(id, "mob"): success = self.check_damage() return success def get_injury(self,node): node = self.get_node(node) ents = self.parent.get_in(node) if ents: for id in self.parent.get_in(node): if self.parent.is_instance(id, "wound"): ent = self.parent.get_ent(id) return ent return None def get_injury_amount(self,node): node = self.get_node(node) total = 0 ents = self.parent.get_in(node) if ents: for id in self.parent.get_in(node): if self.parent.is_instance(id, "wound"): ent = self.parent[id] total += ent.damage return total def get_injuries(self): total = 0 for part in self.bodyparts: total += self.get_injury_amount(part) return total def get_node(self,name): if name not in self.nodes: raise IDNotAssignedError return self.nodes[name] def get_nodes(self): return self.nodes def add_node(self,name): for ent in self: if self.parent[ent].name == name: self.nodes[name] = ent return ent_id = self.parent.add_entity('bodypart') self.parent.ent_equip(self.id, ent_id) self.parent[ent_id].name = name self.nodes[name] = ent_id def deal_damage(self, amount, target=None): if not target: target = random.choice(self.bodyparts) if not self.get_injury(target): wound = self.parent.add_entity('wound') self.parent[wound].set_damage(amount) self.parent.set_parent(wound,self.get_node(target)) else: wound = self.get_injury(target) wound.set_damage(wound.damage + amount) return self.check_damage() def check_damage(self): amount = self.get_injuries() if amount >= 100: self.die() return True return False def update(self): self.check_damage() class Player(Humanoid): """Simple player class.""" def init(self): super(Player,self).init() self.char = 'player' self.fgcol = (255,255,255) self.name = "Player" self.pickup_queue = [ ] self.inventory = self.nodes['right hand'] def add_pickup(self, obj): self.pickup_queue.append(obj) def handle_pickups(self): if self.pickup_queue: s = "You lift the " for ent in self.pickup_queue: self.parent.set_parent(ent,self.inventory) s += self.parent[ent].name +', ' self.parent.post_message(s[:-2]+".") self.pickup_queue = [ ] def can_lift(self): #if not self.parent.get_in(self.inventory): # return True #return False return True def finished_dropping(self, id, success_value): 
super(Player,self).finished_dropping(id, success_value) if success_value: if self.inventory == id: self.inventory = self.nodes['right hand'] self.parent.post_message("You drop the "+self.parent.get_name(id)+".") def finished_lifting(self, id, success_value): super(Player,self).finished_lifting(id, success_value) if self.parent.is_instance(id, "item"): if self.can_lift() and success_value: self.add_pickup(id) def collide(self, id): super(Player,self).collide(id) if self.parent.is_instance(id, "mob"): ent = self.parent[id] self.parent.post_message("You slice at the "+self.parent.get_name(id)+'.') def finished_colliding(self, id, success_value): super(Player,self).finished_colliding(id, success_value) if success_value: if self.parent.is_instance(id, "level_end"): pass #to add level end code here elif self.parent.is_instance(id, "game_end"): pass #to add game end code here elif self.parent.is_instance(id, "mob"): self.parent.post_message("You kill the "+self.parent.get_name(id)+".") #to add enemy kill code here (xp?) else: if self.parent.is_instance(id, "door"): self.parent.post_message("You open the "+self.parent.get_name(id)+".") def die(self): if not self.dead: self.dead = 1 self.parent.set_parent(self.id,0) def update(self): super(Player,self).update() self.handle_pickups() class IDNotAssignedError(Exception): """Raised when an entity was asked to give its ID, but it has none assigned.""" pass
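# Framework-free sketch (illustrative names only, not part of the entity
# system above) of the wound bookkeeping that Humanoid.deal_damage and
# check_damage implement: each hit wounds a random body part, damage
# accumulates per part, and the creature dies once the total across all
# parts reaches 100.
def simulate_wounds(bodyparts, hits, rng=None):
    rng = rng or random.Random(0)
    wounds = dict((part, 0) for part in bodyparts)
    for amount in hits:
        wounds[rng.choice(bodyparts)] += amount
        if sum(wounds.values()) >= 100:
            return wounds, True   # dead, as in Humanoid.check_damage
    return wounds, False


# Four 30-point hits cross the 100-damage threshold on the fourth hit.
print(simulate_wounds(['head', 'chest', 'left hand', 'right hand'],
                      [30, 30, 30, 30]))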
import pytest import sqlalchemy as sa from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy_utils.relationships import select_correlated_expression @pytest.fixture def group_user_tbl(Base): return sa.Table( 'group_user', Base.metadata, sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')), sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id')) ) @pytest.fixture def group_tbl(Base): class Group(Base): __tablename__ = 'group' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) return Group @pytest.fixture def friendship_tbl(Base): return sa.Table( 'friendships', Base.metadata, sa.Column( 'friend_a_id', sa.Integer, sa.ForeignKey('user.id'), primary_key=True ), sa.Column( 'friend_b_id', sa.Integer, sa.ForeignKey('user.id'), primary_key=True ) ) @pytest.fixture def User(Base, group_user_tbl, friendship_tbl): class User(Base): __tablename__ = 'user' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) groups = sa.orm.relationship( 'Group', secondary=group_user_tbl, backref='users' ) # this relationship is used for persistence friends = sa.orm.relationship( 'User', secondary=friendship_tbl, primaryjoin=id == friendship_tbl.c.friend_a_id, secondaryjoin=id == friendship_tbl.c.friend_b_id, ) friendship_union = ( sa.select([ friendship_tbl.c.friend_a_id, friendship_tbl.c.friend_b_id ]).union( sa.select([ friendship_tbl.c.friend_b_id, friendship_tbl.c.friend_a_id] ) ).alias() ) User.all_friends = sa.orm.relationship( 'User', secondary=friendship_union, primaryjoin=User.id == friendship_union.c.friend_a_id, secondaryjoin=User.id == friendship_union.c.friend_b_id, viewonly=True, order_by=User.id ) return User @pytest.fixture def Category(Base, group_user_tbl, friendship_tbl): class Category(Base): __tablename__ = 'category' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) created_at = sa.Column(sa.DateTime) parent_id = sa.Column(sa.Integer, sa.ForeignKey('category.id')) parent = sa.orm.relationship( 'Category', backref='subcategories', remote_side=[id], order_by=id ) return Category @pytest.fixture def Article(Base, Category, User): class Article(Base): __tablename__ = 'article' id = sa.Column('_id', sa.Integer, primary_key=True) name = sa.Column(sa.String) name_synonym = sa.orm.synonym('name') @hybrid_property def name_upper(self): return self.name.upper() if self.name else None @name_upper.expression def name_upper(cls): return sa.func.upper(cls.name) content = sa.Column(sa.String) category_id = sa.Column(sa.Integer, sa.ForeignKey(Category.id)) category = sa.orm.relationship(Category, backref='articles') author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id)) author = sa.orm.relationship( User, primaryjoin=author_id == User.id, backref='authored_articles' ) owner_id = sa.Column(sa.Integer, sa.ForeignKey(User.id)) owner = sa.orm.relationship( User, primaryjoin=owner_id == User.id, backref='owned_articles' ) return Article @pytest.fixture def Comment(Base, Article, User): class Comment(Base): __tablename__ = 'comment' id = sa.Column(sa.Integer, primary_key=True) content = sa.Column(sa.String) article_id = sa.Column(sa.Integer, sa.ForeignKey(Article.id)) article = sa.orm.relationship(Article, backref='comments') author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id)) author = sa.orm.relationship(User, backref='comments') Article.comment_count = sa.orm.column_property( sa.select([sa.func.count(Comment.id)]) .where(Comment.article_id == Article.id) .correlate_except(Article) ) return Comment @pytest.fixture def 
model_mapping(Article, Category, Comment, group_tbl, User): return { 'articles': Article, 'categories': Category, 'comments': Comment, 'groups': group_tbl, 'users': User } @pytest.fixture def init_models(Article, Category, Comment, group_tbl, User): pass @pytest.fixture def dataset( session, User, group_tbl, Article, Category, Comment ): group = group_tbl(name='Group 1') group2 = group_tbl(name='Group 2') user = User(id=1, name='User 1', groups=[group, group2]) user2 = User(id=2, name='User 2') user3 = User(id=3, name='User 3', groups=[group]) user4 = User(id=4, name='User 4', groups=[group2]) user5 = User(id=5, name='User 5') user.friends = [user2] user2.friends = [user3, user4] user3.friends = [user5] article = Article( name='Some article', author=user, owner=user2, category=Category( id=1, name='Some category', subcategories=[ Category( id=2, name='Subcategory 1', subcategories=[ Category( id=3, name='Subsubcategory 1', subcategories=[ Category( id=5, name='Subsubsubcategory 1', ), Category( id=6, name='Subsubsubcategory 2', ) ] ) ] ), Category(id=4, name='Subcategory 2'), ] ), comments=[ Comment( content='Some comment', author=user ) ] ) session.add(user3) session.add(user4) session.add(article) session.commit() @pytest.mark.usefixtures('dataset', 'postgresql_dsn') class TestSelectCorrelatedExpression(object): @pytest.mark.parametrize( ('model_key', 'related_model_key', 'path', 'result'), ( ( 'categories', 'categories', 'subcategories', [ (1, 2), (2, 1), (3, 2), (4, 0), (5, 0), (6, 0) ] ), ( 'articles', 'comments', 'comments', [ (1, 1), ] ), ( 'users', 'groups', 'groups', [ (1, 2), (2, 0), (3, 1), (4, 1), (5, 0) ] ), ( 'users', 'users', 'all_friends', [ (1, 1), (2, 3), (3, 2), (4, 1), (5, 1) ] ), ( 'users', 'users', 'all_friends.all_friends', [ (1, 3), (2, 2), (3, 3), (4, 3), (5, 2) ] ), ( 'users', 'users', 'groups.users', [ (1, 3), (2, 0), (3, 2), (4, 2), (5, 0) ] ), ( 'groups', 'articles', 'users.authored_articles', [ (1, 1), (2, 1), ] ), ( 'categories', 'categories', 'subcategories.subcategories', [ (1, 1), (2, 2), (3, 0), (4, 0), (5, 0), (6, 0) ] ), ( 'categories', 'categories', 'subcategories.subcategories.subcategories', [ (1, 2), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0) ] ), ) ) def test_returns_correct_results( self, session, model_mapping, model_key, related_model_key, path, result ): model = model_mapping[model_key] alias = sa.orm.aliased(model_mapping[related_model_key]) aggregate = select_correlated_expression( model, sa.func.count(sa.distinct(alias.id)), path, alias ) query = session.query( model.id, aggregate.label('count') ).order_by(model.id) assert query.all() == result def test_with_non_aggregate_function( self, session, User, Article ): aggregate = select_correlated_expression( Article, sa.func.json_build_object('name', User.name), 'comments.author', User ) query = session.query( Article.id, aggregate.label('author_json') ).order_by(Article.id) result = query.all() assert result == [ (1, {'name': 'User 1'}) ]
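# A small additional usage sketch for the union-based `all_friends`
# relationship defined above: the union of (friend_a, friend_b) with
# (friend_b, friend_a) makes the view-only relationship symmetric, while the
# plain `friends` relationship only follows the persisted direction.
@pytest.mark.usefixtures('dataset', 'postgresql_dsn')
class TestAllFriendsIsSymmetric(object):
    def test_union_follows_both_directions(self, session, User):
        user1 = session.query(User).get(1)
        user2 = session.query(User).get(2)
        # Only user1 persisted the friendship, yet both sides see it.
        assert user2 in user1.all_friends
        assert user1 in user2.all_friends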
# Re-aligner small RNA sequence from SAM/BAM file (miRBase annotation)
from __future__ import print_function

import os.path as op
import re
import shutil
import argparse
from collections import defaultdict

import pandas as pd
import pysam

from seqcluster.libs import do
from seqcluster.libs.utils import file_exists
import seqcluster.libs.logger as mylog
from seqcluster.install import _get_miraligner
from seqcluster.seqbuster.snps import create_vcf
from seqcluster.collapse import collapse_fastq
from seqcluster.seqbuster.realign import *

from mirtop.gff import reader

logger = mylog.getLogger(__name__)


def _download_mirbase(args, version="CURRENT"):
    """
    Download files from mirbase
    """
    if not args.hairpin or not args.mirna:
        logger.info("Working with version %s" % version)
        hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz")
        mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz")
        if not file_exists(hairpin_fn):
            cmd_h = ("wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s"
                     " && gunzip -f %s" % (version, hairpin_fn, hairpin_fn))
            do.run(cmd_h, "download hairpin")
        if not file_exists(mirna_fn):
            cmd_m = ("wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s"
                     " && gunzip -f %s" % (version, mirna_fn, mirna_fn))
            do.run(cmd_m, "download mirna")
        # gunzip -f leaves the uncompressed files next to the downloads.
        return hairpin_fn[:-3], mirna_fn[:-3]
    return args.hairpin, args.mirna


def _make_unique(name, idx):
    """Make name unique in case only counts there"""
    p = re.compile(".[aA-zZ]+_x[0-9]+")
    if p.match(name):
        tags = name[1:].split("_x")
        return ">%s_%s_x%s" % (tags[0], idx, tags[1])
    return name.replace("@", ">")


def _filter_seqs(fn):
    """Convert names of sequences to unique ids"""
    out_file = op.splitext(fn)[0] + "_unique.fa"
    idx = 0
    if not file_exists(out_file):
        with open(out_file, 'w') as out_handle:
            with open(fn) as in_handle:
                line = in_handle.readline()
                while line:
                    if line.startswith("@") or line.startswith(">"):
                        fixed_name = _make_unique(line.strip(), idx)
                        seq = in_handle.readline().strip()
                        counts = _get_freq(fixed_name)
                        if len(seq) < 26 and (counts > 1 or counts == 0):
                            idx += 1
                            print(fixed_name, file=out_handle, end="\n")
                            print(seq, file=out_handle, end="\n")
                        if line.startswith("@"):
                            # skip FASTQ '+' separator and quality lines
                            in_handle.readline()
                            in_handle.readline()
                    line = in_handle.readline()
    return out_file


def _convert_to_fasta(fn):
    out_file = op.splitext(fn)[0] + ".fa"
    with open(out_file, 'w') as out_handle:
        with open(fn) as in_handle:
            line = in_handle.readline()
            while line:
                if line.startswith("@"):
                    seq = in_handle.readline()
                    _ = in_handle.readline()
                    qual = in_handle.readline()
                elif line.startswith(">"):
                    seq = in_handle.readline()
                    count = 2
                    # str.find returns -1 (truthy) when absent, so test
                    # explicitly for a match.
                    if line.find("_x") > -1:
                        count = int(line.strip().split("_x")[1])
                    if count > 1:
                        print(">%s" % line.strip()[1:], file=out_handle,
                              end="\n")
                        print(seq.strip(), file=out_handle, end="\n")
                line = in_handle.readline()
    return out_file


def _get_pos(string):
    name = string.split(":")[0][1:]
    pos = string.split(":")[1][:-1].split("-")
    # materialize as a list so the positions stay subscriptable in Python 3
    return name, [int(p) for p in pos]


def _read_mature(matures, sps):
    mature = defaultdict(dict)
    with open(matures) as in_handle:
        for line in in_handle:
            if line.startswith(">") and line.find(sps) > -1:
                name = line.strip().replace(">", " ").split()
                mir5p = _get_pos(name[2])
                mature[name[0]] = {mir5p[0]: mir5p[1]}
                if len(name) > 3:
                    mir3p = _get_pos(name[3])
                    mature[name[0]].update({mir3p[0]: mir3p[1]})
    return mature


def _read_precursor(precursor, sps):
    """
    Load precursor file for that species
    """
    hairpin = defaultdict(str)
    name = None
    with open(precursor) as in_handle:
        for line in in_handle:
            if line.startswith(">"):
                if hairpin[name]:
                    hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
                name = line.strip().replace(">", " ").split()[0]
            else:
                hairpin[name] += line.strip()
        hairpin[name] = hairpin[name] + "NNNNNNNNNNNN"
    return hairpin


def _read_gtf(gtf):
    """
    Load GTF file with precursor positions on genome
    """
    if not gtf:
        return gtf
    db = defaultdict(list)
    with open(gtf) as in_handle:
        for line in in_handle:
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            name = [n.split("=")[1] for n in cols[-1].split(";")
                    if n.startswith("Name")]
            chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6]
            if cols[2] == "miRNA_primary_transcript":
                db[name[0]].append([chrom, int(start), int(end), strand])
    return db


def _coord(sequence, start, mirna, precursor, iso):
    """
    Define t5 and t3 isomirs
    """
    dif = abs(mirna[0] - start)
    if start < mirna[0]:
        iso.t5 = sequence[:dif].upper()
    elif start > mirna[0]:
        iso.t5 = precursor[mirna[0] - 1:mirna[0] - 1 + dif].lower()
    elif start == mirna[0]:
        iso.t5 = "NA"
    if dif > 4:
        logger.debug("start > 3 %s %s %s %s %s" % (
            start, len(sequence), dif, mirna, iso.format()))
        return None
    end = start + (len(sequence) - len(iso.add)) - 1
    dif = abs(mirna[1] - end)
    if iso.add:
        sequence = sequence[:-len(iso.add)]
    # if dif > 3:
    #     return None
    if end > mirna[1]:
        iso.t3 = sequence[-dif:].upper()
    elif end < mirna[1]:
        iso.t3 = precursor[mirna[1] - dif:mirna[1]].lower()
    elif end == mirna[1]:
        iso.t3 = "NA"
    if dif > 4:
        logger.debug("end > 3 %s %s %s %s %s" % (
            len(sequence), end, dif, mirna, iso.format()))
        return None
    logger.debug("%s %s %s %s %s %s" % (
        start, len(sequence), end, dif, mirna, iso.format()))
    return True


def _annotate(reads, mirbase_ref, precursors):
    """
    Using SAM/BAM coordinates, mismatches and realign to annotate isomiRs
    """
    for r in reads:
        for p in reads[r].precursors:
            start = reads[r].precursors[p].start + 1  # convert to 1base
            end = start + len(reads[r].sequence)
            for mature in mirbase_ref[p]:
                mi = mirbase_ref[p][mature]
                is_iso = _coord(reads[r].sequence, start, mi,
                                precursors[p], reads[r].precursors[p])
                logger.debug(
                    ("{r} {p} {start} {is_iso} {mature} {mi} "
                     "{mature_s}").format(
                        s=reads[r].sequence,
                        mature_s=precursors[p][mi[0] - 1:mi[1]],
                        **locals()))
                if is_iso:
                    reads[r].precursors[p].mirna = mature
                    break
    return reads


def _realign(seq, precursor, start):
    """
    The actual fn that will realign the sequence
    """
    error = set()
    pattern_addition = [[1, 1, 0], [1, 0, 1], [0, 1, 0],
                        [0, 1, 1], [0, 0, 1], [1, 1, 1]]
    for pos in range(0, len(seq)):
        if seq[pos] != precursor[(start + pos)]:
            error.add(pos)
    subs, add = [], []
    for e in error:
        if e < len(seq) - 3:
            subs.append([e, seq[e], precursor[start + e]])
    pattern, error_add = [], []
    for e in range(len(seq) - 3, len(seq)):
        if e in error:
            pattern.append(1)
            error_add.append(e)
        else:
            pattern.append(0)
    for p in pattern_addition:
        if pattern == p:
            add = seq[error_add[0]:]
            break
    if not add and error_add:
        for e in error_add:
            subs.append([e, seq[e], precursor[start + e]])
    return subs, add


def _clean_hits(reads):
    """
    Select only best matches
    """
    new_reads = defaultdict(realign)
    for r in reads:
        world = {}
        sc = 0
        for p in reads[r].precursors:
            world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))
            if sc < world[p]:
                sc = world[p]
        new_reads[r] = reads[r]
        for p in world:
            logger.debug("score %s %s %s" % (r, p, world[p]))
            if sc != world[p]:
                logger.debug("remove %s %s %s" % (r, p, world[p]))
                new_reads[r].remove_precursor(p)
    return new_reads


def _sort_by_name(bam_fn):
    """ sort bam file by name sequence """
    # Sorting is currently done inline at the call site with
    # pysam.sort("-n", ...); this helper is kept as a placeholder.


def _sam_to_bam(bam_fn):
    if bam_fn.endswith("sam"):
        bam_out = "%s.bam" % op.splitext(bam_fn)[0]
        cmd = "samtools view -Sbh {bam_fn} -o {bam_out}"
        do.run(cmd.format(**locals()), "sam to bam")
        return bam_out
    return bam_fn
def _read_bam(bam_fn, precursors): """ read bam file and perform realignment of hits """ mode = "r" if bam_fn.endswith("sam") else "rb" handle = pysam.Samfile(bam_fn, mode) reads = defaultdict(realign) for line in handle: chrom = handle.getrname(line.reference_id) # print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom)) query_name = line.query_name if query_name not in reads: reads[query_name].sequence = line.query_sequence iso = isomir() iso.align = line iso.start = line.reference_start iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start) reads[query_name].set_precursor(chrom, iso) reads = _clean_hits(reads) return reads def _collapse_fastq(in_fn): """ collapse reads into unique sequences """ args = argparse.Namespace() args.fastq = in_fn args.minimum = 1 args.out = op.dirname(in_fn) return collapse_fastq(args) def _read_pyMatch(fn, precursors): """ read pyMatch file and perform realignment of hits """ with open(fn) as handle: reads = defaultdict(realign) for line in handle: query_name, seq, chrom, reference_start, end, mism, add = line.split() reference_start = int(reference_start) # chrom = handle.getrname(cols[1]) # print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom)) if query_name not in reads: reads[query_name].sequence = seq iso = isomir() iso.align = line iso.start = reference_start iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start) logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add)) if len(iso.subs) > 1: continue reads[query_name].set_precursor(chrom, iso) reads = _clean_hits(reads) return reads def _parse_mut(subs): """ Parse mutation tag from miraligner output """ if subs!="0": subs = [[subs.replace(subs[-2:], ""),subs[-2], subs[-1]]] return subs def _read_miraligner(fn): """Read ouput of miraligner and create compatible output.""" reads = defaultdict(realign) with open(fn) as in_handle: in_handle.readline() for line in in_handle: cols = line.strip().split("\t") iso = isomir() query_name, seq = cols[1], cols[0] chrom, reference_start = cols[-2], cols[3] iso.mirna = cols[3] subs, add, iso.t5, iso.t3 = cols[6:10] if query_name not in reads: reads[query_name].sequence = seq iso.align = line iso.start = reference_start iso.subs, iso.add = _parse_mut(subs), add logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add)) reads[query_name].set_precursor(chrom, iso) return reads def _cmd_miraligner(fn, out_file, species, hairpin, out): """ Run miraligner for miRNA annotation """ tool = _get_miraligner() path_db = op.dirname(op.abspath(hairpin)) cmd = "{tool} -freq -i {fn} -o {out_file} -s {species} -db {path_db} -sub 1 -trim 3 -add 3" if not file_exists(out_file): logger.info("Running miraligner with %s" % fn) do.run(cmd.format(**locals()), "miraligner with %s" % fn) shutil.move(out_file + ".mirna", out_file) return out_file def _mirtop(out_files, hairpin, gff3, species, out): """ Convert miraligner to mirtop format """ args = argparse.Namespace() args.hairpin = hairpin args.sps = species args.gtf = gff3 args.add_extra = True args.files = out_files args.format = "seqbuster" args.out_format = "gff" args.out = out reader(args) def _get_freq(name): """ Check if name read contains counts (_xNumber) """ try: counts = int(name.split("_x")[1]) except: return 0 return counts def _tab_output(reads, out_file, sample): seen = set() lines = [] lines_pre = [] seen_ann = {} dt 
= None with open(out_file, 'w') as out_handle: print("name\tseq\tfreq\tchrom\tstart\tend\tsubs\tadd\tt5\tt3\ts5\ts3\tDB\tprecursor\thits", file=out_handle, end="") for (r, read) in reads.items(): hits = set() [hits.add(mature.mirna) for mature in read.precursors.values() if mature.mirna] hits = len(hits) for (p, iso) in read.precursors.items(): if len(iso.subs) > 3 or not iso.mirna: continue if (r, iso.mirna) not in seen: seen.add((r, iso.mirna)) chrom = iso.mirna if not chrom: chrom = p count = _get_freq(r) seq = reads[r].sequence if iso.get_score(len(seq)) < 1: continue if iso.subs: iso.subs = [] if "N" in iso.subs[0] else iso.subs annotation = "%s:%s" % (chrom, iso.format(":")) res = ("{seq}\t{r}\t{count}\t{chrom}\tNA\tNA\t{format}\tNA\tNA\tmiRNA\t{p}\t{hits}").format(format=iso.format().replace("NA", "0"), **locals()) if annotation in seen_ann and seq.find("N") < 0 and seen_ann[annotation].split("\t")[0].find("N") < 0: raise ValueError("Same isomir %s from different sequence: \n%s and \n%s" % (annotation, res, seen_ann[annotation])) seen_ann[annotation] = res lines.append([annotation, chrom, count, sample, hits]) lines_pre.append([annotation, chrom, p, count, sample, hits]) print(res, file=out_handle, end="") if lines: dt = pd.DataFrame(lines) dt.columns = ["isomir", "chrom", "counts", "sample", "hits"] dt = dt[dt['hits']>0] dt = dt.loc[:, "isomir":"sample"] dt = dt.groupby(['isomir', 'chrom', 'sample'], as_index=False).sum() dt.to_csv(out_file + "_summary") dt_pre = pd.DataFrame(lines_pre) dt_pre.columns = ["isomir", "mature", "chrom", "counts", "sample", "hits"] dt_pre = dt_pre[dt_pre['hits']==1] dt_pre = dt_pre.loc[:, "isomir":"sample"] dt_pre = dt_pre.groupby(['isomir', 'chrom', 'mature', 'sample'], as_index=False).sum() return out_file, dt, dt_pre return None def _merge(dts): """ merge multiple samples in one matrix """ df = pd.concat(dts) ma = df.pivot(index='isomir', columns='sample', values='counts') ma_mirna = ma ma = ma.fillna(0) ma_mirna['mirna'] = [m.split(":")[0] for m in ma.index.values] ma_mirna = ma_mirna.groupby(['mirna']).sum() ma_mirna = ma_mirna.fillna(0) return ma, ma_mirna def _create_counts(out_dts, out_dir): """Summarize results into single files.""" ma, ma_mirna = _merge(out_dts) out_ma = op.join(out_dir, "counts.tsv") out_ma_mirna = op.join(out_dir, "counts_mirna.tsv") ma.to_csv(out_ma, sep="\t") ma_mirna.to_csv(out_ma_mirna, sep="\t") return out_ma_mirna, out_ma def miraligner(args): """ Realign BAM hits to miRBAse to get better accuracy and annotation """ hairpin, mirna = _download_mirbase(args) precursors = _read_precursor(args.hairpin, args.sps) matures = _read_mature(args.mirna, args.sps) gtf = _read_gtf(args.gtf) out_dts = [] out_files = [] for bam_fn in args.files: sample = op.splitext(op.basename(bam_fn))[0] logger.info("Reading %s" % bam_fn) if bam_fn.endswith("bam") or bam_fn.endswith("sam"): bam_fn = _sam_to_bam(bam_fn) bam_sort_by_n = op.splitext(bam_fn)[0] + "_sort" pysam.sort("-n", bam_fn, bam_sort_by_n) reads = _read_bam(bam_sort_by_n + ".bam", precursors) elif bam_fn.endswith("fasta") or bam_fn.endswith("fa") or \ bam_fn.endswith("fastq"): if args.collapse: bam_fn = _collapse_fastq(bam_fn) out_file = op.join(args.out, sample + ".premirna") bam_fn = _filter_seqs(bam_fn) if args.miraligner: _cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out) reads = _read_miraligner(out_file) out_files.append(out_file) else: raise ValueError("Format not recognized.") if args.miraligner: _mirtop(out_files, args.hairpin, args.gtf, args.sps, 
args.out) if not args.miraligner: reads = _annotate(reads, matures, precursors) out_file = op.join(args.out, sample + ".mirna") out_file, dt, dt_pre = _tab_output(reads, out_file, sample) try: vcf_file = op.join(args.out, sample + ".vcf") if not file_exists(vcf_file): # if True: create_vcf(dt_pre, matures, gtf, vcf_file) try: import vcf vcf.Reader(filename=vcf_file) except Exception as e: logger.warning(e.__doc__) logger.warning(e) except Exception as e: # traceback.print_exc() logger.warning(e.__doc__) logger.warning(e) if isinstance(dt, pd.DataFrame): out_dts.append(dt) if out_dts: _create_counts(out_dts, args.out) else: print("No files analyzed!")
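# Hypothetical driver showing how the pipeline above is wired together; the
# attribute names are exactly the ones this module reads off `args`, and the
# file paths are placeholders.
if __name__ == "__main__":
    example_args = argparse.Namespace(
        files=["sample1.fastq"],   # FASTQ/FASTA reads or SAM/BAM hits
        sps="hsa",                 # species prefix used in miRBase names
        out="out_dir",
        hairpin="hairpin.fa",      # precursor sequences (None to download)
        mirna="miRNA.str",         # mature miRNA coordinates
        gtf="hsa.gff3",            # precursor positions on the genome
        miraligner=True,           # annotate with miraligner
        collapse=True,             # collapse duplicate reads first
    )
    miraligner(example_args)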
from __future__ import unicode_literals import json import warnings from django import forms from django.conf import settings from django.contrib.admin.utils import ( display_for_field, flatten_fieldsets, help_text_for_field, label_for_field, lookup_field, ) from django.core.exceptions import ObjectDoesNotExist from django.db.models.fields.related import ManyToManyRel from django.forms.utils import flatatt from django.template.defaultfilters import capfirst, linebreaksbr from django.utils import six from django.utils.deprecation import RemovedInDjango20Warning from django.utils.encoding import force_text, smart_text from django.utils.html import conditional_escape, format_html from django.utils.safestring import mark_safe from django.utils.translation import ugettext, ugettext_lazy as _ ACTION_CHECKBOX_NAME = '_selected_action' class ActionForm(forms.Form): action = forms.ChoiceField(label=_('Action:')) select_across = forms.BooleanField( label='', required=False, initial=0, widget=forms.HiddenInput({'class': 'select-across'}), ) checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False) class AdminForm(object): def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None): self.form, self.fieldsets = form, fieldsets self.prepopulated_fields = [{ 'field': form[field_name], 'dependencies': [form[f] for f in dependencies] } for field_name, dependencies in prepopulated_fields.items()] self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for name, options in self.fieldsets: yield Fieldset( self.form, name, readonly_fields=self.readonly_fields, model_admin=self.model_admin, **options ) @property def media(self): media = self.form.media for fs in self: media = media + fs.media return media class Fieldset(object): def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(), description=None, model_admin=None): self.form = form self.name, self.fields = name, fields self.classes = ' '.join(classes) self.description = description self.model_admin = model_admin self.readonly_fields = readonly_fields @property def media(self): if 'collapse' in self.classes: extra = '' if settings.DEBUG else '.min' js = [ 'vendor/jquery/jquery%s.js' % extra, 'jquery.init.js', 'collapse%s.js' % extra, ] return forms.Media(js=['admin/js/%s' % url for url in js]) return forms.Media() def __iter__(self): for field in self.fields: yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class Fieldline(object): def __init__(self, form, field, readonly_fields=None, model_admin=None): self.form = form # A django.forms.Form instance if not hasattr(field, "__iter__") or isinstance(field, six.text_type): self.fields = [field] else: self.fields = field self.has_visible_field = not all( field in self.form.fields and self.form.fields[field].widget.is_hidden for field in self.fields ) self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for i, field in enumerate(self.fields): if field in self.readonly_fields: yield AdminReadonlyField(self.form, field, is_first=(i == 0), model_admin=self.model_admin) else: yield AdminField(self.form, field, is_first=(i == 0)) def errors(self): return mark_safe( '\n'.join( self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields ).strip('\n') ) class AdminField(object): def __init__(self, form, field, 
is_first): self.field = form[field] # A django.forms.BoundField instance self.is_first = is_first # Whether this field is first on the line self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput) self.is_readonly = False def label_tag(self): classes = [] contents = conditional_escape(force_text(self.field.label)) if self.is_checkbox: classes.append('vCheckboxLabel') if self.field.field.required: classes.append('required') if not self.is_first: classes.append('inline') attrs = {'class': ' '.join(classes)} if classes else {} # checkboxes should not have a label suffix as the checkbox appears # to the left of the label. return self.field.label_tag( contents=mark_safe(contents), attrs=attrs, label_suffix='' if self.is_checkbox else None, ) def errors(self): return mark_safe(self.field.errors.as_ul()) class AdminReadonlyField(object): def __init__(self, form, field, is_first, model_admin=None): # Make self.field look a little bit like a field. This means that # {{ field.name }} must be a useful class name to identify the field. # For convenience, store other field-related data here too. if callable(field): class_name = field.__name__ if field.__name__ != '<lambda>' else '' else: class_name = field if form._meta.labels and class_name in form._meta.labels: label = form._meta.labels[class_name] else: label = label_for_field(field, form._meta.model, model_admin) if form._meta.help_texts and class_name in form._meta.help_texts: help_text = form._meta.help_texts[class_name] else: help_text = help_text_for_field(class_name, form._meta.model) self.field = { 'name': class_name, 'label': label, 'help_text': help_text, 'field': field, } self.form = form self.model_admin = model_admin self.is_first = is_first self.is_checkbox = False self.is_readonly = True self.empty_value_display = model_admin.get_empty_value_display() def label_tag(self): attrs = {} if not self.is_first: attrs["class"] = "inline" label = self.field['label'] return format_html('<label{}>{}:</label>', flatatt(attrs), capfirst(force_text(label))) def contents(self): from django.contrib.admin.templatetags.admin_list import _boolean_icon field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin try: f, attr, value = lookup_field(field, obj, model_admin) except (AttributeError, ValueError, ObjectDoesNotExist): result_repr = self.empty_value_display else: if f is None: boolean = getattr(attr, "boolean", False) if boolean: result_repr = _boolean_icon(value) else: if hasattr(value, "__html__"): result_repr = value else: result_repr = smart_text(value) if getattr(attr, "allow_tags", False): warnings.warn( "Deprecated allow_tags attribute used on %s. " "Use django.utils.safestring.format_html(), " "format_html_join(), or mark_safe() instead." % attr, RemovedInDjango20Warning ) result_repr = mark_safe(value) else: result_repr = linebreaksbr(result_repr) else: if isinstance(f.remote_field, ManyToManyRel) and value is not None: result_repr = ", ".join(map(six.text_type, value.all())) else: result_repr = display_for_field(value, f, self.empty_value_display) result_repr = linebreaksbr(result_repr) return conditional_escape(result_repr) class InlineAdminFormSet(object): """ A wrapper around an inline formset for use in the admin system. 
""" def __init__(self, inline, formset, fieldsets, prepopulated_fields=None, readonly_fields=None, model_admin=None): self.opts = inline self.formset = formset self.fieldsets = fieldsets self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields if prepopulated_fields is None: prepopulated_fields = {} self.prepopulated_fields = prepopulated_fields self.classes = ' '.join(inline.classes) if inline.classes else '' def __iter__(self): for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()): view_on_site_url = self.opts.get_view_on_site_url(original) yield InlineAdminForm( self.formset, form, self.fieldsets, self.prepopulated_fields, original, self.readonly_fields, model_admin=self.opts, view_on_site_url=view_on_site_url, ) for form in self.formset.extra_forms: yield InlineAdminForm( self.formset, form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts, ) yield InlineAdminForm( self.formset, self.formset.empty_form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts, ) def fields(self): fk = getattr(self.formset, "fk", None) for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)): if fk and fk.name == field_name: continue if field_name in self.readonly_fields: yield { 'label': label_for_field(field_name, self.opts.model, self.opts), 'widget': {'is_hidden': False}, 'required': False, 'help_text': help_text_for_field(field_name, self.opts.model), } else: form_field = self.formset.empty_form.fields[field_name] label = form_field.label if label is None: label = label_for_field(field_name, self.opts.model, self.opts) yield { 'label': label, 'widget': form_field.widget, 'required': form_field.required, 'help_text': form_field.help_text, } def inline_formset_data(self): verbose_name = self.opts.verbose_name return json.dumps({ 'name': '#%s' % self.formset.prefix, 'options': { 'prefix': self.formset.prefix, 'addText': ugettext('Add another %(verbose_name)s') % { 'verbose_name': capfirst(verbose_name), }, 'deleteText': ugettext('Remove'), } }) @property def media(self): media = self.opts.media + self.formset.media for fs in self: media = media + fs.media return media class InlineAdminForm(AdminForm): """ A wrapper around an inline form for use in the admin system. """ def __init__(self, formset, form, fieldsets, prepopulated_fields, original, readonly_fields=None, model_admin=None, view_on_site_url=None): self.formset = formset self.model_admin = model_admin self.original = original self.show_url = original and view_on_site_url is not None self.absolute_url = view_on_site_url super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields, readonly_fields, model_admin) def __iter__(self): for name, options in self.fieldsets: yield InlineFieldset( self.formset, self.form, name, self.readonly_fields, model_admin=self.model_admin, **options ) def needs_explicit_pk_field(self): # Auto fields are editable (oddly), so need to check for auto or non-editable pk if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable: return True # Also search any parents for an auto field. (The pk info is propagated to child # models so that does not need to be checked in parents.) 
for parent in self.form._meta.model._meta.get_parent_list(): if parent._meta.has_auto_field: return True return False def pk_field(self): return AdminField(self.form, self.formset._pk_field.name, False) def fk_field(self): fk = getattr(self.formset, "fk", None) if fk: return AdminField(self.form, fk.name, False) else: return "" def deletion_field(self): from django.forms.formsets import DELETION_FIELD_NAME return AdminField(self.form, DELETION_FIELD_NAME, False) def ordering_field(self): from django.forms.formsets import ORDERING_FIELD_NAME return AdminField(self.form, ORDERING_FIELD_NAME, False) class InlineFieldset(Fieldset): def __init__(self, formset, *args, **kwargs): self.formset = formset super(InlineFieldset, self).__init__(*args, **kwargs) def __iter__(self): fk = getattr(self.formset, "fk", None) for field in self.fields: if fk and fk.name == field: continue yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class AdminErrorList(forms.utils.ErrorList): """ Stores all errors for the form/formsets in an add/change stage view. """ def __init__(self, form, inline_formsets): super(AdminErrorList, self).__init__() if form.is_bound: self.extend(form.errors.values()) for inline_formset in inline_formsets: self.extend(inline_formset.non_form_errors()) for errors_in_inline_form in inline_formset.errors: self.extend(errors_in_inline_form.values())
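# --- Standalone usage sketch (hypothetical; not part of the file above) ---
# AdminErrorList collects a bound form's field errors plus every inline
# formset's errors into one flat error list. This sketch assumes Django is
# installed; NoteForm is an invented demo form and settings.configure()
# supplies bare-minimum settings so the snippet runs outside a real project.
import django
from django.conf import settings

settings.configure()
django.setup()

from django import forms
from django.contrib.admin.helpers import AdminErrorList


class NoteForm(forms.Form):  # hypothetical demo form
    title = forms.CharField()


form = NoteForm(data={})           # bound form, required field missing
errors = AdminErrorList(form, [])  # no inline formsets in this sketch
print(errors)                      # renders the collected error list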
# Copyright 2012-2016 Luke Dashjr # # This program is free software; you can redistribute it and/or modify it # under the terms of the standard MIT license. See COPYING for more details. sizeof_workid = 8 import base58 as _base58 from binascii import b2a_hex as _b2a_hex from hashlib import sha256 as _sha256 from struct import pack as _pack from time import time as _time from blktemplate import _Transaction, request as _request MAX_BLOCK_VERSION = 4 coinbase_size_limit = 100 def _dblsha256(data): return _sha256(_sha256(data).digest()).digest() def init_generation3(tmpl, script, override_cb=False): if (not tmpl.cbtxn is None) and not (override_cb and ('generate' in tmpl.mutations)): return (0, False) if len(script) >= 0xfd: return (0, True) sh = b'' h = tmpl.height while h > 127: sh += _pack('<B', h & 0xff) h >>= 8 sh += _pack('<B', h) sh = _pack('<B', len(sh)) + sh if getattr(tmpl, 'auxs', None): auxcat = b'' for aux in tmpl.auxs.values(): auxcat += aux if len(auxcat): sh += _pack('<B', len(auxcat)) + auxcat if len(sh) > coinbase_size_limit: return (0, True) data = b'' data += b"\x01\0\0\0" # txn ver data += b"\x01" # input count data += b"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" # prevout data += b"\xff\xff\xff\xff" # index (-1) data += _pack('<B', len(sh)) # scriptSig length data += sh data += b"\xff\xff\xff\xff" # sequence data += b"\x01" # output count data += _pack('<Q', tmpl.cbvalue) data += _pack('<B', len(script)) data += script data += b'\0\0\0\0' # lock time if tmpl.txns_datasz + len(data) > tmpl.sizelimit: return (0, True) txn = _Transaction(None) txn.data = data tmpl.cbtxn = txn tmpl.mutations.add('coinbase/append') tmpl.mutations.add('coinbase') tmpl.mutations.add('generate') return (tmpl.cbvalue, True) init_generation2 = init_generation3 def init_generation(tmpl, script, override_cb=False): return init_generation2(tmpl, script, override_cb)[0] def _hash_transactions(tmpl): for txn in tmpl.txns: if hasattr(txn, 'hash_'): continue txn.hash_ = _dblsha256(txn.data) return True def _build_merkle_branches(tmpl): if hasattr(tmpl, '_mrklbranch'): return True if not _hash_transactions(tmpl): return False branchcount = len(tmpl.txns).bit_length() branches = [] merklehashes = [None] + [txn.hash_ for txn in tmpl.txns] while len(branches) < branchcount: branches.append(merklehashes[1]) if len(merklehashes) % 2: merklehashes.append(merklehashes[-1]) merklehashes = [None] + [_dblsha256(merklehashes[i] + merklehashes[i + 1]) for i in range(2, len(merklehashes), 2)] tmpl._mrklbranch = branches return True def _build_merkle_root(tmpl, coinbase): if not _build_merkle_branches(tmpl): return None lhs = _dblsha256(coinbase) for rhs in tmpl._mrklbranch: lhs = _dblsha256(lhs + rhs) return lhs _cbScriptSigLen = 4 + 1 + 36 def _append_cb(tmpl, append, appended_at_offset = None): coinbase = tmpl.cbtxn.data # The following can be done better in both Python 2 and Python 3, but this way works with both origLen = ord(coinbase[_cbScriptSigLen:_cbScriptSigLen+1]) appendsz = len(append) if origLen > coinbase_size_limit - appendsz: return None if len(tmpl.cbtxn.data) + tmpl.txns_datasz + appendsz > tmpl.sizelimit: return None cbExtraNonce = _cbScriptSigLen + 1 + origLen if not appended_at_offset is None: appended_at_offset[0] = cbExtraNonce newLen = origLen + appendsz coinbase = coinbase[:_cbScriptSigLen] + chr(newLen).encode('ascii') + coinbase[_cbScriptSigLen+1:cbExtraNonce] + append + coinbase[cbExtraNonce:] return coinbase def append_coinbase_safe2(tmpl, append, extranoncesz = 0, 
merkle_only = False):
    if 'coinbase/append' not in tmpl.mutations and 'coinbase' not in tmpl.mutations:
        raise RuntimeError('Coinbase appending not allowed by template')
    datasz = len(tmpl.cbtxn.data)
    if extranoncesz == sizeof_workid:
        # Avoid overlapping with get_data use
        extranoncesz += 1
    elif not merkle_only:
        if extranoncesz < sizeof_workid:
            extranoncesz = sizeof_workid
    availsz = coinbase_size_limit - extranoncesz - ord(tmpl.cbtxn.data[_cbScriptSigLen:_cbScriptSigLen + 1])
    current_blocksize = len(tmpl.cbtxn.data) + tmpl.txns_datasz
    if current_blocksize > tmpl.sizelimit:
        return 0
    availsz2 = tmpl.sizelimit - current_blocksize
    if availsz2 < availsz:
        availsz = availsz2
    if len(append) > availsz:
        return availsz
    newcb = _append_cb(tmpl, append)
    if newcb is None:
        raise RuntimeError('Append failed')
    tmpl.cbtxn.data = newcb  # store the new coinbase so the append takes effect
    return availsz
append_coinbase_safe = append_coinbase_safe2

def _extranonce(tmpl, workid):
    coinbase = tmpl.cbtxn.data
    if not workid:
        return coinbase
    extradata = _pack('<Q', workid)
    coinbase = _append_cb(tmpl, extradata)
    return coinbase

def _set_times(tmpl, usetime = None, out_expire = None, can_roll_ntime = False):
    time_passed = int(usetime - tmpl._time_rcvd)
    timehdr = tmpl.curtime + time_passed
    if timehdr > tmpl.maxtime:
        timehdr = tmpl.maxtime
    if not out_expire is None:
        out_expire[0] = tmpl.expires - time_passed - 1
        if can_roll_ntime:
            # If the caller can roll the time header, we need to expire before reaching the maxtime
            maxtime_expire_limit = (tmpl.maxtime - timehdr) + 1
            if out_expire[0] > maxtime_expire_limit:
                out_expire[0] = maxtime_expire_limit
    return _pack('<I', timehdr)

def _sample_data(tmpl, dataid):
    cbuf = _pack('<I', tmpl.version)
    cbuf += tmpl.prevblk
    cbtxndata = _extranonce(tmpl, dataid)
    if not cbtxndata:
        return None
    merkleroot = _build_merkle_root(tmpl, cbtxndata)
    if not merkleroot:
        return None
    cbuf += merkleroot
    cbuf += _pack('<I', tmpl.curtime)
    cbuf += tmpl.diffbits
    return cbuf

def get_data(tmpl, usetime = None, out_expire = None):
    if usetime is None:
        usetime = _time()
    if not (time_left(tmpl, usetime) and work_left(tmpl) and not tmpl.cbtxn is None):
        return (None, None)
    dataid = tmpl.next_dataid
    tmpl.next_dataid += 1
    cbuf = _sample_data(tmpl, dataid)
    if cbuf is None:
        return (None, None)
    cbuf = cbuf[:68] + _set_times(tmpl, usetime, out_expire) + cbuf[68 + 4:]
    return (cbuf, dataid)

def get_mdata(tmpl, usetime = None, out_expire = None, extranoncesz = sizeof_workid, can_roll_ntime = True):
    if usetime is None:
        usetime = _time()
    if not (time_left(tmpl, usetime)
            and (not tmpl.cbtxn is None)
            and _build_merkle_branches(tmpl)):
        return None
    if extranoncesz == sizeof_workid:
        # Avoid overlapping with blkmk_get_data use
        extranoncesz += 1
    cbuf = _pack('<I', tmpl.version)
    cbuf += tmpl.prevblk
    dummy = b'\0' * extranoncesz
    cbextranonceoffset = [None]
    cbtxn = _append_cb(tmpl, dummy, cbextranonceoffset)
    if cbtxn is None:
        return None
    cbuf += b'\0' * 0x20  # merkle root placeholder, filled in by the caller
    cbuf += _set_times(tmpl, usetime, out_expire, can_roll_ntime)
    cbuf += tmpl.diffbits
    return (cbuf, cbtxn, cbextranonceoffset[0], tmpl._mrklbranch)

def time_left(tmpl, nowtime = None):
    if nowtime is None:
        nowtime = _time()
    age = nowtime - tmpl._time_rcvd
    if age >= tmpl.expires:
        return 0
    return tmpl.expires - age

def work_left(tmpl):
    if not tmpl.version:
        return 0
    if 'coinbase/append' not in tmpl.mutations and 'coinbase' not in tmpl.mutations:
        return 1
    return 0xffffffffffffffff - tmpl.next_dataid

def _varintEncode(n):
    if n < 0xfd:
        return _pack('<B', n)
    # NOTE: Technically, there are more encodings for numbers bigger than
    # 16-bit, but transaction counts can't be that high with version 2
    # Bitcoin blocks
    return b'\xfd' + _pack('<H', n)

def _assemble_submission2_internal(tmpl, data, extranonce, nonce, foreign):
    data = data[:76]
    data += _pack('!I', nonce)
    if foreign or ('submit/truncate' not in tmpl.mutations or extranonce):
        data += _varintEncode(1 + len(tmpl.txns))
        # Essentially _extranonce
        if extranonce:
            data += _append_cb(tmpl, extranonce)
        else:
            data += tmpl.cbtxn.data
        if foreign or ('submit/coinbase' not in tmpl.mutations):
            for i in range(len(tmpl.txns)):
                data += tmpl.txns[i].data
    return _b2a_hex(data).decode('ascii')

def _assemble_submission2(tmpl, data, extranonce, dataid, nonce, foreign):
    if dataid:
        if extranonce:
            raise RuntimeError('Cannot specify both extranonce and dataid')
        extranonce = _pack('<Q', dataid)
    elif extranonce and len(extranonce) == sizeof_workid:
        # Avoid overlapping with blkmk_get_data use
        extranonce += b'\0'
    return _assemble_submission2_internal(tmpl, data, extranonce, nonce, foreign)

def propose(tmpl, caps, foreign):
    jreq = _request(caps)
    jparams = jreq['params'][0]
    jparams['mode'] = 'proposal'
    if (not getattr(tmpl, 'workid', None) is None) and not foreign:
        jparams['workid'] = tmpl.workid
    dataid = 0
    if 'coinbase/append' in tmpl.mutations or 'coinbase' in tmpl.mutations:
        dataid = 1
    sdata = _sample_data(tmpl, dataid)
    blkhex = _assemble_submission2(tmpl, sdata, None, dataid, 0, foreign)
    jparams['data'] = blkhex
    return jreq

def _submit(tmpl, data, extranonce, dataid, nonce, foreign):
    blkhex = _assemble_submission2(tmpl, data, extranonce, dataid, nonce, foreign)
    info = {}
    if (not getattr(tmpl, 'workid', None) is None) and not foreign:
        info['workid'] = tmpl.workid
    return {
        'id': 0,
        'method': 'submitblock',
        'params': [blkhex, info],
    }

def submit(tmpl, data, dataid, nonce, foreign=False):
    return _submit(tmpl, data, None, dataid, nonce, foreign)

def submit_foreign(tmpl, data, dataid, nonce):
    return _submit(tmpl, data, None, dataid, nonce, True)

def submitm(tmpl, data, extranonce, nonce, foreign=False):
    return _submit(tmpl, data, extranonce, None, nonce, foreign)

def address_to_script(addr):
    addrbin = _base58.b58decode(addr, 25)
    if addrbin is None:
        raise RuntimeError('Invalid address')
    addrver = _base58.get_bcaddress_version(addr)
    if addrver == 0 or addrver == 111:
        # Bitcoin pubkey hash or Testnet pubkey hash
        return (b''
            + b'\x76'  # OP_DUP
            + b'\xa9'  # OP_HASH160
            + b'\x14'  # push 20 bytes
            + addrbin
            + b'\x88'  # OP_EQUALVERIFY
            + b'\xac'  # OP_CHECKSIG
        )
    if addrver == 5 or addrver == 196:
        # Bitcoin script hash or Testnet script hash
        return (b''
            + b'\xa9'  # OP_HASH160
            + b'\x14'  # push 20 bytes
            + addrbin
            + b'\x87'  # OP_EQUAL
        )
    raise RuntimeError('Invalid address version')
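# --- Standalone usage sketch (not part of the file above) ---
# The scriptSig height encoding at the top of init_generation3 is the BIP 34
# rule: the block height serialized little-endian, with a one-byte length
# prefix. serialize_height is our illustrative name, not part of the module;
# the loop mirrors the module's code exactly.
from struct import pack


def serialize_height(h):
    sh = b''
    while h > 127:
        sh += pack('<B', h & 0xff)
        h >>= 8
    sh += pack('<B', h)
    return pack('<B', len(sh)) + sh


assert serialize_height(1) == b'\x01\x01'
assert serialize_height(800000) == b'\x03\x00\x35\x0c'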
from . import moog from .star import Star import numpy as np import datetime import logging from scipy import interpolate import os from .config import * from .tools import read_csv from collections import OrderedDict from bokeh.plotting import * from bokeh.models import HoverTool logger = logging.getLogger(__name__) def get_all(Data, output_file, species_ids=None, reference=None, grid='odfnew', errors=False): print('------------------------------------------------------') print('Initializing ...') start_time = datetime.datetime.now() print('- Date and time: '+start_time.strftime('%d-%b-%Y, %H:%M:%S')) print('- Model atmospheres: '+grid) print('- Star data: '+Data.star_data_fname) print('- Line list: '+Data.lines_fname) if reference: print('- Reference star: '+reference) print('------------------------------------------------------') if reference: ref = Star(reference) ref.get_data_from(Data) if hasattr(ref, 'feh_model'): ##### ref.feh = getattr(ref, 'feh_model') ##### ref.get_model_atmosphere(grid) else: ref = None fout = open(output_file, 'w') header = 'id' if species_ids == None: species_codes = sorted(set(Data.lines['species'])) species_ids = getsp_ids(species_codes) print('"species_ids" not provided') print('Lines found for the following species: '+\ ','.join(species_ids)) print('') for species_id in species_ids: header += ','+species_id+',e_'+species_id+',n_'+species_id if reference: header += ',['+species_id+'],e_['+species_id+\ '],n_['+species_id+']' if errors: header += ',err_'+species_id fout.write(header + '\n') for star_id in Data.star_data['id']: line = star_id print('') print('*'*len(star_id)) print(star_id) print('*'*len(star_id)) s = Star(star_id) try: s.get_data_from(Data) if hasattr(s, 'feh_model'): s.feh = getattr(s, 'feh_model') s.get_model_atmosphere(grid) except: print('No data available') logger.warning('Could not get all the necessary data') line += ','*(len(species_ids)*2) if reference: line += ','*(len(species_ids)*2) fout.write(line+'\n') continue print('Using [Fe/H] = {0:6.3f} for the model atmosphere'.format(s.feh)) get_one(s, species_ids, ref, errors=errors) for species_id in species_ids: print('\n'+species_id+'\n'+'-'*len(species_id)) if not hasattr(s, species_id): print('No data available') logger.warning('There are no '+species_id+' abundances '+\ 'for this star') line += ',,,' if reference: line += ',,,' if errors: line += ',' continue mab = np.mean(getattr(s, species_id)['ab']) sab = np.std(getattr(s, species_id)['ab']) nab = len(getattr(s, species_id)['ab']) print("ABS = {0:6.3f} +/- {1:6.3f} , n = {2:.0f}".\ format(mab, sab, nab)) line += ',{0:.3f},{1:.3f},{2:.0f}'.format(mab, sab, nab) if reference: da = getattr(s, species_id)['difab'] da = np.array(da, dtype=np.float) #convert None to np.nan mda = np.ma.masked_array(da, np.isnan(da)) mdifab = np.mean(mda) sdifab = np.std(mda) ndifab = mda.count() print("DIF = {0:6.3f} +/- {1:6.3f} , n = {2:.0f}".\ format(mdifab, sdifab, ndifab)) line += ',{0:.3f},{1:.3f},{2:.0f}'.\ format(mdifab, sdifab, ndifab) if errors: print("ERR = {0:5.3f} (DIF)".\ format(getattr(s, species_id)['err_difab'])) line += ',{0:.3f}'.\ format(getattr(s, species_id)['err_difab']) else: mdifab = 0 if errors: print("ERR = {0:5.3f} (ABS)".\ format(getattr(s, species_id)['err_ab'])) line += ',{0:.3f}'.\ format(getattr(s, species_id)['err_ab']) print('') llhd1 = 'Wavelength ABS RES ' llhd2 = '---------- ----- ------' if reference: llhd1 += ' DIF RES ' llhd2 += ' ----- -----' print(llhd1+'\n'+llhd2) for wi, ab, difab in \ zip(getattr(s, 
species_id)['ww'], getattr(s, species_id)['ab'], getattr(s, species_id)['difab']): if reference and difab != None: print("{0:10.4f} {1:6.3f} {2:6.3f} {3:6.3f} {4:6.3f}".\ format(wi, ab, ab-mab, difab, difab-mdifab)) else: print("{0:10.4f} {1:6.3f} {2:6.3f}".\ format(wi, ab, ab-mab)) fout.write(line+'\n') fout.close() print('') print('------------------------------------------------------') end_time = datetime.datetime.now() print('- Date and time: '+end_time.strftime('%d-%b-%Y, %H:%M:%S')) delta_t = (end_time - start_time).seconds hours, remainder = divmod(delta_t, 3600) minutes, seconds = divmod(remainder, 60) print('- Time elapsed: %sH %sM %sS' % (hours, minutes, seconds)) print('Done!') print('------------------------------------------------------') print('') def get_one(Star, species_ids=None, Ref=object, silent=True, errors=False): logger.info('Working on: '+Star.name) if species_ids == None: species_codes = sorted(set(Star.linelist['species'])) species_ids = getsp_ids(species_codes) if not silent: print('"species_ids" not provided') print('Lines found for the following species: '+\ ','.join(species_ids)) print('') for species_id in species_ids: species = getsp(species_id) if not silent: print("*** Begin "+species_id+":") if species == None: logger.warning('Not doing calculations for: '+species_id) continue logger.info('Working on: '+species_id) moog.abfind(Star, species, species_id) if not hasattr(Star, species_id): logger.warning('Did not calculate '+species_id+' abundances') continue if species_id == 'OI': if not silent: print('777 nm oxygen abundances will be NLTE corrected') ao = [] for wx in [7771.94, 7774.16, 7775.39]: k = np.where(abs(Star.OI['ww']-wx) < 0.05) if len(k[0]) == 1: ao.append(np.mean(Star.OI['ab'][k])) else: ao.append(0) aon = nlte_triplet(Star.teff, Star.logg, Star.feh, ao, silent=silent) k= np.where(np.array(ao) > 0) getattr(Star, species_id)['ab'] = aon[k] getattr(Star, species_id)['ref'] = None if hasattr(Ref, 'name'): logger.info('Diferential analysis: '+Ref.name) if Star.name == Ref.name: logger.warning('Reference star object redefined!') Ref = Star if not hasattr(Ref, species_id): logger.info('Calculating reference star abundances: '+Ref.name) moog.abfind(Ref, species, species_id) if species_id == 'OI': if not silent: print('777 nm oxygen abundances will be NLTE '\ +'corrected (Reference)') ao = [] for wx in [7771.94, 7774.16, 7775.39]: k = np.where(abs(Ref.OI['ww']-wx) < 0.05) if len(k[0]) == 1: ao.append(np.mean(Ref.OI['ab'][k])) else: ao.append(0) aon = nlte_triplet(Ref.teff, Ref.logg, Ref.feh, ao, silent=silent) k= np.where(np.array(ao) > 0) getattr(Ref, species_id)['ab'] = aon[k] else: logger.info('Reference star has '+species_id+\ ' abundances computed already: '+Ref.name) ws = getattr(Star, species_id)['ww'] wr = getattr(Ref, species_id)['ww'] ww = np.intersect1d(ws, wr) k = [i for i, w in zip(range(len(ws)), ws) if w in ww] kr = [i for i, w in zip(range(len(wr)), wr) if w in ww] a = getattr(Star, species_id)['ab'][k] - \ getattr(Ref, species_id)['ab'][kr] ax, ix = [], 0 for wx in ws: if wx in ww: ax.append(a[ix]) ix += 1 else: ax.append(None) aa = np.array(ax, dtype=np.float) #convert None to np.nan maa = np.ma.masked_array(aa, np.isnan(aa)) getattr(Star, species_id)['difab'] = maa getattr(Star, species_id)['ref'] = Ref.name if not silent: ab = getattr(Star, species_id)['ab'] difab = getattr(Star, species_id)['difab'] aa = np.array(ab, dtype=np.float) #convert None to np.nan maa = np.ma.masked_array(aa, np.isnan(aa)) da = np.array(difab, 
dtype=np.float) #convert None to np.nan mda = np.ma.masked_array(da, np.isnan(da)) print("A({0}) = {1:6.3f} +/- {2:5.3f} (# of lines = {3})".\ format(species_id, np.mean(maa), np.std(maa), maa.count())) if hasattr(Ref, 'name'): print("[{0}/H] = {1:6.3f} +/- {2:5.3f} (# of lines = {3})".\ format(species_id, np.mean(mda), np.std(mda), mda.count())) if errors: error(Star, species_id, Ref=Ref, silent=silent) if not silent: print('---' + species_id + ' done') if not silent and len(species_ids) >= 1: print('All species completed') def error(Star_in, species_id, Ref=object, silent=True): s = Star() s.__dict__ = Star_in.__dict__.copy() if not silent: print('-----------------------------') print('Error propagation for '+species_id+':') try: Ref.model_atmosphere_grid dab = getattr(Star_in, species_id)['difab'] l2l_sct = np.std(dab)/np.sqrt(max([len(dab),2])-1) abx = 'difab' except: try: ab = getattr(Star_in, species_id)['ab'] l2l_sct = np.std(ab)/np.sqrt(max([len(ab),2])-1) abx = 'ab' except: logger.error('Must calculate abundances before errors') return None if hasattr(s, 'err_teff'): if s.err_teff > 0: s.teff += s.err_teff s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) ap = np.mean(getattr(s, species_id)[abx]) s.teff -= 2*s.err_teff s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) am = np.mean(getattr(s, species_id)[abx]) a_teff = abs(ap-am)/2. s.teff += s.err_teff else: a_teff = 0. else: a_teff = 0. if hasattr(s, 'err_logg'): if s.err_logg > 0: s.logg += s.err_logg s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) ap = np.mean(getattr(s, species_id)[abx]) s.logg -= 2*s.err_logg s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) am = np.mean(getattr(s, species_id)[abx]) a_logg = abs(ap-am)/2. s.logg += s.err_logg else: a_logg = 0. else: a_logg = 0. if hasattr(s, 'err_feh'): if s.err_feh > 0: s.feh += s.err_feh s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) ap = np.mean(getattr(s, species_id)[abx]) s.feh -= 2*s.err_feh s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) am = np.mean(getattr(s, species_id)[abx]) a_feh = abs(ap-am)/2. s.feh += s.err_feh else: a_feh = 0. else: a_feh = 0. if hasattr(s, 'err_vt'): if s.err_vt > 0: s.vt += s.err_vt s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) ap = np.mean(getattr(s, species_id)[abx]) s.vt -= 2*s.err_vt s.get_model_atmosphere(s.model_atmosphere_grid) get_one(s, [species_id], Ref=Ref) am = np.mean(getattr(s, species_id)[abx]) a_vt = abs(ap-am)/2. s.vt += s.err_vt else: a_vt = 0. else: a_vt = 0. 
a_tot = np.sqrt(a_teff**2+a_logg**2+a_feh**2+a_vt**2+l2l_sct**2) if not silent: print('Line to line scatter: {0:.3f}'.format(l2l_sct)) print('Error from Teff: {0:.3f}'.format(a_teff)) print('Error from logg: {0:.3f}'.format(a_logg)) print('Error from [Fe/H]: {0:.3f}'.format(a_feh)) print('Error from vt: {0:.3f}'.format(a_vt)) print(' -------') print('Total abundance error: {0:.3f}'.format(a_tot)) print('-----------------------------') try: Ref.model_atmosphere_grid getattr(Star_in, species_id)['err_difab'] = a_tot except: getattr(Star_in, species_id)['err_ab'] = a_tot sp_map = { 'LiI' : 3.0, 'BeI' : 4.0, 'BeII': 4.1, 'BI' : 5.0, 'CI' : 6.0, 'CH' : 106.0, 'NI' : 7.0, 'OI' : 8.0, 'FI' : 9.0, 'NaI' : 11.0, 'MgI' : 12.0, 'MgII': 12.1, 'AlI' : 13.0, 'SiI' : 14.0, 'PI' : 15.0, 'SI' : 16.0, 'KI' : 19.0, 'CaI' : 20.0, 'ScI' : 21.0, 'ScII': 21.1, 'TiI' : 22.0, 'TiII': 22.1, 'VI' : 23.0, 'CrI' : 24.0, 'CrII': 24.1, 'MnI' : 25.0, 'FeI' : 26.0, 'FeII': 26.1, 'CoI' : 27.0, 'NiI' : 28.0, 'CuI' : 29.0, 'ZnI' : 30.0, 'RbI' : 37.0, 'SrI' : 38.0, 'SrII': 38.1, 'YII' : 39.1, 'ZrII': 40.1, 'BaII': 56.1, 'LaII': 57.1, 'CeII': 58.1, 'NdII': 60.1, 'SmII': 62.1, 'EuII': 63.1, 'DyII': 66.1 } def getsp(species_id): try: species = sp_map[species_id] except: logger.warning('species id not recognized: '+species_id) return None return species def getsp_ids(species_list): species_ids = [] for species_code in species_list: try: species_id = [key for key in sp_map if sp_map[key] == species_code][0] species_ids.append(species_id) except: logger.warning('species_code '+str(species_code)+' not found') return species_ids def nlte_triplet(teff, logg, feh, ao, silent=True): if feh >= 0.4: feh = 0.4 grid = read_csv(os.path.join(OTHER_PATH ,'nlte_triplet.csv')) t,g,f,dao0,dao1,dao2=[],[],[],[],[],[] for i in range(640): rg = range(i*7, i*7+7) x0 = interpolate.griddata(grid['ao'][rg], grid['dao0'][rg],\ ao[0], method='cubic') x1 = interpolate.griddata(grid['ao'][rg], grid['dao1'][rg],\ ao[1], method='cubic') x2 = interpolate.griddata(grid['ao'][rg], grid['dao2'][rg],\ ao[2], method='cubic') x0, x1, x2 = float(x0), float(x1), float(x2) t.append(grid['teff'][rg[0]]) g.append(grid['logg'][rg[0]]) f.append(grid['feh'][rg[0]]) dao0.append(x0) dao1.append(x1) dao2.append(x2) t = np.array(t) g = np.array(g) f = np.array(f) dao0 = np.array(dao0) dao1 = np.array(dao1) dao2 = np.array(dao2) tt,ff,dao00,dao11,dao22=[],[],[],[],[] for i in range(160): rg =range(i*4, i*4+4) x0 = interpolate.griddata(g[rg], dao0[rg], logg, method='cubic') x1 = interpolate.griddata(g[rg], dao1[rg], logg, method='cubic') x2 = interpolate.griddata(g[rg], dao2[rg], logg, method='cubic') x0, x1, x2 = float(x0), float(x1), float(x2) tt.append(t[rg[0]]) ff.append(f[rg[0]]) dao00.append(x0) dao11.append(x1) dao22.append(x2) tt = np.array(tt) ff = np.array(ff) dao00 = np.array(dao00) dao11 = np.array(dao11) dao22 = np.array(dao22) t,dao0,dao1,dao2=[],[],[],[] for i in range(16): rg =range(i*10, i*10+10) x0 = interpolate.griddata(ff[rg], dao00[rg], feh, method='cubic') x1 = interpolate.griddata(ff[rg], dao11[rg], feh, method='cubic') x2 = interpolate.griddata(ff[rg], dao22[rg], feh, method='cubic') x0, x1, x2 = float(x0), float(x1), float(x2) t.append(tt[rg[0]]) dao0.append(x0) dao1.append(x1) dao2.append(x2) t = np.array(t) dao0 = np.array(dao0) dao1 = np.array(dao1) dao2 = np.array(dao2) x0 = interpolate.griddata(t, dao0, teff, method='cubic') x1 = interpolate.griddata(t, dao1, teff, method='cubic') x2 = interpolate.griddata(t, dao2, teff, method='cubic') x0, x1, x2 = 
float(x0), float(x1), float(x2) x0 = x0 - 0.0355 x1 = x1 - 0.0180 x2 = x2 - 0.0000 if not silent: print('Wavelength (A) | A(O) LTE | Correction | A(O) NLTE') print(" 7771.9 | {0:6.3f} | {1:5.3f} | {2:6.3f}".\ format(ao[0], x0, ao[0]-x0)) print(" 7774.2 | {0:6.3f} | {1:5.3f} | {2:6.3f}".\ format(ao[1], x1, ao[1]-x1)) print(" 7775.4 | {0:6.3f} | {1:5.3f} | {2:6.3f}".\ format(ao[2], x2, ao[2]-x2)) ax = [round(ao[0]-x0, 3), round(ao[1]-x1, 3), round(ao[2]-x2, 3)] aon = np.ma.masked_array(ax,np.isnan(ax)) if not silent: print("A(O) LTE = {0:6.3f} +/- {1:5.3f}".\ format(np.mean(ao), np.std(ao))) print("A(O) NLTE = {0:6.3f} +/- {1:5.3f}".\ format(np.mean(aon), np.std(aon))) return aon def fancy_abund_plot(Star, species_id): """Makes bokeh hover-ing plots Function written to look for outliers and investigate line-to-line scatter """ if not hasattr(Star, species_id): logger.error('Star object ('+Star.name+') has no '\ +species_id+'attribute.') return None ww = getattr(Star, species_id)['ww'] ew = getattr(Star, species_id)['ew'] ab = getattr(Star, species_id)['ab'] difab = getattr(Star, species_id)['difab'] TOOLS="pan,wheel_zoom,box_zoom,reset,hover" output_notebook() p1 = figure(title=Star.name, plot_width=650, plot_height=300, x_axis_label='Wavelength (A)', y_axis_label='A('+species_id+')', tools=TOOLS, active_scroll = 'wheel_zoom') p1.xaxis.axis_label_text_font_style = "normal" p1.xaxis.axis_label_text_font_size = "12pt" p1.xaxis.major_label_text_font_size = "12pt" p1.yaxis.axis_label_text_font_style = "normal" p1.yaxis.axis_label_text_font_size = "12pt" p1.yaxis.major_label_text_font_size = "12pt" ws = [str(round(w, 1)) for w in ww] source = ColumnDataSource( data=dict( ww = ww, ws = ws, ew = ew, ab = ab, difab = difab, ) ) p1.scatter('ww', 'ab', size=9, source=source, marker='square', color='blue') hover = p1.select(dict(type=HoverTool)) hover.tooltips = OrderedDict([ ("Wavelength", "@ws A"), ("EW", "@ew mA"), ("Abundance", "@ab"), ]) show(p1) if getattr(Star, species_id)['ref']: difab = np.array(difab, dtype=np.float) #convert None to np.nan difabs = [str(round(dfab, 3)) for dfab in difab] source = ColumnDataSource( data=dict( ww = ww, ws = ws, ew = ew, ab = ab, difab = difab, difabs = difabs, ) ) p2 = figure(title=Star.name+' - '+getattr(Star, species_id)['ref'], plot_width=650, plot_height=300, x_axis_label='Wavelength (A)', y_axis_label='['+species_id+'/H]', tools=TOOLS, active_scroll = 'wheel_zoom' ) p2.xaxis.axis_label_text_font_style = "normal" p2.xaxis.axis_label_text_font_size = "12pt" p2.xaxis.major_label_text_font_size = "12pt" p2.yaxis.axis_label_text_font_style = "normal" p2.yaxis.axis_label_text_font_size = "12pt" p2.yaxis.major_label_text_font_size = "12pt" p2.scatter('ww', 'difab', size=9, source=source, marker='square', color='blue') hover = p2.select(dict(type=HoverTool)) hover.tooltips = OrderedDict([ ("Wavelength", "@ws A"), ("EW", "@ew mA"), ("Abundance", "@difabs"), ]) show(p2)
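# --- Usage sketch (not part of the file above) ---
# Round-trip between MOOG species codes and the string ids used as Star
# attributes, using only sp_map, getsp, and getsp_ids as defined above.
codes = [26.0, 26.1, 8.0]        # FeI, FeII, OI
ids = getsp_ids(codes)           # -> ['FeI', 'FeII', 'OI']
assert [getsp(i) for i in ids] == codes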
from __future__ import division import unittest from fontTools.pens.pointPen import AbstractPointPen from fontMath.mathFunctions import addPt, mulPt from fontMath.mathGlyph import ( MathGlyph, MathGlyphPen, FilterRedundantPointPen, _processMathOneContours, _processMathTwoContours, _anchorTree, _pairAnchors, _processMathOneAnchors, _processMathTwoAnchors, _pairComponents, _processMathOneComponents, _processMathTwoComponents, _expandImage, _compressImage, _pairImages, _processMathOneImage, _processMathTwoImage, _processMathOneTransformation, _processMathTwoTransformation, _roundContours, _roundTransformation, _roundImage, _roundComponents, _roundAnchors ) try: basestring, xrange range = xrange except NameError: basestring = str class MathGlyphTest(unittest.TestCase): def __init__(self, methodName): unittest.TestCase.__init__(self, methodName) def _setupTestGlyph(self): glyph = MathGlyph(None) glyph.width = 0 glyph.height = 0 return glyph def test__eq__(self): glyph1 = self._setupTestGlyph() glyph2 = self._setupTestGlyph() self.assertEqual(glyph1, glyph2) glyph2.width = 1 self.assertFalse(glyph1 == glyph2) nonglyph = object() self.assertFalse(glyph1 == nonglyph) glyph1 = MathGlyph(None) glyph1.name = 'space' glyph1.width = 100 class MyGlyph(object): pass other = MyGlyph() other.name = 'space' other.width = 100 other.height = None other.contours = [] other.components = [] other.anchors = [] other.guidelines = [] other.image = {'fileName': None, 'transformation': (1, 0, 0, 1, 0, 0), 'color': None} other.lib = {} other.unicodes = None other.note = None self.assertEqual(glyph1, other) def test__ne__(self): glyph1 = self._setupTestGlyph() glyph2 = MathGlyph(None) glyph2.width = 1 glyph2.name = 'a' self.assertNotEqual(glyph1, glyph2) self.assertNotEqual(glyph1, 'foo') def test_width_add(self): glyph1 = self._setupTestGlyph() glyph1.width = 1 glyph2 = self._setupTestGlyph() glyph2.width = 2 glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.width, 3) def test_width_sub(self): glyph1 = self._setupTestGlyph() glyph1.width = 3 glyph2 = self._setupTestGlyph() glyph2.width = 2 glyph3 = glyph1 - glyph2 self.assertEqual(glyph3.width, 1) def test_width_mul(self): glyph1 = self._setupTestGlyph() glyph1.width = 2 glyph2 = glyph1 * 3 self.assertEqual(glyph2.width, 6) glyph1 = self._setupTestGlyph() glyph1.width = 2 glyph2 = glyph1 * (3, 1) self.assertEqual(glyph2.width, 6) def test_width_div(self): glyph1 = self._setupTestGlyph() glyph1.width = 7 glyph2 = glyph1 / 2 self.assertEqual(glyph2.width, 3.5) glyph1 = self._setupTestGlyph() glyph1.width = 7 glyph2 = glyph1 / (2, 1) self.assertEqual(glyph2.width, 3.5) def test_width_round(self): glyph1 = self._setupTestGlyph() glyph1.width = 6.99 glyph2 = glyph1.round() self.assertEqual(glyph2.width, 7) def test_height_add(self): glyph1 = self._setupTestGlyph() glyph1.height = 1 glyph2 = self._setupTestGlyph() glyph2.height = 2 glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.height, 3) def test_height_sub(self): glyph1 = self._setupTestGlyph() glyph1.height = 3 glyph2 = self._setupTestGlyph() glyph2.height = 2 glyph3 = glyph1 - glyph2 self.assertEqual(glyph3.height, 1) def test_height_mul(self): glyph1 = self._setupTestGlyph() glyph1.height = 2 glyph2 = glyph1 * 3 self.assertEqual(glyph2.height, 6) glyph1 = self._setupTestGlyph() glyph1.height = 2 glyph2 = glyph1 * (1, 3) self.assertEqual(glyph2.height, 6) def test_height_div(self): glyph1 = self._setupTestGlyph() glyph1.height = 7 glyph2 = glyph1 / 2 self.assertEqual(glyph2.height, 3.5) glyph1 = self._setupTestGlyph() 
glyph1.height = 7 glyph2 = glyph1 / (1, 2) self.assertEqual(glyph2.height, 3.5) def test_height_round(self): glyph1 = self._setupTestGlyph() glyph1.height = 6.99 glyph2 = glyph1.round() self.assertEqual(glyph2.height, 7) def test_contours_add(self): glyph1 = self._setupTestGlyph() glyph1.contours = [ dict(identifier="contour 1", points=[("line", (0.55, 3.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55, 3.1), True, "test", "1")]) ] glyph2 = self._setupTestGlyph() glyph2.contours = [ dict(identifier="contour 1", points=[("line", (1.55, 4.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (1.55, 4.1), True, "test", "1")]) ] glyph3 = glyph1 + glyph2 expected = [ dict(identifier="contour 1", points=[("line", (0.55 + 1.55, 3.1 + 4.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55 + 1.55, 3.1 + 4.1), True, "test", "1")]) ] self.assertEqual(glyph3.contours, expected) def test_contours_sub(self): glyph1 = self._setupTestGlyph() glyph1.contours = [ dict(identifier="contour 1", points=[("line", (0.55, 3.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55, 3.1), True, "test", "1")]) ] glyph2 = self._setupTestGlyph() glyph2.contours = [ dict(identifier="contour 1", points=[("line", (1.55, 4.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (1.55, 4.1), True, "test", "1")]) ] glyph3 = glyph1 - glyph2 expected = [ dict(identifier="contour 1", points=[("line", (0.55 - 1.55, 3.1 - 4.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55 - 1.55, 3.1 - 4.1), True, "test", "1")]) ] self.assertEqual(glyph3.contours, expected) def test_contours_mul(self): glyph1 = self._setupTestGlyph() glyph1.contours = [ dict(identifier="contour 1", points=[("line", (0.55, 3.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55, 3.1), True, "test", "1")]) ] glyph2 = glyph1 * 2 expected = [ dict(identifier="contour 1", points=[("line", (0.55 * 2, 3.1 * 2), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55 * 2, 3.1 * 2), True, "test", "1")]) ] self.assertEqual(glyph2.contours, expected) def test_contours_div(self): glyph1 = self._setupTestGlyph() glyph1.contours = [ dict(identifier="contour 1", points=[("line", (1, 3.4), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (2, 3.1), True, "test", "1")]) ] glyph2 = glyph1 / 2 expected = [ dict(identifier="contour 1", points=[("line", (1/2, 3.4/2), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (2/2, 3.1/2), True, "test", "1")]) ] self.assertEqual(glyph2.contours, expected) def test_contours_round(self): glyph1 = self._setupTestGlyph() glyph1.contours = [ dict(identifier="contour 1", points=[("line", (0.55, 3.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55, 3.1), True, "test", "1")]) ] glyph2 = glyph1.round() expected = [ dict(identifier="contour 1", points=[("line", (1, 3), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (1, 3), True, "test", "1")]) ] self.assertEqual(glyph2.contours, expected) def test_components_round(self): glyph1 = self._setupTestGlyph() glyph1.components = [ dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5.1, 5.99), identifier="1"), ] glyph2 = glyph1.round() expected = [ dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5, 6), identifier="1") ] self.assertEqual(glyph2.components, expected) def test_guidelines_add_same_name_identifier_x_y_angle(self): glyph1 = 
self._setupTestGlyph() glyph1.guidelines = [ dict(name="foo", identifier="1", x=1, y=2, angle=1), dict(name="foo", identifier="2", x=3, y=4, angle=2) ] glyph2 = self._setupTestGlyph() glyph2.guidelines = [ dict(name="foo", identifier="2", x=3, y=4, angle=2), dict(name="foo", identifier="1", x=1, y=2, angle=1) ] expected = [ dict(name="foo", identifier="1", x=2, y=4, angle=2), dict(name="foo", identifier="2", x=6, y=8, angle=4) ] glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.guidelines, expected) def test_guidelines_add_same_name_identifier(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(name="foo", identifier="1", x=1, y=2, angle=1), dict(name="foo", identifier="2", x=1, y=2, angle=2), ] glyph2 = self._setupTestGlyph() glyph2.guidelines = [ dict(name="foo", identifier="2", x=3, y=4, angle=3), dict(name="foo", identifier="1", x=3, y=4, angle=4) ] expected = [ dict(name="foo", identifier="1", x=4, y=6, angle=5), dict(name="foo", identifier="2", x=4, y=6, angle=5) ] glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.guidelines, expected) def test_guidelines_add_same_name_x_y_angle(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(name="foo", identifier="1", x=1, y=2, angle=1), dict(name="foo", identifier="2", x=3, y=4, angle=2), ] glyph2 = self._setupTestGlyph() glyph2.guidelines = [ dict(name="foo", identifier="3", x=3, y=4, angle=2), dict(name="foo", identifier="4", x=1, y=2, angle=1) ] expected = [ dict(name="foo", identifier="1", x=2, y=4, angle=2), dict(name="foo", identifier="2", x=6, y=8, angle=4) ] glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.guidelines, expected) def test_guidelines_add_same_identifier_x_y_angle(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(name="foo", identifier="1", x=1, y=2, angle=1), dict(name="bar", identifier="2", x=3, y=4, angle=2), ] glyph2 = self._setupTestGlyph() glyph2.guidelines = [ dict(name="xxx", identifier="2", x=3, y=4, angle=2), dict(name="yyy", identifier="1", x=1, y=2, angle=1) ] expected = [ dict(name="foo", identifier="1", x=2, y=4, angle=2), dict(name="bar", identifier="2", x=6, y=8, angle=4) ] glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.guidelines, expected) def test_guidelines_add_same_name(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(name="foo", identifier="1", x=1, y=2, angle=1), dict(name="bar", identifier="2", x=1, y=2, angle=2), ] glyph2 = self._setupTestGlyph() glyph2.guidelines = [ dict(name="bar", identifier="3", x=3, y=4, angle=3), dict(name="foo", identifier="4", x=3, y=4, angle=4) ] expected = [ dict(name="foo", identifier="1", x=4, y=6, angle=5), dict(name="bar", identifier="2", x=4, y=6, angle=5) ] glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.guidelines, expected) def test_guidelines_add_same_identifier(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(name="foo", identifier="1", x=1, y=2, angle=1), dict(name="bar", identifier="2", x=1, y=2, angle=2), ] glyph2 = self._setupTestGlyph() glyph2.guidelines = [ dict(name="xxx", identifier="2", x=3, y=4, angle=3), dict(name="yyy", identifier="1", x=3, y=4, angle=4) ] expected = [ dict(name="foo", identifier="1", x=4, y=6, angle=5), dict(name="bar", identifier="2", x=4, y=6, angle=5) ] glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.guidelines, expected) def test_guidelines_mul(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(x=1, y=3, angle=5, name="test", identifier="1", color="0,0,0,0") ] glyph2 = glyph1 * 3 expected = [ dict(x=1 * 3, y=3 * 3, angle=5, 
name="test", identifier="1", color="0,0,0,0") ] self.assertEqual(glyph2.guidelines, expected) def test_guidelines_round(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(x=1.99, y=3.01, angle=5, name="test", identifier="1", color="0,0,0,0") ] glyph2 = glyph1.round() expected = [ dict(x=2, y=3, angle=5, name="test", identifier="1", color="0,0,0,0") ] self.assertEqual(glyph2.guidelines, expected) def test_guidelines_valid_angle(self): glyph1 = self._setupTestGlyph() glyph1.guidelines = [ dict(name="foo", identifier="1", x=0, y=0, angle=1) ] glyph2 = self._setupTestGlyph() glyph2.guidelines = [ dict(name="foo", identifier="1", x=0, y=0, angle=359) ] expected_add = [dict(name="foo", identifier="1", x=0, y=0, angle=0)] glyph3 = glyph1 + glyph2 self.assertEqual(glyph3.guidelines, expected_add) expected_sub = [dict(name="foo", identifier="1", x=0, y=0, angle=2)] glyph4 = glyph1 - glyph2 self.assertEqual(glyph4.guidelines, expected_sub) expected_mul = [dict(name="foo", identifier="1", x=0, y=0, angle=359)] glyph5 = glyph2 * 5 self.assertEqual(glyph5.guidelines, expected_mul) expected_div = [dict(name="foo", identifier="1", x=0, y=0, angle=359)] glyph6 = glyph2 / 5 self.assertEqual(glyph6.guidelines, expected_div) def test_anchors_add(self): glyph1 = self._setupTestGlyph() glyph1.anchors = [ dict(x=1, y=-2, name="foo", identifier="1", color="0,0,0,0") ] glyph2 = self._setupTestGlyph() glyph2.anchors = [ dict(x=3, y=-4, name="foo", identifier="1", color="0,0,0,0") ] glyph3 = glyph1 + glyph2 expected = [ dict(x=4, y=-6, name="foo", identifier="1", color="0,0,0,0") ] self.assertEqual(glyph3.anchors, expected) def test_anchors_sub(self): glyph1 = self._setupTestGlyph() glyph1.anchors = [ dict(x=1, y=-2, name="foo", identifier="1", color="0,0,0,0") ] glyph2 = self._setupTestGlyph() glyph2.anchors = [ dict(x=3, y=-4, name="foo", identifier="1", color="0,0,0,0") ] glyph3 = glyph1 - glyph2 expected = [ dict(x=-2, y=2, name="foo", identifier="1", color="0,0,0,0") ] self.assertEqual(glyph3.anchors, expected) def test_anchors_mul(self): glyph1 = self._setupTestGlyph() glyph1.anchors = [ dict(x=1, y=-2, name="foo", identifier="1", color="0,0,0,0") ] glyph2 = glyph1 * 2 expected = [ dict(x=2, y=-4, name="foo", identifier="1", color="0,0,0,0") ] self.assertEqual(glyph2.anchors, expected) def test_anchors_div(self): glyph1 = self._setupTestGlyph() glyph1.anchors = [ dict(x=1, y=-2, name="foo", identifier="1", color="0,0,0,0") ] glyph2 = glyph1 / 2 expected = [ dict(x=0.5, y=-1, name="foo", identifier="1", color="0,0,0,0") ] self.assertEqual(glyph2.anchors, expected) def test_anchors_round(self): glyph1 = self._setupTestGlyph() glyph1.anchors = [ dict(x=99.9, y=-100.1, name="foo", identifier="1", color="0,0,0,0") ] glyph2 = glyph1.round() expected = [ dict(x=100, y=-100, name="foo", identifier="1", color="0,0,0,0") ] self.assertEqual(glyph2.anchors, expected) def test_image_round(self): glyph1 = self._setupTestGlyph() glyph1.image = dict(fileName="foo", transformation=(1, 2, 3, 4, 4.99, 6.01), color="0,0,0,0") expected = dict(fileName="foo", transformation=(1, 2, 3, 4, 5, 6), color="0,0,0,0") glyph2 = glyph1.round() self.assertEqual(glyph2.image, expected) class MathGlyphPenTest(unittest.TestCase): def __init__(self, methodName): unittest.TestCase.__init__(self, methodName) def test_pen_with_lines(self): pen = MathGlyphPen() pen.beginPath(identifier="contour 1") pen.addPoint((0, 100), "line", smooth=False, name="name 1", identifier="point 1") pen.addPoint((100, 100), "line", smooth=False, 
name="name 2", identifier="point 2") pen.addPoint((100, 0), "line", smooth=False, name="name 3", identifier="point 3") pen.addPoint((0, 0), "line", smooth=False, name="name 4", identifier="point 4") pen.endPath() expected = [ ("curve", (0, 100), False, "name 1", "point 1"), (None, (0, 100), False, None, None), (None, (100, 100), False, None, None), ("curve", (100, 100), False, "name 2", "point 2"), (None, (100, 100), False, None, None), (None, (100, 0), False, None, None), ("curve", (100, 0), False, "name 3", "point 3"), (None, (100, 0), False, None, None), (None, (0, 0), False, None, None), ("curve", (0, 0), False, "name 4", "point 4"), (None, (0, 0), False, None, None), (None, (0, 100), False, None, None), ] self.assertEqual(pen.contours[-1]["points"], expected) self.assertEqual(pen.contours[-1]["identifier"], 'contour 1') def test_pen_with_lines_and_curves(self): pen = MathGlyphPen() pen.beginPath(identifier="contour 1") pen.addPoint((0, 50), "curve", smooth=False, name="name 1", identifier="point 1") pen.addPoint((50, 100), "line", smooth=False, name="name 2", identifier="point 2") pen.addPoint((75, 100), None) pen.addPoint((100, 75), None) pen.addPoint((100, 50), "curve", smooth=True, name="name 3", identifier="point 3") pen.addPoint((100, 25), None) pen.addPoint((75, 0), None) pen.addPoint((50, 0), "curve", smooth=False, name="name 4", identifier="point 4") pen.addPoint((25, 0), None) pen.addPoint((0, 25), None) pen.endPath() expected = [ ("curve", (0, 50), False, "name 1", "point 1"), (None, (0, 50), False, None, None), (None, (50, 100), False, None, None), ("curve", (50, 100), False, "name 2", "point 2"), (None, (75, 100), False, None, None), (None, (100, 75), False, None, None), ("curve", (100, 50), True, "name 3", "point 3"), (None, (100, 25), False, None, None), (None, (75, 0), False, None, None), ("curve", (50, 0), False, "name 4", "point 4"), (None, (25, 0), False, None, None), (None, (0, 25), False, None, None), ] self.assertEqual(pen.contours[-1]["points"], expected) self.assertEqual(pen.contours[-1]["identifier"], 'contour 1') class _TestPointPen(AbstractPointPen): def __init__(self): self._text = [] def dump(self): return "\n".join(self._text) def _prep(self, i): if isinstance(i, basestring): i = "\"%s\"" % i return str(i) def beginPath(self, identifier=None, **kwargs): self._text.append("beginPath(identifier=%s)" % self._prep(identifier)) def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs): self._text.append( "addPoint(%s, segmentType=%s, smooth=%s, name=%s, " "identifier=%s)" % ( self._prep(pt), self._prep(segmentType), self._prep(smooth), self._prep(name), self._prep(identifier) ) ) def endPath(self): self._text.append("endPath()") def addComponent(self, baseGlyph, transformation, identifier=None, **kwargs): self._text.append( "addComponent(baseGlyph=%s, transformation=%s, identifier=%s)" % ( self._prep(baseGlyph), self._prep(transformation), self._prep(identifier) ) ) class FilterRedundantPointPenTest(unittest.TestCase): def __init__(self, methodName): unittest.TestCase.__init__(self, methodName) def test_flushContour(self): points = [ ("curve", (0, 100), False, "name 1", "point 1"), (None, (0, 100), False, None, None), (None, (100, 100), False, None, None), ("curve", (100, 100), False, "name 2", "point 2"), (None, (100, 100), False, None, None), (None, (100, 0), False, None, None), ("curve", (100, 0), False, "name 3", "point 3"), (None, (100, 0), False, None, None), (None, (0, 0), False, None, None), ("curve", (0, 0), False, 
"name 4", "point 4"), (None, (0, 0), False, None, None), (None, (0, 100), False, None, None), ] testPen = _TestPointPen() filterPen = FilterRedundantPointPen(testPen) filterPen.beginPath(identifier="contour 1") for segmentType, pt, smooth, name, identifier in points: filterPen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier) filterPen.endPath() self.assertEqual( testPen.dump(), 'beginPath(identifier="contour 1")\n' 'addPoint((0, 100), segmentType="line", smooth=False, ' 'name="name 1", identifier="point 1")\n' 'addPoint((100, 100), segmentType="line", smooth=False, ' 'name="name 2", identifier="point 2")\n' 'addPoint((100, 0), segmentType="line", smooth=False, ' 'name="name 3", identifier="point 3")\n' 'addPoint((0, 0), segmentType="line", smooth=False, ' 'name="name 4", identifier="point 4")\n' 'endPath()' ) class PrivateFuncsTest(unittest.TestCase): def __init__(self, methodName): unittest.TestCase.__init__(self, methodName) def test_processMathOneContours(self): contours1 = [ dict(identifier="contour 1", points=[("line", (1, 3), False, "test", "1")]) ] contours2 = [ dict(identifier=None, points=[(None, (4, 6), True, None, None)]) ] self.assertEqual( _processMathOneContours(contours1, contours2, addPt), [ dict(identifier="contour 1", points=[("line", (5, 9), False, "test", "1")]) ] ) def test_processMathTwoContours(self): contours = [ dict(identifier="contour 1", points=[("line", (1, 3), False, "test", "1")]) ] self.assertEqual( _processMathTwoContours(contours, (2, 1.5), mulPt), [ dict(identifier="contour 1", points=[("line", (2, 4.5), False, "test", "1")]) ] ) True def test_anchorTree(self): anchors = [ dict(identifier="1", name="test", x=1, y=2, color=None), dict(name="test", x=1, y=2, color=None), dict(name="test", x=3, y=4, color=None), dict(name="test", x=2, y=3, color=None), dict(name="c", x=1, y=2, color=None), dict(name="a", x=0, y=0, color=None), ] self.assertEqual( list(_anchorTree(anchors).items()), [ ("test", [ ("1", 1, 2, None), (None, 1, 2, None), (None, 3, 4, None), (None, 2, 3, None) ]), ("c", [ (None, 1, 2, None) ]), ("a", [ (None, 0, 0, None) ]) ] ) def test_pairAnchors_matching_identifiers(self): anchors1 = { "test": [ (None, 1, 2, None), ("identifier 1", 3, 4, None) ] } anchors2 = { "test": [ ("identifier 1", 1, 2, None), (None, 3, 4, None) ] } self.assertEqual( _pairAnchors(anchors1, anchors2), [ ( dict(name="test", identifier=None, x=1, y=2, color=None), dict(name="test", identifier=None, x=3, y=4, color=None) ), ( dict(name="test", identifier="identifier 1", x=3, y=4, color=None), dict(name="test", identifier="identifier 1", x=1, y=2, color=None) ) ] ) def test_pairAnchors_mismatched_identifiers(self): anchors1 = { "test": [ ("identifier 1", 3, 4, None) ] } anchors2 = { "test": [ ("identifier 2", 1, 2, None), ] } self.assertEqual( _pairAnchors(anchors1, anchors2), [ ( dict(name="test", identifier="identifier 1", x=3, y=4, color=None), dict(name="test", identifier="identifier 2", x=1, y=2, color=None) ) ] ) def test_processMathOneAnchors(self): anchorPairs = [ ( dict(x=100, y=-100, name="foo", identifier="1", color="0,0,0,0"), dict(x=200, y=-200, name="bar", identifier="2", color="1,1,1,1") ) ] self.assertEqual( _processMathOneAnchors(anchorPairs, addPt), [ dict(x=300, y=-300, name="foo", identifier="1", color="0,0,0,0") ] ) def test_processMathTwoAnchors(self): anchors = [ dict(x=100, y=-100, name="foo", identifier="1", color="0,0,0,0") ] self.assertEqual( _processMathTwoAnchors(anchors, (2, 1.5), mulPt), [ dict(x=200, y=-150, 
name="foo", identifier="1", color="0,0,0,0") ] ) def test_pairComponents(self): components1 = [ dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1"), dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1"), dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None) ] components2 = [ dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None), dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1"), dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1") ] self.assertEqual( _pairComponents(components1, components2), [ ( dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1"), dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1") ), ( dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1"), dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1") ), ( dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None), dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None) ), ] ) components1 = [ dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None), dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None) ] components2 = [ dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None), dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None) ] self.assertEqual( _pairComponents(components1, components2), [ ( dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None), dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None) ), ( dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None), dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None) ), ] ) def test_processMathOneComponents(self): components = [ ( dict(baseGlyph="A", transformation=(1, 3, 5, 7, 9, 11), identifier="1"), dict(baseGlyph="A", transformation=(12, 14, 16, 18, 20, 22), identifier=None) ) ] self.assertEqual( _processMathOneComponents(components, addPt), [ dict(baseGlyph="A", transformation=(13, 17, 21, 25, 29, 33), identifier="1") ] ) def test_processMathTwoComponents(self): components = [ dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5, 6), identifier="1") ] scaled_components = [ dict(baseGlyph="A", transformation=(2, 4, 4.5, 6, 10, 9), identifier="1") ] self.assertEqual( _processMathTwoComponents(components, (2, 1.5), mulPt), scaled_components ) self.assertEqual( _processMathTwoComponents( components, (2, 1.5), mulPt, scaleComponentTransform=True ), scaled_components ) self.assertEqual( _processMathTwoComponents( components, (2, 1.5), mulPt, scaleComponentTransform=False ), [ dict( baseGlyph="A", transformation=(1, 2, 3, 4, 10, 9), identifier="1" ) ], ) def test_expandImage(self): self.assertEqual( _expandImage(None), dict(fileName=None, transformation=(1, 0, 0, 1, 0, 0), color=None) ) self.assertEqual( _expandImage(dict(fileName="foo")), dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 0), color=None) ) def test_compressImage(self): self.assertEqual( _compressImage( dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 0), color=None)), dict(fileName="foo", color=None, xScale=1, xyScale=0, yxScale=0, yScale=1, xOffset=0, yOffset=0) ) def test_pairImages(self): image1 = dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 0), color=None) image2 = dict(fileName="foo", transformation=(2, 0, 0, 2, 0, 0), color="0,0,0,0") self.assertEqual( _pairImages(image1, image2), (image1, image2) ) image1 = dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 
0), color=None) image2 = dict(fileName="bar", transformation=(1, 0, 0, 1, 0, 0), color=None) self.assertEqual( _pairImages(image1, image2), () ) def test_processMathOneImage(self): image1 = dict(fileName="foo", transformation=(1, 3, 5, 7, 9, 11), color="0,0,0,0") image2 = dict(fileName="bar", transformation=(12, 14, 16, 18, 20, 22), color=None) self.assertEqual( _processMathOneImage((image1, image2), addPt), dict(fileName="foo", transformation=(13, 17, 21, 25, 29, 33), color="0,0,0,0") ) def test_processMathTwoImage(self): image = dict(fileName="foo", transformation=(1, 2, 3, 4, 5, 6), color="0,0,0,0") self.assertEqual( _processMathTwoImage(image, (2, 1.5), mulPt), dict(fileName="foo", transformation=(2, 4, 4.5, 6, 10, 9), color="0,0,0,0") ) def test_processMathOneTransformation(self): transformation1 = (1, 3, 5, 7, 9, 11) transformation2 = (12, 14, 16, 18, 20, 22) self.assertEqual( _processMathOneTransformation(transformation1, transformation2, addPt), (13, 17, 21, 25, 29, 33) ) def test_processMathTwoTransformation(self): transformation = (1, 2, 3, 4, 5, 6) self.assertEqual( _processMathTwoTransformation(transformation, (2, 1.5), mulPt), (2, 4, 4.5, 6, 10, 9) ) self.assertEqual( _processMathTwoTransformation(transformation, (2, 1.5), mulPt, doScale=True), (2, 4, 4.5, 6, 10, 9) ) self.assertEqual( _processMathTwoTransformation(transformation, (2, 1.5), mulPt, doScale=False), (1, 2, 3, 4, 10, 9) ) def test_roundContours(self): contour = [ dict(identifier="contour 1", points=[("line", (0.55, 3.1), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (0.55, 3.1), True, "test", "1")]) ] self.assertEqual( _roundContours(contour), [ dict(identifier="contour 1", points=[("line", (1, 3), False, "test", "1")]), dict(identifier="contour 1", points=[("line", (1, 3), True, "test", "1")]) ] ) def test_roundTransformation(self): transformation = (1, 2, 3, 4, 4.99, 6.01) self.assertEqual( _roundTransformation(transformation), (1, 2, 3, 4, 5, 6) ) def test_roundImage(self): image = dict(fileName="foo", transformation=(1, 2, 3, 4, 4.99, 6.01), color="0,0,0,0") self.assertEqual( _roundImage(image), dict(fileName="foo", transformation=(1, 2, 3, 4, 5, 6), color="0,0,0,0") ) def test_roundComponents(self): components = [ dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5.1, 5.99), identifier="1"), ] self.assertEqual( _roundComponents(components), [ dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5, 6), identifier="1") ] ) def test_roundAnchors(self): anchors = [ dict(x=99.9, y=-100.1, name="foo", identifier="1", color="0,0,0,0") ] self.assertEqual( _roundAnchors(anchors), [ dict(x=100, y=-100, name="foo", identifier="1", color="0,0,0,0") ] ) if __name__ == "__main__": unittest.main()
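# --- Standalone usage sketch (hypothetical; not part of the test file) ---
# The arithmetic these tests exercise, in brief: MathGlyph supports +, -, *
# and round() across width, height, contours, and so on. Assumes fontMath is
# installed; the attribute setup mirrors _setupTestGlyph above.
from fontMath.mathGlyph import MathGlyph

a = MathGlyph(None)
a.width, a.height = 100, 0
b = MathGlyph(None)
b.width, b.height = 50, 0

print((a + b).width)    # 150
print((a * 0.5).width)  # 50.0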
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class PublicIPAddressesOperations(object): """PublicIPAddressesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2021_02_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list_cloud_service_public_ip_addresses( self, resource_group_name, # type: str cloud_service_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.PublicIPAddressListResult"] """Gets information about all public IP addresses on a cloud service level. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param cloud_service_name: The name of the cloud service. 
:type cloud_service_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PublicIPAddressListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_cloud_service_public_ip_addresses.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'cloudServiceName': self._serialize.url("cloud_service_name", cloud_service_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_cloud_service_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/publicipaddresses'} # type: ignore def list_cloud_service_role_instance_public_ip_addresses( self, resource_group_name, # type: str cloud_service_name, # type: str role_instance_name, # type: str network_interface_name, # type: str ip_configuration_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.PublicIPAddressListResult"] """Gets information about all public IP addresses in a role instance IP configuration in a cloud service. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param cloud_service_name: The name of the cloud service. :type cloud_service_name: str :param role_instance_name: The name of role instance. :type role_instance_name: str :param network_interface_name: The network interface name. :type network_interface_name: str :param ip_configuration_name: The IP configuration name. 
:type ip_configuration_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PublicIPAddressListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_cloud_service_role_instance_public_ip_addresses.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'cloudServiceName': self._serialize.url("cloud_service_name", cloud_service_name, 'str'), 'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'), 'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'), 'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_cloud_service_role_instance_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore def get_cloud_service_public_ip_address( self, resource_group_name, # type: str cloud_service_name, # type: str role_instance_name, # type: str network_interface_name, # type: str ip_configuration_name, # type: str public_ip_address_name, # type: str expand=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> "_models.PublicIPAddress" """Get the specified public IP address in a cloud service. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param cloud_service_name: The name of the cloud service. :type cloud_service_name: str :param role_instance_name: The role instance name. :type role_instance_name: str :param network_interface_name: The name of the network interface. :type network_interface_name: str :param ip_configuration_name: The name of the IP configuration. :type ip_configuration_name: str :param public_ip_address_name: The name of the public IP Address. :type public_ip_address_name: str :param expand: Expands referenced resources. :type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PublicIPAddress, or the result of cls(response) :rtype: ~azure.mgmt.network.v2021_02_01.models.PublicIPAddress :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self.get_cloud_service_public_ip_address.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'cloudServiceName': self._serialize.url("cloud_service_name", cloud_service_name, 'str'), 'roleInstanceName': self._serialize.url("role_instance_name", role_instance_name, 'str'), 'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'), 'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('PublicIPAddress', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_cloud_service_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore def _delete_initial( self, resource_group_name, # type: str public_ip_address_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore def begin_delete( self, resource_group_name, # type: str public_ip_address_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes the specified public IP address. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_address_name: The name of the public IP address. :type public_ip_address_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( resource_group_name=resource_group_name, public_ip_address_name=public_ip_address_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore def get( self, resource_group_name, # type: str public_ip_address_name, # type: str expand=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> "_models.PublicIPAddress" """Gets the specified public IP address in a specified resource group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_address_name: The name of the public IP address. :type public_ip_address_name: str :param expand: Expands referenced resources. 
:type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PublicIPAddress, or the result of cls(response) :rtype: ~azure.mgmt.network.v2021_02_01.models.PublicIPAddress :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('PublicIPAddress', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore def _create_or_update_initial( self, resource_group_name, # type: str public_ip_address_name, # type: str parameters, # type: "_models.PublicIPAddress" **kwargs # type: Any ): # type: (...) 
-> "_models.PublicIPAddress" cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'PublicIPAddress') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('PublicIPAddress', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('PublicIPAddress', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore def begin_create_or_update( self, resource_group_name, # type: str public_ip_address_name, # type: str parameters, # type: "_models.PublicIPAddress" **kwargs # type: Any ): # type: (...) -> LROPoller["_models.PublicIPAddress"] """Creates or updates a static or dynamic public IP address. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_address_name: The name of the public IP address. :type public_ip_address_name: str :param parameters: Parameters supplied to the create or update public IP address operation. :type parameters: ~azure.mgmt.network.v2021_02_01.models.PublicIPAddress :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either PublicIPAddress or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.PublicIPAddress] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, public_ip_address_name=public_ip_address_name, parameters=parameters, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('PublicIPAddress', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore def update_tags( self, resource_group_name, # type: str public_ip_address_name, # type: str parameters, # type: "_models.TagsObject" **kwargs # type: Any ): # type: (...) -> "_models.PublicIPAddress" """Updates public IP address tags. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param public_ip_address_name: The name of the public IP address. :type public_ip_address_name: str :param parameters: Parameters supplied to update public IP address tags. 
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject :keyword callable cls: A custom type or function that will be passed the direct response :return: PublicIPAddress, or the result of cls(response) :rtype: ~azure.mgmt.network.v2021_02_01.models.PublicIPAddress :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update_tags.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('PublicIPAddress', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore def list_all( self, **kwargs # type: Any ): # type: (...) -> Iterable["_models.PublicIPAddressListResult"] """Gets all the public IP addresses in a subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PublicIPAddressListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_all.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore def list( self, resource_group_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.PublicIPAddressListResult"] """Gets all public IP addresses in a resource group. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PublicIPAddressListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore def list_virtual_machine_scale_set_public_ip_addresses( self, resource_group_name, # type: str virtual_machine_scale_set_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.PublicIPAddressListResult"] """Gets information about all public IP addresses on a virtual machine scale set level. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_machine_scale_set_name: The name of the virtual machine scale set. 
:type virtual_machine_scale_set_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PublicIPAddressListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-10-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore def list_virtual_machine_scale_set_vm_public_ip_addresses( self, resource_group_name, # type: str virtual_machine_scale_set_name, # type: str virtualmachine_index, # type: str network_interface_name, # type: str ip_configuration_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.PublicIPAddressListResult"] """Gets information about all public IP addresses in a virtual machine IP configuration in a virtual machine scale set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_machine_scale_set_name: The name of the virtual machine scale set. :type virtual_machine_scale_set_name: str :param virtualmachine_index: The virtual machine index. :type virtualmachine_index: str :param network_interface_name: The network interface name. 
:type network_interface_name: str :param ip_configuration_name: The IP configuration name. :type ip_configuration_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.PublicIPAddressListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-10-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'), 'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore def get_virtual_machine_scale_set_public_ip_address( self, resource_group_name, # type: str virtual_machine_scale_set_name, # type: str virtualmachine_index, # type: str network_interface_name, # type: str ip_configuration_name, # type: str public_ip_address_name, # type: str expand=None, # type: Optional[str] 
**kwargs # type: Any ): # type: (...) -> "_models.PublicIPAddress" """Get the specified public IP address in a virtual machine scale set. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param virtual_machine_scale_set_name: The name of the virtual machine scale set. :type virtual_machine_scale_set_name: str :param virtualmachine_index: The virtual machine index. :type virtualmachine_index: str :param network_interface_name: The name of the network interface. :type network_interface_name: str :param ip_configuration_name: The name of the IP configuration. :type ip_configuration_name: str :param public_ip_address_name: The name of the public IP Address. :type public_ip_address_name: str :param expand: Expands referenced resources. :type expand: str :keyword callable cls: A custom type or function that will be passed the direct response :return: PublicIPAddress, or the result of cls(response) :rtype: ~azure.mgmt.network.v2021_02_01.models.PublicIPAddress :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2018-10-01" accept = "application/json" # Construct URL url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'), 'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'), 'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'), 'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'), 'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if expand is not None: query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('PublicIPAddress', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_virtual_machine_scale_set_public_ip_address.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
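# Hypothetical usage sketch for the operation group above; the client
# construction, resource group name, and address name are illustrative
# assumptions, not part of this generated module.

from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")

# list() returns an ItemPaged iterator; following next_link between pages is
# handled by the get_next/extract_data closures shown above.
for public_ip in client.public_ip_addresses.list("example-rg"):
    print(public_ip.name, public_ip.ip_address)

# Long-running operations hand back an LROPoller; result() blocks until the
# ARM polling strategy reports a terminal state.
poller = client.public_ip_addresses.begin_delete("example-rg", "example-ip")
poller.result()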
from unittest import mock from .tools import run_object from contexts.plugin_interface import PluginInterface, EXAMPLES, SETUP, ACTION, ASSERTION, TEARDOWN from contexts import assertion class WhenRunningAParametrisedSpec: def given_a_parametrised_test(self): class ParametrisedSpec: initialised = 0 setups = [] actions = [] assertions = [] teardowns = [] @classmethod def examples(cls): yield 1 yield 2 def __init__(self): self.__class__.initialised += 1 def context(self, example): self.__class__.setups.append(example) def because(self, example): self.__class__.actions.append(example) def it(self, example): self.__class__.assertions.append(example) def cleanup(self, example): self.__class__.teardowns.append(example) self.ParametrisedSpec = ParametrisedSpec self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { ParametrisedSpec.examples: EXAMPLES, ParametrisedSpec.context: SETUP, ParametrisedSpec.because: ACTION, ParametrisedSpec.it: ASSERTION, ParametrisedSpec.cleanup: TEARDOWN }[meth] def because_we_run_the_class(self): run_object(self.ParametrisedSpec, [self.plugin]) def it_should_instantiate_the_class_twice(self): assert self.ParametrisedSpec.initialised == 2 def it_should_run_the_setup_twice(self): assert self.ParametrisedSpec.setups == [1, 2] def it_should_run_the_assertion_twice(self): assert self.ParametrisedSpec.assertions == [1, 2] def it_should_run_the_action_twice(self): assert self.ParametrisedSpec.actions == [1, 2] def it_should_run_the_teardown_twice(self): assert self.ParametrisedSpec.teardowns == [1, 2] class WhenRunningAParametrisedSpecAndExamplesYieldsTuples: def given_a_parametrised_test(self): class ParametrisedSpec: params = [] @classmethod def examples(cls): yield 1, 2 yield 3, 4 def it(self, a, b): self.__class__.params.append(a) self.__class__.params.append(b) self.ParametrisedSpec = ParametrisedSpec self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { ParametrisedSpec.examples: EXAMPLES, ParametrisedSpec.it: ASSERTION }[meth] def because_we_run_the_class(self): run_object(self.ParametrisedSpec, [self.plugin]) def it_should_unpack_the_tuples(self): assert self.ParametrisedSpec.params == [1, 2, 3, 4] class WhenRunningAParametrisedSpecAndExamplesYieldsTuplesButTheMethodsOnlyAcceptOneArgument: def given_a_parametrised_test(self): class ParametrisedSpec: params = [] @classmethod def examples(cls): yield 1, 2 yield 3, 4 def it(self, a): self.__class__.params.append(a) self.ParametrisedSpec = ParametrisedSpec self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { ParametrisedSpec.examples: EXAMPLES, ParametrisedSpec.it: ASSERTION }[meth] def because_we_run_the_class(self): run_object(self.ParametrisedSpec, [self.plugin]) def it_should_not_unpack_the_tuples(self): assert self.ParametrisedSpec.params == [(1, 2), (3, 4)] class WhenRunningAParametrisedSpecWithNonParametrisedMethods: def context(self): class ParametrisedSpec: initialised = 0 setups = 0 actions = 0 assertions = 0 teardowns = 0 @classmethod def examples(cls): yield 1 yield 2 def it(self): self.__class__.assertions += 1 self.ParametrisedSpec = ParametrisedSpec self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { ParametrisedSpec.examples: EXAMPLES, ParametrisedSpec.it: ASSERTION }[meth] def because_we_run_the_class(self): run_object(self.ParametrisedSpec, [self.plugin]) def it_should_run_the_assertion_twice(self): assert 
self.ParametrisedSpec.assertions == 2 class WhenNotifyingAPluginOfExamples: def context(self): class ParametrisedSpec: @classmethod def examples(cls): yield 1 yield 2 def it(self): pass self.ParametrisedSpec = ParametrisedSpec self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { ParametrisedSpec.examples: EXAMPLES, ParametrisedSpec.it: ASSERTION }[meth] def because_we_run_the_class(self): run_object(self.ParametrisedSpec, [self.plugin]) def it_should_call_test_class_started_once(self): assert self.plugin.mock_calls[3] == mock.call.test_class_started(self.ParametrisedSpec) assert self.plugin.test_class_started.call_count == 1 @assertion def it_should_call_context_started_with_the_first_example(self): assert self.plugin.context_started.mock_calls[0] == mock.call(self.ParametrisedSpec, 1) @assertion def it_should_call_context_ended_with_the_first_example(self): assert self.plugin.context_ended.mock_calls[0] == mock.call(self.ParametrisedSpec, 1) @assertion def it_should_call_context_started_with_the_second_example(self): assert self.plugin.context_started.mock_calls[1] == mock.call(self.ParametrisedSpec, 2) @assertion def it_should_call_context_ended_with_the_second_example(self): assert self.plugin.context_ended.mock_calls[1] == mock.call(self.ParametrisedSpec, 2) def it_should_call_test_class_ended_once(self): assert self.plugin.mock_calls[-3] == mock.call.test_class_ended(self.ParametrisedSpec) assert self.plugin.test_class_ended.call_count == 1 class WhenExamplesRaisesAnException: def context(self): self.exception = Exception() class ParametrisedSpec: total = 0 @classmethod def examples(s): yield 3 raise self.exception def it(s, example): s.__class__.total += example self.spec = ParametrisedSpec self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { ParametrisedSpec.examples: EXAMPLES, ParametrisedSpec.it: ASSERTION }[meth] def because_we_run_the_spec(self): run_object(self.spec, [self.plugin]) def it_should_run_the_first_one(self): assert self.spec.total == 3 def it_should_send_an_exception_to_the_plugin(self): self.plugin.test_class_errored.assert_called_once_with(self.spec, self.exception) class WhenUserFailsToMakeExamplesAClassmethod: def context(self): class Naughty: def examples(self): pass def it(self): pass self.spec = Naughty self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { Naughty.examples: EXAMPLES, Naughty.it: ASSERTION }[meth] def because_we_run_the_spec(self): run_object(self.spec, [self.plugin]) def it_should_call_test_class_errored_with_the_class_and_the_exception(self): self.plugin.test_class_errored.assert_called_once_with(self.spec, mock.ANY) assert isinstance(self.plugin.test_class_errored.call_args[0][1], TypeError) class WhenExamplesReturnsNone: def context(self): class ParametrisedSpec: times_run = 0 @classmethod def examples(cls): pass def it(self): self.__class__.times_run += 1 self.spec = ParametrisedSpec self.plugin = mock.Mock(spec=PluginInterface) self.plugin.identify_method.side_effect = lambda meth: { ParametrisedSpec.examples: EXAMPLES, ParametrisedSpec.it: ASSERTION }[meth] def because_we_run_the_spec(self): run_object(self.spec, [self.plugin]) def it_should_run_the_spec_once(self): assert self.spec.times_run == 1
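# A minimal sketch of the dispatch rule the "examples yields tuples" specs
# above pin down (assumed logic for illustration; the real behaviour lives
# inside the contexts runner): a tuple example is unpacked only when the
# method accepts more than one argument besides self, and methods that take
# no arguments are called without the example at all.

import inspect


def call_with_example(bound_method, example):
    # Bound methods do not expose self, so the remaining parameter count
    # tells us whether to splat the example tuple or pass it through whole.
    params = inspect.signature(bound_method).parameters
    if not params:
        return bound_method()
    if isinstance(example, tuple) and len(params) > 1:
        return bound_method(*example)
    return bound_method(example)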
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for stochastic graphs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib import distributions as distributions_lib from tensorflow.contrib.bayesflow.python.ops import stochastic_graph from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.platform import test st = stochastic_tensor sg = stochastic_graph distributions = distributions_lib class NormalNotParam(distributions.Normal): @property def is_reparameterized(self): return False class TestSurrogateLosses(test.TestCase): def testPathwiseDerivativeDoesNotAddSurrogateLosses(self): with self.test_session(): mu = [0.0, 0.1, 0.2] sigma = constant_op.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleValue()): prior = st.StochasticTensor(distributions.Normal(mu=mu, sigma=sigma)) likelihood = st.StochasticTensor( distributions.Normal( mu=prior, sigma=sigma)) self.assertTrue(prior.distribution.is_reparameterized) self.assertTrue(likelihood.distribution.is_reparameterized) loss = math_ops.square(array_ops.identity(likelihood) - [0.0, 0.1, 0.2]) sum_loss = math_ops.reduce_sum(loss) surrogate_loss = sg.surrogate_loss([loss]) with self.assertRaisesRegexp(ValueError, "dimensionality 1 or greater"): _ = sg.surrogate_loss([sum_loss]) surrogate_from_both = sg.surrogate_loss( [loss, sum_loss * array_ops.ones_like(loss)]) # Pathwise derivative terms do not require add'l surrogate loss terms. 
with self.test_session() as sess: self.assertAllClose(*sess.run([loss, surrogate_loss])) self.assertAllClose(*sess.run([(loss + sum_loss), surrogate_from_both])) def _testSurrogateLoss(self, session, losses, expected_addl_terms, xs): surrogate_loss = sg.surrogate_loss(losses) expected_surrogate_loss = math_ops.add_n(losses + expected_addl_terms) self.assertAllClose(*session.run([surrogate_loss, expected_surrogate_loss])) # Test backprop expected_grads = gradients_impl.gradients(ys=expected_surrogate_loss, xs=xs) surrogate_grads = gradients_impl.gradients(ys=surrogate_loss, xs=xs) self.assertEqual(len(expected_grads), len(surrogate_grads)) grad_values = session.run(expected_grads + surrogate_grads) n_grad = len(expected_grads) self.assertAllClose(grad_values[:n_grad], grad_values[n_grad:]) def testSurrogateLoss(self): with self.test_session() as sess: mu = constant_op.constant([0.0, 0.1, 0.2]) sigma = constant_op.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleValue()): prior = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma)) likelihood = st.StochasticTensor(NormalNotParam(mu=prior, sigma=sigma)) prior_2 = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma)) loss = math_ops.square(array_ops.identity(likelihood) - mu) part_loss = math_ops.square(array_ops.identity(prior) - mu) sum_loss = math_ops.reduce_sum(loss) loss_nodeps = math_ops.square(array_ops.identity(prior_2) - mu) # For ground truth, use the stop-gradient versions of the losses loss_nograd = array_ops.stop_gradient(loss) loss_nodeps_nograd = array_ops.stop_gradient(loss_nodeps) sum_loss_nograd = array_ops.stop_gradient(sum_loss) # These score functions should ignore prior_2 self._testSurrogateLoss( session=sess, losses=[loss], expected_addl_terms=[ likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd, prior.distribution.log_pdf(prior.value()) * loss_nograd ], xs=[mu, sigma]) self._testSurrogateLoss( session=sess, losses=[loss, part_loss], expected_addl_terms=[ likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd, (prior.distribution.log_pdf(prior.value()) * array_ops.stop_gradient(part_loss + loss)) ], xs=[mu, sigma]) self._testSurrogateLoss( session=sess, losses=[sum_loss * array_ops.ones_like(loss)], expected_addl_terms=[( likelihood.distribution.log_pdf(likelihood.value()) * sum_loss_nograd), prior.distribution.log_pdf(prior.value()) * sum_loss_nograd], xs=[mu, sigma]) self._testSurrogateLoss( session=sess, losses=[loss, sum_loss * array_ops.ones_like(loss)], expected_addl_terms=[( likelihood.distribution.log_pdf(likelihood.value()) * array_ops.stop_gradient(loss + sum_loss)), (prior.distribution.log_pdf(prior.value()) * array_ops.stop_gradient(loss + sum_loss))], xs=[mu, sigma]) # These score functions should ignore prior and likelihood self._testSurrogateLoss( session=sess, losses=[loss_nodeps], expected_addl_terms=[(prior_2.distribution.log_pdf(prior_2.value()) * loss_nodeps_nograd)], xs=[mu, sigma]) # These score functions should include all terms selectively self._testSurrogateLoss( session=sess, losses=[loss, loss_nodeps], # We can't guarantee ordering of output losses in this case. 
expected_addl_terms=[( likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd), prior.distribution.log_pdf(prior.value()) * loss_nograd, (prior_2.distribution.log_pdf(prior_2.value()) * loss_nodeps_nograd)], xs=[mu, sigma]) def testNoSurrogateLoss(self): with self.test_session(): mu = constant_op.constant([0.0, 0.1, 0.2]) sigma = constant_op.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleValue()): dt = st.StochasticTensor( NormalNotParam( mu=mu, sigma=sigma), loss_fn=None) self.assertEqual(None, dt.loss(constant_op.constant([2.0]))) def testExplicitStochasticTensors(self): with self.test_session() as sess: mu = constant_op.constant([0.0, 0.1, 0.2]) sigma = constant_op.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleValue()): dt1 = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma)) dt2 = st.StochasticTensor(NormalNotParam(mu=mu, sigma=sigma)) loss = math_ops.square(array_ops.identity(dt1)) + 10. + dt2 sl_all = sg.surrogate_loss([loss]) sl_dt1 = sg.surrogate_loss([loss], stochastic_tensors=[dt1]) sl_dt2 = sg.surrogate_loss([loss], stochastic_tensors=[dt2]) dt1_term = dt1.distribution.log_pdf(dt1) * loss dt2_term = dt2.distribution.log_pdf(dt2) * loss self.assertAllClose(*sess.run( [sl_all, sum([loss, dt1_term, dt2_term])])) self.assertAllClose(*sess.run([sl_dt1, sum([loss, dt1_term])])) self.assertAllClose(*sess.run([sl_dt2, sum([loss, dt2_term])])) class StochasticDependenciesMapTest(test.TestCase): def testBuildsMapOfUpstreamNodes(self): dt1 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.)) dt2 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.)) out1 = dt1.value() + 1. out2 = dt2.value() + 2. x = out1 + out2 y = out2 * 3. dep_map = sg._stochastic_dependencies_map([x, y]) self.assertEqual(dep_map[dt1], set([x])) self.assertEqual(dep_map[dt2], set([x, y])) def testHandlesStackedStochasticNodes(self): dt1 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.)) out1 = dt1.value() + 1. dt2 = st.StochasticTensor(distributions.Normal(mu=out1, sigma=1.)) x = dt2.value() + 2. dt3 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.)) y = dt3.value() * 3. dep_map = sg._stochastic_dependencies_map([x, y]) self.assertEqual(dep_map[dt1], set([x])) self.assertEqual(dep_map[dt2], set([x])) self.assertEqual(dep_map[dt3], set([y])) def testTraversesControlInputs(self): dt1 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.)) logits = dt1.value() * 3. dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits)) dt3 = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.)) x = dt3.value() y = array_ops.ones((2, 2)) * 4. z = array_ops.ones((2, 2)) * 3. out = control_flow_ops.cond( math_ops.cast(dt2, dtypes.bool), lambda: math_ops.add(x, y), lambda: math_ops.square(z)) out += 5. dep_map = sg._stochastic_dependencies_map([out]) self.assertEqual(dep_map[dt1], set([out])) self.assertEqual(dep_map[dt2], set([out])) self.assertEqual(dep_map[dt3], set([out])) if __name__ == "__main__": test.main()
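# A sketch of the score-function (REINFORCE) term that the surrogate-loss
# tests above expect for each non-reparameterized stochastic tensor. This is
# illustrative only; sg.surrogate_loss assembles these terms itself. Since
# grad E[loss] = E[loss * grad log p(sample)], each stochastic tensor
# upstream of a loss contributes log_pdf(sample) weighted by the
# gradient-stopped total of the losses it influences.

from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops


def score_function_term(stochastic_tensor, influenced_losses):
    # Mirrors the expected_addl_terms built in _testSurrogateLoss: the log
    # probability of the drawn sample times the stop_gradient'd sum of the
    # losses that depend on that sample.
    total = array_ops.stop_gradient(math_ops.add_n(influenced_losses))
    dist = stochastic_tensor.distribution
    return dist.log_pdf(stochastic_tensor.value()) * total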
# Copyright 2016 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import fixtures import mock import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils import testtools from nova import context from nova import rpc from nova import test # Make a class that resets all of the global variables in nova.rpc class RPCResetFixture(fixtures.Fixture): def _setUp(self): self.trans = copy.copy(rpc.TRANSPORT) self.noti_trans = copy.copy(rpc.NOTIFICATION_TRANSPORT) self.noti = copy.copy(rpc.NOTIFIER) self.all_mods = copy.copy(rpc.ALLOWED_EXMODS) self.ext_mods = copy.copy(rpc.EXTRA_EXMODS) self.conf = copy.copy(rpc.CONF) self.addCleanup(self._reset_everything) def _reset_everything(self): rpc.TRANSPORT = self.trans rpc.NOTIFICATION_TRANSPORT = self.noti_trans rpc.NOTIFIER = self.noti rpc.ALLOWED_EXMODS = self.all_mods rpc.EXTRA_EXMODS = self.ext_mods rpc.CONF = self.conf # We can't import nova.test.TestCase because that sets up an RPCFixture # that pretty much nullifies all of this testing class TestRPC(testtools.TestCase): def setUp(self): super(TestRPC, self).setUp() self.useFixture(RPCResetFixture()) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_unversioned(self, mock_notif, mock_noti_trans, mock_ser, mock_exmods): # The expected call to get the legacy notifier will require no new # kwargs, and we expect the new notifier will need the noop driver expected = [{}, {'driver': 'noop'}] self._test_init(mock_notif, mock_noti_trans, mock_ser, mock_exmods, 'unversioned', expected) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_both(self, mock_notif, mock_noti_trans, mock_ser, mock_exmods): expected = [{}, {'topics': ['versioned_notifications']}] self._test_init(mock_notif, mock_noti_trans, mock_ser, mock_exmods, 'both', expected) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_versioned(self, mock_notif, mock_noti_trans, mock_ser, mock_exmods): expected = [{'driver': 'noop'}, {'topics': ['versioned_notifications']}] self._test_init(mock_notif, mock_noti_trans, mock_ser, mock_exmods, 'versioned', expected) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(rpc, 'RequestContextSerializer') @mock.patch.object(messaging, 'get_notification_transport') @mock.patch.object(messaging, 'Notifier') def test_init_versioned_with_custom_topics(self, mock_notif, mock_noti_trans, mock_ser, mock_exmods): expected = [{'driver': 'noop'}, {'topics': ['custom_topic1', 'custom_topic2']}] self._test_init( mock_notif, mock_noti_trans, mock_ser, mock_exmods, 
            'versioned', expected,
            versioned_notification_topics=['custom_topic1', 'custom_topic2'])

    def test_cleanup_transport_null(self):
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        rpc.LEGACY_NOTIFIER = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup_notification_transport_null(self):
        rpc.TRANSPORT = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup_legacy_notifier_null(self):
        rpc.TRANSPORT = mock.Mock()
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        # LEGACY_NOTIFIER is left unset, so cleanup() should assert,
        # mirroring the other *_null tests above and below.
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup_notifier_null(self):
        rpc.TRANSPORT = mock.Mock()
        rpc.LEGACY_NOTIFIER = mock.Mock()
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        self.assertRaises(AssertionError, rpc.cleanup)

    def test_cleanup(self):
        rpc.LEGACY_NOTIFIER = mock.Mock()
        rpc.NOTIFIER = mock.Mock()
        rpc.NOTIFICATION_TRANSPORT = mock.Mock()
        rpc.TRANSPORT = mock.Mock()
        trans_cleanup = mock.Mock()
        not_trans_cleanup = mock.Mock()
        rpc.TRANSPORT.cleanup = trans_cleanup
        rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup
        rpc.cleanup()
        trans_cleanup.assert_called_once_with()
        not_trans_cleanup.assert_called_once_with()
        self.assertIsNone(rpc.TRANSPORT)
        self.assertIsNone(rpc.NOTIFICATION_TRANSPORT)
        self.assertIsNone(rpc.LEGACY_NOTIFIER)
        self.assertIsNone(rpc.NOTIFIER)

    @mock.patch.object(messaging, 'set_transport_defaults')
    def test_set_defaults(self, mock_set):
        control_exchange = mock.Mock()
        rpc.set_defaults(control_exchange)
        mock_set.assert_called_once_with(control_exchange)

    def test_add_extra_exmods(self):
        rpc.EXTRA_EXMODS = []
        rpc.add_extra_exmods('foo', 'bar')
        self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS)

    def test_clear_extra_exmods(self):
        rpc.EXTRA_EXMODS = ['foo', 'bar']
        rpc.clear_extra_exmods()
        self.assertEqual(0, len(rpc.EXTRA_EXMODS))

    def test_get_allowed_exmods(self):
        rpc.ALLOWED_EXMODS = ['foo']
        rpc.EXTRA_EXMODS = ['bar']
        exmods = rpc.get_allowed_exmods()
        self.assertEqual(['foo', 'bar'], exmods)

    @mock.patch.object(messaging, 'TransportURL')
    def test_get_transport_url(self, mock_url):
        conf = mock.Mock()
        rpc.CONF = conf
        mock_url.parse.return_value = 'foo'
        url = rpc.get_transport_url(url_str='bar')
        self.assertEqual('foo', url)
        mock_url.parse.assert_called_once_with(conf, 'bar')

    @mock.patch.object(messaging, 'TransportURL')
    def test_get_transport_url_null(self, mock_url):
        conf = mock.Mock()
        rpc.CONF = conf
        mock_url.parse.return_value = 'foo'
        url = rpc.get_transport_url()
        self.assertEqual('foo', url)
        mock_url.parse.assert_called_once_with(conf, None)

    @mock.patch.object(rpc, 'profiler', None)
    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'RPCClient')
    def test_get_client(self, mock_client, mock_ser):
        rpc.TRANSPORT = mock.Mock()
        tgt = mock.Mock()
        ser = mock.Mock()
        mock_client.return_value = 'client'
        mock_ser.return_value = ser
        client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
        mock_ser.assert_called_once_with('foo')
        mock_client.assert_called_once_with(rpc.TRANSPORT, tgt,
                                            version_cap='1.0',
                                            serializer=ser)
        self.assertEqual('client', client)

    @mock.patch.object(rpc, 'profiler', None)
    @mock.patch.object(rpc, 'RequestContextSerializer')
    @mock.patch.object(messaging, 'get_rpc_server')
    def test_get_server(self, mock_get, mock_ser):
        rpc.TRANSPORT = mock.Mock()
        ser = mock.Mock()
        tgt = mock.Mock()
        ends = mock.Mock()
        mock_ser.return_value = ser
        mock_get.return_value = 'server'
        server = rpc.get_server(tgt, ends, serializer='foo')
        mock_ser.assert_called_once_with('foo')
        access_policy = dispatcher.DefaultRPCAccessPolicy
mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, executor='eventlet', serializer=ser, access_policy=access_policy) self.assertEqual('server', server) @mock.patch.object(rpc, 'profiler', mock.Mock()) @mock.patch.object(rpc, 'ProfilerRequestContextSerializer') @mock.patch.object(messaging, 'RPCClient') def test_get_client_profiler_enabled(self, mock_client, mock_ser): rpc.TRANSPORT = mock.Mock() tgt = mock.Mock() ser = mock.Mock() mock_client.return_value = 'client' mock_ser.return_value = ser client = rpc.get_client(tgt, version_cap='1.0', serializer='foo') mock_ser.assert_called_once_with('foo') mock_client.assert_called_once_with(rpc.TRANSPORT, tgt, version_cap='1.0', serializer=ser) self.assertEqual('client', client) @mock.patch.object(rpc, 'profiler', mock.Mock()) @mock.patch.object(rpc, 'ProfilerRequestContextSerializer') @mock.patch.object(messaging, 'get_rpc_server') def test_get_server_profiler_enabled(self, mock_get, mock_ser): rpc.TRANSPORT = mock.Mock() ser = mock.Mock() tgt = mock.Mock() ends = mock.Mock() mock_ser.return_value = ser mock_get.return_value = 'server' server = rpc.get_server(tgt, ends, serializer='foo') mock_ser.assert_called_once_with('foo') access_policy = dispatcher.DefaultRPCAccessPolicy mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends, executor='eventlet', serializer=ser, access_policy=access_policy) self.assertEqual('server', server) def test_get_notifier(self): rpc.LEGACY_NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.LEGACY_NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', publisher_id='foo') mock_prep.assert_called_once_with(publisher_id='foo') self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier) self.assertEqual('notifier', notifier.notifier) def test_get_notifier_null_publisher(self): rpc.LEGACY_NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.LEGACY_NOTIFIER.prepare = mock_prep notifier = rpc.get_notifier('service', host='bar') mock_prep.assert_called_once_with(publisher_id='service.bar') self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier) self.assertEqual('notifier', notifier.notifier) def test_get_versioned_notifier(self): rpc.NOTIFIER = mock.Mock() mock_prep = mock.Mock() mock_prep.return_value = 'notifier' rpc.NOTIFIER.prepare = mock_prep notifier = rpc.get_versioned_notifier('service.foo') mock_prep.assert_called_once_with(publisher_id='service.foo') self.assertEqual('notifier', notifier) @mock.patch.object(rpc, 'get_allowed_exmods') @mock.patch.object(messaging, 'get_rpc_transport') def test_create_transport(self, mock_transport, mock_exmods): exmods = mock_exmods.return_value transport = rpc.create_transport(mock.sentinel.url) self.assertEqual(mock_transport.return_value, transport) mock_exmods.assert_called_once_with() mock_transport.assert_called_once_with(rpc.CONF, url=mock.sentinel.url, allowed_remote_exmods=exmods) def _test_init(self, mock_notif, mock_noti_trans, mock_ser, mock_exmods, notif_format, expected_driver_topic_kwargs, versioned_notification_topics=['versioned_notifications']): legacy_notifier = mock.Mock() notifier = mock.Mock() notif_transport = mock.Mock() transport = mock.Mock() serializer = mock.Mock() conf = mock.Mock() conf.transport_url = None conf.notifications.notification_format = notif_format conf.notifications.versioned_notifications_topics = ( versioned_notification_topics) mock_exmods.return_value = ['foo'] mock_noti_trans.return_value = notif_transport mock_ser.return_value = 
serializer mock_notif.side_effect = [legacy_notifier, notifier] @mock.patch.object(rpc, 'CONF', new=conf) @mock.patch.object(rpc, 'create_transport') @mock.patch.object(rpc, 'get_transport_url') def _test(get_url, create_transport): create_transport.return_value = transport rpc.init(conf) create_transport.assert_called_once_with(get_url.return_value) _test() self.assertTrue(mock_exmods.called) self.assertIsNotNone(rpc.TRANSPORT) self.assertIsNotNone(rpc.LEGACY_NOTIFIER) self.assertIsNotNone(rpc.NOTIFIER) self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER) self.assertEqual(notifier, rpc.NOTIFIER) expected_calls = [] for kwargs in expected_driver_topic_kwargs: expected_kwargs = {'serializer': serializer} expected_kwargs.update(kwargs) expected_calls.append(((notif_transport,), expected_kwargs)) self.assertEqual(expected_calls, mock_notif.call_args_list, "The calls to messaging.Notifier() did not create " "the legacy and versioned notifiers properly.") class TestJsonPayloadSerializer(test.NoDBTestCase): def test_serialize_entity(self): with mock.patch.object(jsonutils, 'to_primitive') as mock_prim: rpc.JsonPayloadSerializer.serialize_entity('context', 'entity') mock_prim.assert_called_once_with('entity', convert_instances=True) class TestRequestContextSerializer(test.NoDBTestCase): def setUp(self): super(TestRequestContextSerializer, self).setUp() self.mock_base = mock.Mock() self.ser = rpc.RequestContextSerializer(self.mock_base) self.ser_null = rpc.RequestContextSerializer(None) def test_serialize_entity(self): self.mock_base.serialize_entity.return_value = 'foo' ser_ent = self.ser.serialize_entity('context', 'entity') self.mock_base.serialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', ser_ent) def test_serialize_entity_null_base(self): ser_ent = self.ser_null.serialize_entity('context', 'entity') self.assertEqual('entity', ser_ent) def test_deserialize_entity(self): self.mock_base.deserialize_entity.return_value = 'foo' deser_ent = self.ser.deserialize_entity('context', 'entity') self.mock_base.deserialize_entity.assert_called_once_with('context', 'entity') self.assertEqual('foo', deser_ent) def test_deserialize_entity_null_base(self): deser_ent = self.ser_null.deserialize_entity('context', 'entity') self.assertEqual('entity', deser_ent) def test_serialize_context(self): context = mock.Mock() self.ser.serialize_context(context) context.to_dict.assert_called_once_with() @mock.patch.object(context, 'RequestContext') def test_deserialize_context(self, mock_req): self.ser.deserialize_context('context') mock_req.from_dict.assert_called_once_with('context') class TestProfilerRequestContextSerializer(test.NoDBTestCase): def setUp(self): super(TestProfilerRequestContextSerializer, self).setUp() self.ser = rpc.ProfilerRequestContextSerializer(mock.Mock()) @mock.patch('nova.rpc.profiler') def test_serialize_context(self, mock_profiler): prof = mock_profiler.get.return_value prof.hmac_key = 'swordfish' prof.get_base_id.return_value = 'baseid' prof.get_id.return_value = 'parentid' context = mock.Mock() context.to_dict.return_value = {'project_id': 'test'} self.assertEqual({'project_id': 'test', 'trace_info': { 'hmac_key': 'swordfish', 'base_id': 'baseid', 'parent_id': 'parentid'}}, self.ser.serialize_context(context)) @mock.patch('nova.rpc.profiler') def test_deserialize_context(self, mock_profiler): serialized = {'project_id': 'test', 'trace_info': { 'hmac_key': 'swordfish', 'base_id': 'baseid', 'parent_id': 'parentid'}} context = 
self.ser.deserialize_context(serialized) self.assertEqual('test', context.project_id) mock_profiler.init.assert_called_once_with( hmac_key='swordfish', base_id='baseid', parent_id='parentid') class TestClientRouter(test.NoDBTestCase): @mock.patch('oslo_messaging.RPCClient') def test_by_instance(self, mock_rpcclient): default_client = mock.Mock() cell_client = mock.Mock() mock_rpcclient.return_value = cell_client ctxt = mock.Mock() ctxt.mq_connection = mock.sentinel.transport router = rpc.ClientRouter(default_client) client = router.client(ctxt) # verify a client was created by ClientRouter mock_rpcclient.assert_called_once_with( mock.sentinel.transport, default_client.target, version_cap=default_client.version_cap, serializer=default_client.serializer) # verify cell client was returned self.assertEqual(cell_client, client) @mock.patch('oslo_messaging.RPCClient') def test_by_instance_untargeted(self, mock_rpcclient): default_client = mock.Mock() cell_client = mock.Mock() mock_rpcclient.return_value = cell_client ctxt = mock.Mock() ctxt.mq_connection = None router = rpc.ClientRouter(default_client) client = router.client(ctxt) self.assertEqual(router.default_client, client) self.assertFalse(mock_rpcclient.called) class TestIsNotificationsEnabledDecorator(test.NoDBTestCase): def setUp(self): super(TestIsNotificationsEnabledDecorator, self).setUp() self.f = mock.Mock() self.f.__name__ = 'f' self.decorated = rpc.if_notifications_enabled(self.f) def test_call_func_if_needed(self): self.decorated() self.f.assert_called_once_with() @mock.patch('nova.rpc.NOTIFIER.is_enabled', return_value=False) def test_not_call_func_if_notifier_disabled(self, mock_is_enabled): self.decorated() self.assertEqual(0, len(self.f.mock_calls)) def test_not_call_func_if_only_unversioned_notifications_requested(self): self.flags(notification_format='unversioned', group='notifications') self.decorated() self.assertEqual(0, len(self.f.mock_calls))
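# ---------------------------------------------------------------------------
# Minimal sketch of the snapshot/restore pattern behind RPCResetFixture:
# copy module-level state in _setUp() and register a cleanup that puts it
# back, so each test starts from a known state.  `fake_rpc` is a hypothetical
# stand-in for a module with globals; only the fixtures API is assumed.
import copy
import types

import fixtures

fake_rpc = types.SimpleNamespace(TRANSPORT=None, NOTIFIER=None)


class FakeRPCResetFixture(fixtures.Fixture):
    def _setUp(self):
        self._saved = copy.copy(fake_rpc.__dict__)
        self.addCleanup(self._restore)

    def _restore(self):
        fake_rpc.__dict__.update(self._saved)


# Any mutation made inside the fixture's lifetime is undone on cleanup.
with FakeRPCResetFixture():
    fake_rpc.TRANSPORT = object()
assert fake_rpc.TRANSPORT is None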
# Copyright (c) Moshe Zadka # See LICENSE for details. """Test ncolony.schedulelib""" from __future__ import division import os import unittest import sys import six from zope.interface import verify from twisted.python import failure from twisted.internet import defer from twisted.internet import interfaces as tiinterfaces from twisted.internet import reactor from twisted.application import internet as tainternet from twisted.runner.test import test_procmon from ncolony import schedulelib from ncolony.client.tests import test_heart class TestProcessProtocol(unittest.TestCase): """Test schedulelib.ProcessProtocol""" def setUp(self): out = six.StringIO() oldstdout = sys.stdout def _cleanup(): sys.stdout = oldstdout self.addCleanup(_cleanup) sys.stdout = out self.deferred = defer.Deferred() self.pp = schedulelib.ProcessProtocol(self.deferred) def test_process_stdout_one_line(self): """Test one stdout line""" self.pp.childDataReceived(1, "hello") self.assertEquals(sys.stdout.getvalue(), '[1] hello\n') def test_process_stdout_two_line(self): """Test two stdout lines""" self.pp.childDataReceived(1, "hello\nworld") self.assertEquals(sys.stdout.getvalue(), '[1] hello\n[1] world\n') def test_process_stderr_one_line(self): """Test one stderr line""" self.pp.childDataReceived(2, "hello") self.assertEquals(sys.stdout.getvalue(), '[2] hello\n') def test_end(self): """Test process end""" myfail = failure.Failure(ValueError("nonono")) self.pp.processEnded(myfail) result = [] self.deferred.addErrback(result.append) self.assertIs(result[0], myfail) def test_implements(self): """Test object implements the right interface""" self.assertTrue(verify.verifyObject(tiinterfaces.IProcessProtocol, self.pp)) def test_doesnt_break(self): """Test required methods do not fail""" self.pp.processExited(None) self.pp.childConnectionLost(None) self.pp.makeConnection(None) class TestRunProcess(unittest.TestCase): """Test schedulelib.runProcess""" def setUp(self): self.reactor = test_procmon.DummyProcessReactor() out = six.StringIO() oldstdout = sys.stdout def _cleanup(): sys.stdout = oldstdout self.addCleanup(_cleanup) sys.stdout = out def test_run_process_simple(self): """Test process successful termination causes a log message""" args = ['/bin/echo', 'hello'] timeout = 10 grace = 2.5 results = [] deferred = schedulelib.runProcess(args, timeout, grace, self.reactor) deferred.addCallback(results.append) process, = self.reactor.spawnedProcesses self.assertIsInstance(process.proto, schedulelib.ProcessProtocol) # pylint: disable=protected-access self.assertIs(process._reactor, self.reactor) self.assertIs(process._executable, args[0]) self.assertIs(process._args, args) self.assertIs(process._environment, os.environ) # pylint: enable=protected-access terminate, kill = self.reactor.getDelayedCalls() self.assertTrue(terminate.active()) self.assertEquals(terminate.getTime(), timeout) self.assertTrue(kill.active()) self.assertEquals(kill.getTime(), timeout+grace) process.processEnded(0) self.assertFalse(terminate.active()) self.assertFalse(kill.active()) dummy, = results output = sys.stdout.getvalue() message = ('A process has ended without apparent errors: ' 'process finished with exit code 0.\n') self.assertEquals(output, message) def test_run_process_failing(self): """Test process failure causes a log message""" args = ['/bin/echo', 'hello'] timeout = 10 grace = 2.5 results = [] deferred = schedulelib.runProcess(args, timeout, grace, self.reactor) deferred.addCallback(results.append) process, = self.reactor.spawnedProcesses 
terminate, kill = self.reactor.getDelayedCalls() process.processEnded(1) self.assertFalse(terminate.active()) self.assertFalse(kill.active()) dummy, = results output = sys.stdout.getvalue() message = ('A process has ended with a probable error condition: ' 'process ended with exit code 1.\n') self.assertEquals(output, message) def test_run_process_stuck(self): """Test process gets TERM if it does not end by itself""" args = ['/bin/echo', 'hello'] timeout = 10 grace = 2.5 results = [] deferred = schedulelib.runProcess(args, timeout, grace, self.reactor) deferred.addCallback(results.append) dummy, = self.reactor.spawnedProcesses terminate, kill = self.reactor.getDelayedCalls() self.reactor.advance(10) self.assertFalse(terminate.active()) self.assertTrue(kill.active()) self.reactor.advance(2) self.assertFalse(kill.active()) dummy, = results output = sys.stdout.getvalue() message = ('A process has ended without apparent errors: ' 'process finished with exit code 0.\n') self.assertEquals(output, message) def test_run_process_stuck_hard(self): """Test process gets KILL if TERM doesn't kill it""" args = ['/bin/echo', 'hello'] timeout = 10 grace = 0.5 results = [] deferred = schedulelib.runProcess(args, timeout, grace, self.reactor) deferred.addCallback(results.append) dummy, = self.reactor.spawnedProcesses terminate, kill = self.reactor.getDelayedCalls() self.reactor.advance(10) self.assertFalse(terminate.active()) self.assertTrue(kill.active()) self.reactor.advance(0.7) self.assertFalse(kill.active()) dummy, = results output = sys.stdout.getvalue() message = ('A process has ended with a probable error condition: ' 'process ended with exit code 1.\n') self.assertEquals(output, message) def test_run_process_pass_through_unexpected_fail(self): """Test that non-process-related failures fall through""" args = ['/bin/echo', 'hello'] timeout = 10 grace = 2.5 results = [] deferred = schedulelib.runProcess(args, timeout, grace, self.reactor) deferred.addErrback(results.append) deferred.errback(failure.Failure(ValueError("HAHA"))) dummy, = results class TestService(unittest.TestCase): """Test the service""" def setUp(self): self.parser = schedulelib.Options() self.args = dict(arg='/bin/echo hello', timeout='10', grace='2', frequency='30') def getArgs(self): """Get the arguments as a list of strings""" return ' '.join(' '.join('--%s %s' % (key, vpart) for vpart in value.split()) for key, value in six.iteritems(self.args)).split() def test_normal(self): """Test correct parsing of a command line""" args = self.getArgs() self.parser.parseOptions(args) self.assertEquals(self.parser['args'], ['/bin/echo', 'hello']) self.assertEquals(self.parser['timeout'], 10) self.assertEquals(self.parser['grace'], 2) self.assertEquals(self.parser['frequency'], 30) def helper_test_required(self, value): """Helper method: test that a given parameter is required""" del self.args[value] with self.assertRaises(ValueError): self.parser.parseOptions(self.getArgs()) def test_required_args(self): """Test that at least one argument is required""" self.helper_test_required('arg') def test_required_timeout(self): """Test that timeout is required""" self.helper_test_required('timeout') def test_required_grace(self): """Test that grace is required""" self.helper_test_required('grace') def test_required_frequency(self): """Test that frequency is required""" self.helper_test_required('frequency') def test_make_service(self): """Test the make service function""" opts = {} opts['args'] = ['/bin/echo', 'hello'] opts['timeout'] = 10 
        opts['grace'] = 2
        opts['frequency'] = 30
        masterService = schedulelib.makeService(opts)
        service = masterService.getServiceNamed('scheduler')
        self.assertIsInstance(service, tainternet.TimerService)
        func, args, kwargs = service.call
        self.assertFalse(kwargs)
        self.assertIs(func, schedulelib.runProcess)
        self.assertEquals(args, (opts['args'], opts['timeout'],
                                 opts['grace'], reactor))
        self.assertEquals(service.step, opts['frequency'])

    def test_make_service_with_health(self):
        """Test schedulelib with heart beater"""
        opts = dict(timeout=10, grace=2, frequency=30)
        opts['args'] = ['/bin/echo', 'hello']
        myEnv = test_heart.buildEnv()
        test_heart.replaceEnvironment(self, myEnv)
        masterService = schedulelib.makeService(opts)
        service = masterService.getServiceNamed('heart')
        test_heart.checkHeartService(self, service)
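# ---------------------------------------------------------------------------
# A minimal sketch, assuming only twisted.internet.task.Clock, of the
# fake-reactor pattern the tests above use through DummyProcessReactor:
# schedule TERM at `timeout` and KILL at `timeout + grace`, then advance
# virtual time and check which delayed calls fired.  Names are illustrative.
from twisted.internet import task

clock = task.Clock()
fired = []
terminate = clock.callLater(10, fired.append, 'TERM')
kill = clock.callLater(10 + 2.5, fired.append, 'KILL')

clock.advance(10)                 # TERM fires at the timeout...
assert fired == ['TERM'] and not terminate.active() and kill.active()
clock.advance(2.5)                # ...KILL fires after the grace period
assert fired == ['TERM', 'KILL'] and not kill.active()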
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from cinderclient import api_versions from osc_lib.command import command from osc_lib import exceptions from osc_lib import utils from openstackclient.i18n import _ LOG = logging.getLogger(__name__) def _format_group_snapshot(snapshot): columns = ( 'id', 'status', 'name', 'description', 'group_id', 'group_type_id', ) column_headers = ( 'ID', 'Status', 'Name', 'Description', 'Group', 'Group Type', ) return ( column_headers, utils.get_item_properties( snapshot, columns, ), ) class CreateVolumeGroupSnapshot(command.ShowOne): """Create a volume group snapshot. This command requires ``--os-volume-api-version`` 3.13 or greater. """ def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( 'volume_group', metavar='<volume_group>', help=_('Name or ID of volume group to create a snapshot of.'), ) parser.add_argument( '--name', metavar='<name>', help=_('Name of the volume group snapshot.'), ) parser.add_argument( '--description', metavar='<description>', help=_('Description of a volume group snapshot.') ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume if volume_client.api_version < api_versions.APIVersion('3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot create' command" ) raise exceptions.CommandError(msg) volume_group = utils.find_resource( volume_client.groups, parsed_args.volume_group, ) snapshot = volume_client.group_snapshots.create( volume_group.id, parsed_args.name, parsed_args.description) return _format_group_snapshot(snapshot) class DeleteVolumeGroupSnapshot(command.Command): """Delete a volume group snapshot. This command requires ``--os-volume-api-version`` 3.14 or greater. """ def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( 'snapshot', metavar='<snapshot>', help=_('Name or ID of volume group snapshot to delete'), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume if volume_client.api_version < api_versions.APIVersion('3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot delete' command" ) raise exceptions.CommandError(msg) snapshot = utils.find_resource( volume_client.group_snapshots, parsed_args.snapshot, ) volume_client.group_snapshots.delete(snapshot.id) class ListVolumeGroupSnapshot(command.Lister): """Lists all volume group snapshot. This command requires ``--os-volume-api-version`` 3.14 or greater. 
""" def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( '--all-projects', dest='all_projects', action='store_true', default=utils.env('ALL_PROJECTS', default=False), help=_('Shows details for all projects (admin only).'), ) # TODO(stephenfin): Add once we have an equivalent command for # 'cinder list-filters' # parser.add_argument( # '--filter', # metavar='<key=value>', # action=parseractions.KeyValueAction, # dest='filters', # help=_( # "Filter key and value pairs. Use 'foo' to " # "check enabled filters from server. Use 'key~=value' for " # "inexact filtering if the key supports " # "(supported by --os-volume-api-version 3.33 or above)" # ), # ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume if volume_client.api_version < api_versions.APIVersion('3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot list' command" ) raise exceptions.CommandError(msg) search_opts = { 'all_tenants': parsed_args.all_projects, } groups = volume_client.group_snapshots.list( search_opts=search_opts) column_headers = ( 'ID', 'Status', 'Name', ) columns = ( 'id', 'status', 'name', ) return ( column_headers, ( utils.get_item_properties(a, columns) for a in groups ), ) class ShowVolumeGroupSnapshot(command.ShowOne): """Show detailed information for a volume group snapshot. This command requires ``--os-volume-api-version`` 3.14 or greater. """ def get_parser(self, prog_name): parser = super().get_parser(prog_name) parser.add_argument( 'snapshot', metavar='<snapshot>', help=_('Name or ID of volume group snapshot.'), ) return parser def take_action(self, parsed_args): volume_client = self.app.client_manager.volume if volume_client.api_version < api_versions.APIVersion('3.14'): msg = _( "--os-volume-api-version 3.14 or greater is required to " "support the 'volume group snapshot show' command" ) raise exceptions.CommandError(msg) snapshot = utils.find_resource( volume_client.group_snapshots, parsed_args.snapshot, ) # TODO(stephenfin): Do we need this? snapshot = volume_client.groups.show(snapshot.id) return _format_group_snapshot(snapshot)
from __future__ import division import pygame from pygame.locals import Color from pygame import key import Scene import math import cairo import rsvg import array class ControlsScreen(Scene.Scene): def __init__(self, size): super(ControlsScreen, self).__init__() screen = pygame.display.set_mode(size, pygame.HWSURFACE|pygame.DOUBLEBUF) #screen.fill(white) self.bg = pygame.image.load('./refinery.png').convert() #repeating bg for x in range(0, screen.get_width(), self.bg.get_width()): for y in range(0, screen.get_height(), self.bg.get_height()): screen.blit(self.bg, (x,y)) self.bgcache = screen.copy() pygame.display.flip() self.months = [ ["January", 31], ["February", 28], ["March", 31], ["April", 30], ["May", 31], ["June", 30], ["July", 31], ["August", 31], ["September", 30], ["October", 31], ["November", 30], ["December", 31] ] self.month = 5 self.oldmonth = 5 self.loadText(screen, 48, self.months[self.month][0], (50, 25), 100, 100) newsize = screen.get_height() / 4 self.scalesize = newsize * 2 / 3 self.scalesize = int(math.floor(self.scalesize)) self.btn = self.loadsvg('./gray.svg', screen, self.scalesize) self.imgSize = newsize #width of button; should be same as height self.baseX = screen.get_width() / 2 - 9 * self.imgSize / 2 posX = self.baseX self.posY = screen.get_height() / 2 - 96 / 2 self.dayofmonth = 29 self.direction = 1 for i in xrange(-3, 7): screen.blit(self.btn, (posX, self.posY)) self.loadText(screen, 24, self.getRelativeDay(i, True), (posX, self.posY), self.scalesize, self.scalesize) posX += self.imgSize self.frame = self.loadsvg('./onyx.svg', screen, self.scalesize + self.scalesize / 10) self.framepos = (screen.get_width() / 2 - self.imgSize / 2 - self.imgSize / 25, self.posY - 3) screen.blit(self.frame, self.framepos) pygame.display.update() self.curmove = self.imgSize self.moved = 0 self.moving = False self.lastMove = False self.nextMonth = False self.monthPos = 25 self.monthHalf = False self.speed = self.imgSize / 30 self.curmonth = "June" def getRelativeDay(self, mv, boolStart): if boolStart: start = self.dayofmonth - 1 else: start = self.dayofmonth - 2 if self.direction > 0 else self.dayofmonth newVal = start + mv if newVal < 1: if self.month == 0: newVal = self.months[11][1] + newVal else: newVal = self.months[self.month - 1][1] + newVal elif newVal > self.months[self.month][1]: newVal -= self.months[self.month][1] return str(newVal) def render(self, screen): if self.moving: screen.blit(self.bgcache, (0, 0)) #start animation #screen.blit(self.getTextAsImage(self.months[self.month][0]), (50, self.monthPos)) month = self.oldmonth if not self.monthHalf else self.month self.loadText(screen, 48, self.months[month][0], (50, self.monthPos), 200, 100) #TODO month posX = self.baseX - self.curmove for i in xrange(-3, 7): screen.blit(self.btn, (posX, self.posY)) self.loadText(screen, 24, self.getRelativeDay(i, False), (posX, self.posY), self.scalesize, self.scalesize) posX += self.imgSize screen.blit(self.frame, self.framepos) if self.lastMove: self.moving = False self.lastMove = False if not math.fabs(self.curmove) >= self.imgSize * math.fabs(self.direction): if self.direction > 0: self.direction -= 1 elif self.direction < 0: self.direction += 1 self.move(self.direction) if self.nextMonth: self.monthHalf = False self.nextMonth = False self.oldmonth = self.month pygame.display.update() def loadText(self, screen, size, text, pos, targetWidth, targetHeight): font_path = "./Ubuntu-M.ttf" fontObj = pygame.font.Font(font_path, size) txt = fontObj.render(text, 1, (255,255,255)) fSize 
= fontObj.size(text) fPos = (pos[0] + ((targetWidth - fSize[0]) / 2), pos[1] + ((targetHeight - fSize[1]) / 2)) screen.blit(txt, fPos) def update(self): if self.moving: self.curmove += self.speed * self.direction if math.fabs(self.curmove) >= self.imgSize: self.lastMove = True def moveRight(self): self.move(25) def moveLeft(self): self.move(-25) def move(self, direction): if not self.moving: self.direction = direction self.curmove = 0 self.moving = True if direction > 0: if self.dayofmonth < self.months[self.month][1]: self.dayofmonth += 1 else: self.dayofmonth = 1 self.nextMonth = True self.oldmonth = self.month if self.month == 11: self.month = 0 else: self.month += 1 else: if self.dayofmonth == 1: self.nextMonth = True self.oldmonth = self.month if self.month == 0: self.month = 11 else: self.month -= 1 if self.oldmonth == 0: self.dayofmonth = self.months[11][1] else: self.dayofmonth = self.months[self.month][1] else: self.dayofmonth -= 1 def handle_event(self, event): pass def processEvent(self, arg): pass def processKey(self, arg): return self.processEvent(arg) def loadsvg(self, filename, surface, targetWidth): WIDTH = surface.get_width() HEIGHT = surface.get_height() data = array.array('c', chr(0) * WIDTH * HEIGHT * 4) cairosurface = cairo.ImageSurface.create_for_data(data, cairo.FORMAT_ARGB32, WIDTH, HEIGHT, WIDTH * 4) svg = rsvg.Handle(filename) dimens = svg.get_dimension_data() scale = targetWidth / dimens[0] targetHeight = scale * dimens[1] ctx = cairo.Context(cairosurface) if scale != 1: ctx.scale(scale, scale) svg.render_cairo(ctx) image = pygame.image.frombuffer(data.tostring(), (WIDTH, HEIGHT),"RGBA") print "displaying image at", WIDTH, HEIGHT return image
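# ---------------------------------------------------------------------------
# A small, self-contained sketch of the tiling loop used for the repeating
# background in ControlsScreen.__init__ above, factored into a reusable
# helper.  Pure pygame; the function name is illustrative.
import pygame


def tile_background(screen, tile):
    """Blit `tile` repeatedly so it covers the whole `screen` surface."""
    for x in range(0, screen.get_width(), tile.get_width()):
        for y in range(0, screen.get_height(), tile.get_height()):
            screen.blit(tile, (x, y))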
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SparseAdd.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import timeit import numpy as np import tensorflow as tf def _sparsify(x, thresh=0.5, index_dtype=np.int64): x[x < thresh] = 0 non_zero = np.where(x) x_indices = np.vstack(non_zero).astype(index_dtype).T x_values = x[non_zero] x_shape = x.shape return tf.SparseTensor( indices=x_indices, values=x_values, shape=x_shape), len(x_values) class SparseAddTest(tf.test.TestCase): def _randomTensor(self, size, np_dtype, sparse=True): n, m = size x = np.random.randn(n, m).astype(np_dtype) return _sparsify(x) if sparse else x def _SparseTensor_3x3(self, negate=False): # [ 1] # [2 ] # [3 4] # ...or its cwise negation, if `negate` ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]]) val = np.array([1, 2, 3, 4]) if negate: val = -np.array([1, 2, 3, 4]) shape = np.array([3, 3]) return tf.SparseTensor( tf.constant(ind, tf.int64), tf.constant(val, tf.float32), tf.constant(shape, tf.int64)) def _SparseTensor_3x3_v2(self): # [ 1] # [-1.9 ] # [ 3 -4.2] ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]]) val = np.array([1, -1.9, 3, -4.2]) shape = np.array([3, 3]) return tf.SparseTensor( tf.constant(ind, tf.int64), tf.constant(val, tf.float32), tf.constant(shape, tf.int64)) def testAddSelf(self): with self.test_session(use_gpu=False) as sess: sp_a = self._SparseTensor_3x3() sp_b = self._SparseTensor_3x3() sp_sum = tf.sparse_add(sp_a, sp_b) sum_out = sess.run(sp_sum) self.assertEqual(sp_sum.shape.get_shape(), [2]) self.assertAllEqual( sum_out.indices, [[0, 1], [1, 0], [2, 0], [2, 1]]) self.assertAllEqual(sum_out.values, [2, 4, 6, 8]) self.assertAllEqual(sum_out.shape, [3, 3]) def testAddSelfAndNegation(self): with self.test_session(use_gpu=False) as sess: sp_a = self._SparseTensor_3x3() sp_b = self._SparseTensor_3x3(negate=True) sp_sum = tf.sparse_add(sp_a, sp_b, 0.1) sum_out = sess.run(sp_sum) self.assertEqual(sp_sum.shape.get_shape(), [2]) self.assertAllEqual(sum_out.indices, np.empty([0, 2])) self.assertAllEqual(sum_out.values, []) self.assertAllEqual(sum_out.shape, [3, 3]) def testSmallValuesShouldVanish(self): with self.test_session(use_gpu=False) as sess: sp_a = self._SparseTensor_3x3() sp_b = self._SparseTensor_3x3_v2() # sum: # [ 2] # [.1 ] # [ 6 -.2] # two values should vanish: |.1| < .21, and |-.2| < .21 sp_sum = tf.sparse_add(sp_a, sp_b, thresh=0.21) sum_out = sess.run(sp_sum) self.assertEqual(sp_sum.shape.get_shape(), [2]) self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]]) self.assertAllEqual(sum_out.values, [2, 6]) self.assertAllEqual(sum_out.shape, [3, 3]) # only .1 vanishes sp_sum = tf.sparse_add(sp_a, sp_b, thresh=0.11) sum_out = sess.run(sp_sum) self.assertEqual(sp_sum.shape.get_shape(), [2]) self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]]) self.assertAllClose(sum_out.values, [2, 6, -.2]) 
self.assertAllEqual(sum_out.shape, [3, 3]) def testGradients(self): np.random.seed(1618) # Make it reproducible. with self.test_session(use_gpu=False): for n in [10, 31]: for m in [4, 17]: sp_a, nnz_a = self._randomTensor([n, m], np.float32) sp_b, nnz_b = self._randomTensor([n, m], np.float32) sp_sum = tf.sparse_add(sp_a, sp_b) nnz_sum = len(sp_sum.values.eval()) err = tf.test.compute_gradient_error([sp_a.values, sp_b.values], [(nnz_a,), (nnz_b,)], sp_sum.values, (nnz_sum,)) self.assertLess(err, 1e-3) def testAddSparseDense(self): np.random.seed(1618) # Make it reproducible. n, m = np.random.randint(30, size=2) for dtype in [np.float32, np.float64, np.int64, np.complex64]: for index_dtype in [np.int32, np.int64]: rand_vals_np = np.random.randn(n, m).astype(dtype) dense_np = np.random.randn(n, m).astype(dtype) with self.test_session(use_gpu=False): sparse, unused_nnz = _sparsify(rand_vals_np, index_dtype=index_dtype) s = tf.sparse_add(sparse, tf.constant(dense_np)).eval() self.assertAllEqual(dense_np + rand_vals_np, s) self.assertTrue(s.dtype == dtype) # check commutativity s = tf.sparse_add(tf.constant(dense_np), sparse).eval() self.assertAllEqual(dense_np + rand_vals_np, s) self.assertTrue(s.dtype == dtype) def testSparseTensorDenseAddGradients(self): np.random.seed(1618) # Make it reproducible. n, m = np.random.randint(30, size=2) rand_vals_np = np.random.randn(n, m).astype(np.float32) dense_np = np.random.randn(n, m).astype(np.float32) with self.test_session(use_gpu=False): sparse, nnz = _sparsify(rand_vals_np) dense = tf.constant(dense_np, dtype=tf.float32) s = tf.sparse_add(sparse, dense) err = tf.test.compute_gradient_error( [sparse.values, dense], [(nnz,), (n, m)], s, (n, m)) self.assertLess(err, 1e-3) ######################## Benchmarking code def _s2d_add_vs_sparse_add(sparsity, n, m, num_iters=50): np.random.seed(1618) with tf.Session(graph=tf.Graph()) as sess: sp_vals = np.random.rand(n, m).astype(np.float32) sp_t, unused_nnz = _sparsify(sp_vals, thresh=sparsity, index_dtype=np.int32) vals = np.random.rand(n, m).astype(np.float32) s2d = tf.add(tf.sparse_tensor_to_dense(sp_t), tf.constant(vals)) sa = tf.sparse_add(sp_t, tf.constant(vals)) timeit.timeit(lambda: sess.run(s2d), number=3) timeit.timeit(lambda: sess.run(sa), number=3) s2d_total = timeit.timeit(lambda: sess.run(s2d), number=num_iters) sa_total = timeit.timeit(lambda: sess.run(sa), number=num_iters) # per-iter latency; secs to millis return s2d_total * 1e3 / num_iters, sa_total * 1e3 / num_iters class SparseAddBenchmark(tf.test.Benchmark): def benchmarkSparseAddDense(self): print("SparseAddDense: add with sparse_to_dense vs. sparse_add") print("%nnz \t n \t m \t millis(s2d) \t millis(sparse_add) \t speedup") for sparsity in [0.99, 0.5, 0.01]: for n in [1, 256, 50000]: for m in [100, 1000]: s2d_dt, sa_dt = _s2d_add_vs_sparse_add(sparsity, n, m) print("%.2f \t %d \t %d \t %.4f \t %.4f \t %.2f" % (sparsity, n, m, s2d_dt, sa_dt, s2d_dt / sa_dt)) if __name__ == "__main__": tf.test.main()
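# ---------------------------------------------------------------------------
# Illustrative NumPy sketch of what the thresholded sparse add verified above
# computes: sum two 2-D COO tensors, then drop result entries whose magnitude
# falls below `thresh`.  A naive dict-based version, not the TF kernel; the
# function name is hypothetical.
import numpy as np


def sparse_add_coo(indices_a, values_a, indices_b, values_b, thresh=0.0):
    """Add two 2-D COO sparse tensors; drop sums with magnitude < thresh."""
    acc = {}
    for idx, val in zip(map(tuple, indices_a), values_a):
        acc[idx] = acc.get(idx, 0.0) + val
    for idx, val in zip(map(tuple, indices_b), values_b):
        acc[idx] = acc.get(idx, 0.0) + val
    kept = sorted((i, v) for i, v in acc.items() if abs(v) >= thresh)
    if not kept:
        return np.empty((0, 2), dtype=np.int64), np.array([])
    indices = np.array([i for i, _ in kept], dtype=np.int64)
    values = np.array([v for _, v in kept])
    return indices, values


# Mirrors the 3x3 self-add case above: values double, indices are unchanged.
ind = np.array([[0, 1], [1, 0], [2, 0], [2, 1]])
val = np.array([1.0, 2.0, 3.0, 4.0])
print(sparse_add_coo(ind, val, ind, val))  # values [2, 4, 6, 8]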
import entity from utils import Point, rand def move_north(agent=None, entities=None, environment=None): """ Behavior that causes an agent to move north one unit. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. """ if environment.body.contains_point(Point(agent.body.top_left.position[0], agent.body.top_left.position[1] + 1)): for item in agent.inventory: item.body.top_left.position[1] += 1 item.body.bottom_right.position[1] += 1 agent.body.top_left.position[1] += 1 agent.body.bottom_right.position[1] += 1 agent.time += 1 return agent def move_east(agent=None, entities=None, environment=None): """ Behavior that causes an agent to move east one unit. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. """ if environment.body.contains_point(Point(agent.body.bottom_right.position[0] + 1, agent.body.bottom_right.position[1])): for item in agent.inventory: item.body.top_left.position[0] += 1 item.body.bottom_right.position[0] += 1 agent.body.top_left.position[0] += 1 agent.body.bottom_right.position[0] += 1 agent.time += 1 return agent def move_south(agent=None, entities=None, environment=None): """ Behavior that causes an agent to move south one unit. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. """ if environment.body.contains_point(Point(agent.body.bottom_right.position[0], agent.body.bottom_right.position[1] - 1)): for item in agent.inventory: item.body.top_left.position[1] -= 1 item.body.bottom_right.position[1] -= 1 agent.body.top_left.position[1] -= 1 agent.body.bottom_right.position[1] -= 1 agent.time += 1 return agent def move_west(agent=None, entities=None, environment=None): """ Behavior that causes an agent to move west one unit. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. """ if environment.body.contains_point(Point(agent.body.top_left.position[0] - 1, agent.body.top_left.position[1])): for item in agent.inventory: item.body.top_left.position[0] -= 1 item.body.bottom_right.position[0] -= 1 agent.body.top_left.position[0] -= 1 agent.body.bottom_right.position[0] -= 1 agent.time += 1 return agent def pickup_food(agent=None, entities=None, environment=None): """ Behavior that causes an agent to pickup food. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. """ foods = filter(lambda x: isinstance(x, entity.Food), entities) if not agent.holding_food: for food in foods: if agent.body.contains_rectangle(food.body) and food.interactable: agent.inventory.append(food) food.interactable = False agent.holding_food = True break agent.time += 1 return agent def drop_food(agent=None, entities=None, environment=None): """ Behavior that causes an agent to drop food. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. 
""" nest = filter(lambda x: isinstance(x, entity.Nest), entities)[0] if agent.holding_food: for item in agent.inventory: agent.inventory.remove(item) if nest.body.contains_rectangle(agent.body): nest.food_count += 1 else: item.interactable = True agent.holding_food = False agent.time += 1 return agent def random_walk(agent=None, entities=None, environment=None): """ Behavior that causes an agent to walk in a random direction for one time step. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. """ random_direction = rand.randint(0, 3) if random_direction == 0: return move_north(agent, entities, environment) elif random_direction == 1: return move_east(agent, entities, environment) elif random_direction == 2: return move_south(agent, entities, environment) elif random_direction == 3: return move_west(agent, entities, environment) return agent def return_home(agent=None, entities=None, environment=None): """ Behavior (naive) that causes an agent to return to the nest. :param agent: The agent to perform the behavior. :param entities: A list of entities in the simulation. :param environment: The environment containing the agent. :return: The updated agent. """ nest = filter(lambda x: isinstance(x, entity.Nest), entities)[0] agent.time += int(agent.body.top_left.distance_to(nest.body.top_left)) agent.body.top_left.position[1] = (nest.body.top_left.position[1] + nest.body.bottom_right.position[1]) / 2 agent.body.bottom_right.position[0] = (nest.body.top_left.position[0] + nest.body.bottom_right.position[0]) / 2 agent.body.bottom_right.position[1] = (nest.body.top_left.position[1] + nest.body.bottom_right.position[1]) / 2 agent.body.top_left.position[0] = (nest.body.top_left.position[0] + nest.body.bottom_right.position[0]) / 2 return agent
#!/usr/bin/env python import sys import unittest import seqan.dox.sig_parser as sig_parser class TestParseEnum(unittest.TestCase): def testValid(self): txt = 'enum EnumName' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'enum') self.assertEqual(entry.name, 'EnumName') txt = 'enum EnumName;' self.assertEqual(entry.toString(), txt) class TestParseStruct(unittest.TestCase): def testValid(self): txt = 'struct StructName' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'struct') self.assertEqual(entry.name, 'StructName') txt = 'struct StructName;' self.assertEqual(entry.toString(), txt) class TestParseClass(unittest.TestCase): def testValid(self): txt = 'class ClassName' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'class') self.assertEqual(entry.name, 'ClassName') txt = 'class ClassName;' self.assertEqual(entry.toString(), txt) class TestParseConcept(unittest.TestCase): def testValid(self): txt = 'concept ConceptConcept' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'concept') self.assertEqual(entry.name, 'ConceptConcept') txt = 'concept ConceptConcept;' self.assertEqual(entry.toString(), txt) class TestParseVariable(unittest.TestCase): def testValid(self): txt = 'Type variable;' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'variable') self.assertEqual(entry.var_type, 'Type') self.assertEqual(entry.name, 'variable') txt = 'Type variable;' self.assertEqual(entry.toString(), txt) class TestParseFunction(unittest.TestCase): def testEmptyParams(self): txt = 'void foo()' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.return_type, 'void') self.assertEqual(entry.is_tpl, False) self.assertEqual(len(entry.params), 0) def testWithParams(self): txt = 'void foo(int x, double y)' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.return_type, 'void') self.assertEqual(entry.is_tpl, False) self.assertEqual(len(entry.params), 2) self.assertEqual(entry.params[0].type, 'int') self.assertEqual(entry.params[0].name, 'x') self.assertEqual(entry.params[1].type, 'double') self.assertEqual(entry.params[1].name, 'y') txt = 'void foo(int x, double y);' self.assertEqual(entry.toString(), txt) def testMemberFunction(self): txt = 'void Foo::bar(int x, double y)' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.name, 'Foo::bar') self.assertEqual(entry.return_type, 'void') self.assertEqual(entry.is_tpl, False) self.assertEqual(len(entry.params), 2) self.assertEqual(entry.params[0].type, 'int') self.assertEqual(entry.params[0].name, 'x') self.assertEqual(entry.params[1].type, 'double') self.assertEqual(entry.params[1].name, 'y') txt = 'void Foo::bar(int x, double y);' self.assertEqual(entry.toString(), txt) def testConstructorNoParams(self): txt = 'String::String()' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.name, 'String::String') self.assertEqual(entry.return_type, None) self.assertEqual(entry.is_tpl, False) self.assertEqual(len(entry.params), 0) txt = 'String::String();' self.assertEqual(entry.toString(), txt) def testConstructorParams(self): txt = 'String::String(int x, double y)' parser = sig_parser.SigParser(txt) entry = 
parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.name, 'String::String') self.assertEqual(entry.return_type, None) self.assertEqual(entry.is_tpl, False) self.assertEqual(len(entry.params), 2) self.assertEqual(entry.params[0].type, 'int') self.assertEqual(entry.params[0].name, 'x') self.assertEqual(entry.params[1].type, 'double') self.assertEqual(entry.params[1].name, 'y') txt = 'String::String(int x, double y);' self.assertEqual(entry.toString(), txt) def testDestructorNoParams(self): txt = 'String::~String()' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.name, 'String::~String') self.assertEqual(entry.return_type, None) self.assertEqual(entry.is_tpl, False) self.assertEqual(len(entry.params), 0) txt = 'String::~String();' self.assertEqual(entry.toString(), txt) def testInterfaceFunction(self): txt = 'void Foo#bar(int x, double y)' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.name, 'Foo#bar') self.assertEqual(entry.return_type, 'void') self.assertEqual(entry.is_tpl, False) self.assertEqual(len(entry.params), 2) self.assertEqual(entry.params[0].type, 'int') self.assertEqual(entry.params[0].name, 'x') self.assertEqual(entry.params[1].type, 'double') self.assertEqual(entry.params[1].name, 'y') txt = 'void Foo#bar(int x, double y);' self.assertEqual(entry.toString(), txt) def testConstructor(self): pass class TestTemplateFunction(unittest.TestCase): def testEmptyParams(self): txt = ('template <typename T1, int I>\n' 'void foo()') parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.return_type, 'void') self.assertEqual(entry.is_tpl, True) self.assertEqual(len(entry.params), 0) self.assertEqual(len(entry.tparams), 2) self.assertEqual(entry.tparams[0].type, 'typename') self.assertEqual(entry.tparams[0].name, 'T1') self.assertEqual(entry.tparams[1].type, 'int') self.assertEqual(entry.tparams[1].name, 'I') txt = ('template <typename T1, int I>\n' 'void foo();') self.assertEqual(entry.toString(), txt) def testWithParams(self): txt = ('template <typename T1, int I>\n' 'void foo(int x, double y)') parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'function') self.assertEqual(entry.return_type, 'void') self.assertEqual(entry.is_tpl, True) self.assertEqual(len(entry.params), 2) self.assertEqual(entry.params[0].type, 'int') self.assertEqual(entry.params[0].name, 'x') self.assertEqual(entry.params[1].type, 'double') self.assertEqual(entry.params[1].name, 'y') self.assertEqual(len(entry.tparams), 2) self.assertEqual(entry.tparams[0].type, 'typename') self.assertEqual(entry.tparams[0].name, 'T1') self.assertEqual(entry.tparams[1].type, 'int') self.assertEqual(entry.tparams[1].name, 'I') txt = ('template <typename T1, int I>\n' 'void foo(int x, double y);') self.assertEqual(entry.toString(), txt) class TestTemplateClass(unittest.TestCase): def testEmptyParams(self): txt = ('template <>\n' 'class C') parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'class') self.assertEqual(entry.name, 'C') self.assertEqual(entry.is_tpl, True) self.assertEqual(len(entry.tparams), 0) txt = ('template <>\n' 'class C;') self.assertEqual(entry.toString(), txt) def testWithParams(self): txt = ('template <typename T1, int I>\n' 'class C') parser = sig_parser.SigParser(txt) entry = parser.parse() 
self.assertEqual(entry.kind, 'class') self.assertEqual(entry.is_tpl, True) self.assertEqual(len(entry.tparams), 2) self.assertEqual(entry.name, 'C') self.assertEqual(entry.tparams[0].type, 'typename') self.assertEqual(entry.tparams[0].name, 'T1') self.assertEqual(entry.tparams[1].type, 'int') self.assertEqual(entry.tparams[1].name, 'I') txt = ('template <typename T1, int I>\n' 'class C;') self.assertEqual(entry.toString(), txt) class TestMetafunction(unittest.TestCase): def testValueNoParam(self): txt = 'TInt Metafunction<>::VALUE' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'metafunction') self.assertEqual(entry.name, 'Metafunction') self.assertEqual(entry.return_type, 'TInt') self.assertEqual(entry.return_name, 'VALUE') self.assertEqual(len(entry.tparams), 0) txt = 'TInt Metafunction<>::VALUE;' self.assertEqual(entry.toString(), txt) def testValueInterfaceNoParam(self): txt = 'TInt Klass#Metafunction<>::VALUE' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'metafunction') self.assertEqual(entry.name, 'Klass#Metafunction') self.assertEqual(entry.return_type, 'TInt') self.assertEqual(entry.return_name, 'VALUE') self.assertEqual(len(entry.tparams), 0) txt = 'TInt Klass#Metafunction<>::VALUE;' self.assertEqual(entry.toString(), txt) def testValueWithParam(self): txt = 'TInt Metafunction<T1, T2>::VALUE' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'metafunction') self.assertEqual(entry.name, 'Metafunction') self.assertEqual(entry.return_type, 'TInt') self.assertEqual(entry.return_name, 'VALUE') self.assertEqual(len(entry.tparams), 2) self.assertEqual(entry.tparams[0].name, 'T1') self.assertEqual(entry.tparams[1].name, 'T2') txt = 'TInt Metafunction<T1, T2>::VALUE;' self.assertEqual(entry.toString(), txt) def testTypeNoParam(self): txt = 'Metafunction<>::Type' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'metafunction') self.assertEqual(entry.name, 'Metafunction') self.assertEqual(entry.return_name, 'Type') self.assertEqual(len(entry.tparams), 0) txt = 'Metafunction<>::Type;' self.assertEqual(entry.toString(), txt) def testTypeInterfaceNoParam(self): txt = 'Klass#Metafunction<>::Type' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'metafunction') self.assertEqual(entry.name, 'Klass#Metafunction') self.assertEqual(entry.return_name, 'Type') self.assertEqual(len(entry.tparams), 0) txt = 'Klass#Metafunction<>::Type;' self.assertEqual(entry.toString(), txt) def testTypeWithParam(self): txt = 'Metafunction<T1, T2>::Type' parser = sig_parser.SigParser(txt) entry = parser.parse() self.assertEqual(entry.kind, 'metafunction') self.assertEqual(entry.name, 'Metafunction') self.assertEqual(entry.return_name, 'Type') self.assertEqual(len(entry.tparams), 2) self.assertEqual(entry.tparams[0].name, 'T1') self.assertEqual(entry.tparams[1].name, 'T2') txt = 'Metafunction<T1, T2>::Type;' self.assertEqual(entry.toString(), txt) if __name__ == '__main__': unittest.main()
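# ---------------------------------------------------------------------------
# Hedged sketch of a helper that could collapse the repeated
# parse-then-round-trip pattern in the tests above into one call.  It assumes
# the `sig_parser` import at the top of this test module; `expected_str`
# defaults to the input plus the trailing semicolon toString() adds.
def parse_and_check(testcase, txt, kind, name, expected_str=None):
    entry = sig_parser.SigParser(txt).parse()
    testcase.assertEqual(entry.kind, kind)
    testcase.assertEqual(entry.name, name)
    testcase.assertEqual(entry.toString(), expected_str or txt + ';')
    return entry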
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from sqlalchemy import orm from nova.compute import utils as compute_utils from nova.db.api import api as api_db_api from nova.db.api import models as api_models from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base from nova.objects import fields LOG = logging.getLogger(__name__) DEPRECATED_FIELDS = ['deleted', 'deleted_at'] @api_db_api.context_manager.reader def _aggregate_get_from_db(context, aggregate_id): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload('_hosts')).\ options(orm.joinedload('_metadata')) query = query.filter(api_models.Aggregate.id == aggregate_id) aggregate = query.first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_id) return aggregate @api_db_api.context_manager.reader def _aggregate_get_from_db_by_uuid(context, aggregate_uuid): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload('_hosts')).\ options(orm.joinedload('_metadata')) query = query.filter(api_models.Aggregate.uuid == aggregate_uuid) aggregate = query.first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_uuid) return aggregate def _host_add_to_db(context, aggregate_id, host): try: with api_db_api.context_manager.writer.using(context): # Check to see if the aggregate exists _aggregate_get_from_db(context, aggregate_id) host_ref = api_models.AggregateHost() host_ref.update({"host": host, "aggregate_id": aggregate_id}) host_ref.save(context.session) return host_ref except db_exc.DBDuplicateEntry: raise exception.AggregateHostExists(host=host, aggregate_id=aggregate_id) def _host_delete_from_db(context, aggregate_id, host): count = 0 with api_db_api.context_manager.writer.using(context): # Check to see if the aggregate exists _aggregate_get_from_db(context, aggregate_id) query = context.session.query(api_models.AggregateHost) query = query.filter(api_models.AggregateHost.aggregate_id == aggregate_id) count = query.filter_by(host=host).delete() if count == 0: raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, host=host) def _metadata_add_to_db(context, aggregate_id, metadata, max_retries=10, set_delete=False): all_keys = metadata.keys() for attempt in range(max_retries): try: with api_db_api.context_manager.writer.using(context): query = context.session.query(api_models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id) if set_delete: query.filter(~api_models.AggregateMetadata.key. 
in_(all_keys)).\ delete(synchronize_session=False) already_existing_keys = set() if all_keys: query = query.filter( api_models.AggregateMetadata.key.in_(all_keys)) for meta_ref in query.all(): key = meta_ref.key meta_ref.update({"value": metadata[key]}) already_existing_keys.add(key) new_entries = [] for key, value in metadata.items(): if key in already_existing_keys: continue new_entries.append({"key": key, "value": value, "aggregate_id": aggregate_id}) if new_entries: context.session.execute( api_models.AggregateMetadata.__table__.insert(None), new_entries) return metadata except db_exc.DBDuplicateEntry: # a concurrent transaction has been committed, # try again unless this was the last attempt with excutils.save_and_reraise_exception() as ctxt: if attempt < max_retries - 1: ctxt.reraise = False else: msg = _("Add metadata failed for aggregate %(id)s " "after %(retries)s retries") % \ {"id": aggregate_id, "retries": max_retries} LOG.warning(msg) @api_db_api.context_manager.writer def _metadata_delete_from_db(context, aggregate_id, key): # Check to see if the aggregate exists _aggregate_get_from_db(context, aggregate_id) query = context.session.query(api_models.AggregateMetadata) query = query.filter(api_models.AggregateMetadata.aggregate_id == aggregate_id) count = query.filter_by(key=key).delete() if count == 0: raise exception.AggregateMetadataNotFound( aggregate_id=aggregate_id, metadata_key=key) @api_db_api.context_manager.writer def _aggregate_create_in_db(context, values, metadata=None): query = context.session.query(api_models.Aggregate) query = query.filter(api_models.Aggregate.name == values['name']) aggregate = query.first() if not aggregate: aggregate = api_models.Aggregate() aggregate.update(values) aggregate.save(context.session) # We don't want these to be lazy loaded later. We know there is # nothing here since we just created this aggregate. 
aggregate._hosts = [] aggregate._metadata = [] else: raise exception.AggregateNameExists(aggregate_name=values['name']) if metadata: _metadata_add_to_db(context, aggregate.id, metadata) context.session.expire(aggregate, ['_metadata']) aggregate._metadata return aggregate @api_db_api.context_manager.writer def _aggregate_delete_from_db(context, aggregate_id): # Delete Metadata first context.session.query(api_models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id).\ delete() count = context.session.query(api_models.Aggregate).\ filter(api_models.Aggregate.id == aggregate_id).\ delete() if count == 0: raise exception.AggregateNotFound(aggregate_id=aggregate_id) @api_db_api.context_manager.writer def _aggregate_update_to_db(context, aggregate_id, values): aggregate = _aggregate_get_from_db(context, aggregate_id) set_delete = True if "availability_zone" in values: az = values.pop('availability_zone') if 'metadata' not in values: values['metadata'] = {'availability_zone': az} set_delete = False else: values['metadata']['availability_zone'] = az metadata = values.get('metadata') if metadata is not None: _metadata_add_to_db(context, aggregate_id, values.pop('metadata'), set_delete=set_delete) aggregate.update(values) try: aggregate.save(context.session) except db_exc.DBDuplicateEntry: if 'name' in values: raise exception.AggregateNameExists( aggregate_name=values['name']) else: raise return _aggregate_get_from_db(context, aggregate_id) @base.NovaObjectRegistry.register class Aggregate(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added uuid field # Version 1.3: Added get_by_uuid method VERSION = '1.3' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(nullable=False), 'name': fields.StringField(), 'hosts': fields.ListOfStringsField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), } obj_extra_fields = ['availability_zone'] @staticmethod def _from_db_object(context, aggregate, db_aggregate): for key in aggregate.fields: if key == 'metadata': db_key = 'metadetails' elif key in DEPRECATED_FIELDS and key not in db_aggregate: continue else: db_key = key setattr(aggregate, key, db_aggregate[db_key]) # NOTE: This can be removed when we bump Aggregate to v2.0 aggregate.deleted_at = None aggregate.deleted = False aggregate._context = context aggregate.obj_reset_changes() return aggregate def _assert_no_hosts(self, action): if 'hosts' in self.obj_what_changed(): raise exception.ObjectActionError( action=action, reason='hosts updated inline') @base.remotable_classmethod def get_by_id(cls, context, aggregate_id): db_aggregate = _aggregate_get_from_db(context, aggregate_id) return cls._from_db_object(context, cls(), db_aggregate) @base.remotable_classmethod def get_by_uuid(cls, context, aggregate_uuid): db_aggregate = _aggregate_get_from_db_by_uuid(context, aggregate_uuid) return cls._from_db_object(context, cls(), db_aggregate) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') self._assert_no_hosts('create') updates = self.obj_get_changes() payload = dict(updates) if 'metadata' in updates: # NOTE(danms): For some reason the notification format is weird payload['meta_data'] = payload.pop('metadata') if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() self.uuid = updates['uuid'] LOG.debug('Generated uuid %(uuid)s for aggregate', dict(uuid=updates['uuid'])) 
compute_utils.notify_about_aggregate_update(self._context, "create.start", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.CREATE, phase=fields.NotificationPhase.START) metadata = updates.pop('metadata', None) db_aggregate = _aggregate_create_in_db(self._context, updates, metadata=metadata) self._from_db_object(self._context, self, db_aggregate) payload['aggregate_id'] = self.id compute_utils.notify_about_aggregate_update(self._context, "create.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.CREATE, phase=fields.NotificationPhase.END) @base.remotable def save(self): self._assert_no_hosts('save') updates = self.obj_get_changes() payload = {'aggregate_id': self.id} if 'metadata' in updates: payload['meta_data'] = updates['metadata'] compute_utils.notify_about_aggregate_update(self._context, "updateprop.start", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_PROP, phase=fields.NotificationPhase.START) updates.pop('id', None) db_aggregate = _aggregate_update_to_db(self._context, self.id, updates) compute_utils.notify_about_aggregate_update(self._context, "updateprop.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_PROP, phase=fields.NotificationPhase.END) self._from_db_object(self._context, self, db_aggregate) @base.remotable def update_metadata(self, updates): payload = {'aggregate_id': self.id, 'meta_data': updates} compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.start", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_METADATA, phase=fields.NotificationPhase.START) to_add = {} for key, value in updates.items(): if value is None: try: _metadata_delete_from_db(self._context, self.id, key) except exception.AggregateMetadataNotFound: pass try: self.metadata.pop(key) except KeyError: pass else: to_add[key] = value self.metadata[key] = value _metadata_add_to_db(self._context, self.id, to_add) compute_utils.notify_about_aggregate_update(self._context, "updatemetadata.end", payload) compute_utils.notify_about_aggregate_action( context=self._context, aggregate=self, action=fields.NotificationAction.UPDATE_METADATA, phase=fields.NotificationPhase.END) self.obj_reset_changes(fields=['metadata']) @base.remotable def destroy(self): _aggregate_delete_from_db(self._context, self.id) @base.remotable def add_host(self, host): _host_add_to_db(self._context, self.id, host) if self.hosts is None: self.hosts = [] self.hosts.append(host) self.obj_reset_changes(fields=['hosts']) @base.remotable def delete_host(self, host): _host_delete_from_db(self._context, self.id, host) self.hosts.remove(host) self.obj_reset_changes(fields=['hosts']) @property def availability_zone(self): return self.metadata.get('availability_zone', None) @api_db_api.context_manager.reader def _get_all_from_db(context): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload('_hosts')).\ options(orm.joinedload('_metadata')) return query.all() @api_db_api.context_manager.reader def _get_by_host_from_db(context, host, key=None): query = context.session.query(api_models.Aggregate).\ options(orm.joinedload('_hosts')).\ options(orm.joinedload('_metadata')) query = query.join('_hosts') 
    query = query.filter(api_models.AggregateHost.host == host)
    if key:
        query = query.join("_metadata").filter(
            api_models.AggregateMetadata.key == key)
    return query.all()


@api_db_api.context_manager.reader
def _get_by_metadata_from_db(context, key=None, value=None):
    assert(key is not None or value is not None)
    query = context.session.query(api_models.Aggregate)
    query = query.join("_metadata")
    if key is not None:
        query = query.filter(api_models.AggregateMetadata.key == key)
    if value is not None:
        query = query.filter(api_models.AggregateMetadata.value == value)
    query = query.options(orm.contains_eager("_metadata"))
    query = query.options(orm.joinedload("_hosts"))
    return query.all()


@api_db_api.context_manager.reader
def _get_non_matching_by_metadata_keys_from_db(context, ignored_keys,
                                               key_prefix, value):
    """Filter aggregates based on non-matching metadata.

    Find aggregates with at least one ${key_prefix}*[=${value}] metadata
    entry whose key is not in the ignored_keys list.

    :return: Aggregates with any metadata entry:

        - whose key starts with `key_prefix`; and
        - whose value is `value`; and
        - whose key is *not* in the `ignored_keys` list.
    """
    if not key_prefix:
        raise ValueError(_('key_prefix is a mandatory field.'))

    query = context.session.query(api_models.Aggregate)
    query = query.join("_metadata")
    query = query.filter(api_models.AggregateMetadata.value == value)
    query = query.filter(api_models.AggregateMetadata.key.like(
        key_prefix + '%'))
    if len(ignored_keys) > 0:
        query = query.filter(~api_models.AggregateMetadata.key.in_(
            ignored_keys))

    query = query.options(orm.contains_eager("_metadata"))
    query = query.options(orm.joinedload("_hosts"))
    return query.all()


@base.NovaObjectRegistry.register
class AggregateList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added key argument to get_by_host()
    #              Aggregate <= version 1.1
    # Version 1.2: Added get_by_metadata_key
    # Version 1.3: Added get_by_metadata
    VERSION = '1.3'

    fields = {
        'objects': fields.ListOfObjectsField('Aggregate'),
    }

    @classmethod
    def _filter_db_aggregates(cls, db_aggregates, hosts):
        if not isinstance(hosts, set):
            hosts = set(hosts)
        filtered_aggregates = []
        for db_aggregate in db_aggregates:
            for host in db_aggregate['hosts']:
                if host in hosts:
                    filtered_aggregates.append(db_aggregate)
                    break
        return filtered_aggregates

    @base.remotable_classmethod
    def get_all(cls, context):
        db_aggregates = _get_all_from_db(context)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)

    @base.remotable_classmethod
    def get_by_host(cls, context, host, key=None):
        db_aggregates = _get_by_host_from_db(context, host, key=key)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)

    @base.remotable_classmethod
    def get_by_metadata_key(cls, context, key, hosts=None):
        db_aggregates = _get_by_metadata_from_db(context, key=key)
        if hosts is not None:
            db_aggregates = cls._filter_db_aggregates(db_aggregates, hosts)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)

    @base.remotable_classmethod
    def get_by_metadata(cls, context, key=None, value=None):
        """Return aggregates with a metadata key set to value.

        This returns a list of all aggregates that have a metadata key
        set to some value.  If key is specified, then only values for
        that key will qualify.
        """
        db_aggregates = _get_by_metadata_from_db(context, key=key,
                                                 value=value)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)

    @classmethod
    def get_non_matching_by_metadata_keys(cls, context, ignored_keys,
                                          key_prefix, value):
        """Return aggregates with metadata keys that are not ignored.

        For example, given aggregates with the following metadata:

            'agg1' with trait:HW_CPU_X86_MMX="required"
            'agg2' with trait:HW_CPU_X86_SGX="required"
            'agg3' with trait:HW_CPU_X86_MMX="required"
            'agg3' with trait:HW_CPU_X86_SGX="required"

        the request:

            aggregate_obj.AggregateList.get_non_matching_by_metadata_keys(
                self.context, ['trait:HW_CPU_X86_MMX'], 'trait:',
                value='required')

        returns 'agg2' and 'agg3', since each has at least one 'trait:'
        metadata key that is not in the ignored list.

        :param context: The security context
        :param ignored_keys: List of keys to ignore when matching the
            aggregate metadata keys that start with key_prefix
        :param key_prefix: Only metadata keys that start with this prefix
            are compared
        :param value: Value the metadata entries must have
        :returns: List of aggregates with at least one key_prefix metadata
            key that is not in the supplied ignored_keys
        """
        db_aggregates = _get_non_matching_by_metadata_keys_from_db(
            context, ignored_keys, key_prefix, value)
        return base.obj_make_list(context, objects.AggregateList(context),
                                  objects.Aggregate, db_aggregates)
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'Photo.user' db.alter_column('vkontakte_photos_photo', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['vkontakte_users.User'])) def backwards(self, orm): # User chose to not deal with backwards NULL issues for 'Photo.user' raise RuntimeError("Cannot reverse this migration. 'Photo.user' and its values cannot be restored.") models = { 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'vkontakte_groups.group': { 'Meta': {'ordering': "['name']", 'object_name': 'Group'}, 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}), 'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}), 'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['vkontakte_users.User']", 'symmetrical': 'False'}) }, 'vkontakte_photos.album': { 'Meta': {'ordering': "['remote_id']", 'object_name': 'Album'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'description': ('django.db.models.fields.TextField', [], {}), 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photo_albums'", 'null': 'True', 'to': "orm['vkontakte_groups.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photo_albums'", 'null': 'True', 'to': "orm['vkontakte_users.User']"}), 'privacy': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}), 'size': ('django.db.models.fields.PositiveIntegerField', [], {}), 'thumb_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'thumb_src': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}), 'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'vkontakte_photos.photo': { 'Meta': {'ordering': "['remote_id']", 'object_name': 'Photo'}, 'album': ('django.db.models.fields.related.ForeignKey', 
[], {'related_name': "'photos'", 'to': "orm['vkontakte_photos.Album']"}), 'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'null': 'True', 'to': "orm['vkontakte_groups.Group']"}), 'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos'", 'null': 'True', 'to': "orm['vkontakte_users.User']"}), 'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}), 'src': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}), 'src_big': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}), 'src_small': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}), 'src_xbig': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}), 'src_xxbig': ('django.db.models.fields.CharField', [], {'max_length': "'200'"}), 'tags': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'text': ('django.db.models.fields.TextField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'photos_author'", 'null': 'True', 'to': "orm['vkontakte_users.User']"}), 'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}) }, 'vkontakte_places.city': { 'Meta': {'ordering': "['name']", 'object_name': 'City'}, 'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': "orm['vkontakte_places.Country']"}), 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}) }, 'vkontakte_places.country': { 'Meta': {'ordering': "['name']", 'object_name': 'Country'}, 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}) }, 'vkontakte_users.user': { 'Meta': {'ordering': "['remote_id']", 'object_name': 'User'}, 'about': ('django.db.models.fields.TextField', [], {}), 'activity': ('django.db.models.fields.TextField', [], {}), 'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'books': ('django.db.models.fields.TextField', [], {}), 'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'friends_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers_users'", 'symmetrical': 'False', 'to': "orm['vkontakte_users.User']"}), 'games': ('django.db.models.fields.TextField', [], {}), 'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'has_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'interests': ('django.db.models.fields.TextField', [], {}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'livejournal': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'movies': ('django.db.models.fields.TextField', [], {}), 'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}), 'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}), 'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}), 'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'skype': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'tv': ('django.db.models.fields.TextField', [], {}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'user_videos': 
('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'wall_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'vkontakte_wall.comment': { 'Meta': {'ordering': "['post', '-date']", 'object_name': 'Comment'}, 'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['contenttypes.ContentType']"}), 'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'date': ('django.db.models.fields.DateTimeField', [], {}), 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'from_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wall_comments'", 'to': "orm['vkontakte_wall.Post']"}), 'raw_html': ('django.db.models.fields.TextField', [], {}), 'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}), 'reply_for_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'reply_for_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_wall.Comment']", 'null': 'True'}), 'text': ('django.db.models.fields.TextField', [], {}), 'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_comments'", 'to': "orm['contenttypes.ContentType']"}), 'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'vkontakte_wall.post': { 'Meta': {'ordering': "['wall_owner_id', '-date']", 'object_name': 'Post'}, 'attachments': ('django.db.models.fields.TextField', [], {}), 'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_posts'", 'to': "orm['contenttypes.ContentType']"}), 'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'copy_owner_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'copy_post_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'copy_text': ('django.db.models.fields.TextField', [], {}), 'date': ('django.db.models.fields.DateTimeField', [], {}), 'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'geo': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'like_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'like_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}), 'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'media': ('django.db.models.fields.TextField', [], {}), 'online': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}), 'post_source': ('django.db.models.fields.TextField', [], {}), 'raw_html': ('django.db.models.fields.TextField', [], {}), 'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}), 'reply_count': 
('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}), 'repost_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'repost_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}), 'reposts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'signer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'text': ('django.db.models.fields.TextField', [], {}), 'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_posts'", 'to': "orm['contenttypes.ContentType']"}), 'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {}) } } complete_apps = ['vkontakte_photos']
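# For reference, the alter_column in forwards() corresponds to relaxing the
# Photo.user foreign key to allow NULL. On the model side that is roughly
# the following (a sketch derived from the frozen definition above, not the
# actual vkontakte_photos/models.py):
#
#     class Photo(models.Model):
#         ...
#         user = models.ForeignKey(User, related_name='photos_author',
#                                  null=True)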
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import errno import functools import logging import os import shutil import subprocess import sys import tempfile import threading import time import weakref from oslo.config import cfg from networking_mlnx.openstack.common import fileutils from networking_mlnx.openstack.common._i18n import _, _LE, _LI LOG = logging.getLogger(__name__) util_opts = [ cfg.BoolOpt('disable_process_locking', default=False, help='Enables or disables inter-process locks.'), cfg.StrOpt('lock_path', default=os.environ.get("NEUTRON_LOCK_PATH"), help='Directory to use for lock files.') ] CONF = cfg.CONF CONF.register_opts(util_opts) def set_defaults(lock_path): cfg.set_defaults(util_opts, lock_path=lock_path) class _FileLock(object): """Lock implementation which allows multiple locks, working around issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does not require any cleanup. Since the lock is always held on a file descriptor rather than outside of the process, the lock gets dropped automatically if the process crashes, even if __exit__ is not executed. There are no guarantees regarding usage by multiple green threads in a single process here. This lock works only between processes. Exclusive access between local threads should be achieved using the semaphores in the @synchronized decorator. Note these locks are released when the descriptor is closed, so it's not safe to close the file descriptor while another green thread holds the lock. Just opening and closing the lock file can break synchronisation, so lock files must be accessed only using this abstraction. """ def __init__(self, name): self.lockfile = None self.fname = name def acquire(self): basedir = os.path.dirname(self.fname) if not os.path.exists(basedir): fileutils.ensure_tree(basedir) LOG.info(_LI('Created lock path: %s'), basedir) self.lockfile = open(self.fname, 'w') while True: try: # Using non-blocking locks since green threads are not # patched to deal with blocking locking calls. # Also upon reading the MSDN docs for locking(), it seems # to have a laughable 10 attempts "blocking" mechanism. 
self.trylock() LOG.debug('Got file lock "%s"', self.fname) return True except IOError as e: if e.errno in (errno.EACCES, errno.EAGAIN): # external locks synchronise things like iptables # updates - give it some time to prevent busy spinning time.sleep(0.01) else: raise threading.ThreadError(_("Unable to acquire lock on" " `%(filename)s` due to" " %(exception)s") % {'filename': self.fname, 'exception': e}) def __enter__(self): self.acquire() return self def release(self): try: self.unlock() self.lockfile.close() LOG.debug('Released file lock "%s"', self.fname) except IOError: LOG.exception(_LE("Could not release the acquired lock `%s`"), self.fname) def __exit__(self, exc_type, exc_val, exc_tb): self.release() def exists(self): return os.path.exists(self.fname) def trylock(self): raise NotImplementedError() def unlock(self): raise NotImplementedError() class _WindowsLock(_FileLock): def trylock(self): msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) def unlock(self): msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) class _FcntlLock(_FileLock): def trylock(self): fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) def unlock(self): fcntl.lockf(self.lockfile, fcntl.LOCK_UN) if os.name == 'nt': import msvcrt InterProcessLock = _WindowsLock else: import fcntl InterProcessLock = _FcntlLock _semaphores = weakref.WeakValueDictionary() _semaphores_lock = threading.Lock() def _get_lock_path(name, lock_file_prefix, lock_path=None): # NOTE(mikal): the lock name cannot contain directory # separators name = name.replace(os.sep, '_') if lock_file_prefix: sep = '' if lock_file_prefix.endswith('-') else '-' name = '%s%s%s' % (lock_file_prefix, sep, name) local_lock_path = lock_path or CONF.lock_path if not local_lock_path: raise cfg.RequiredOptError('lock_path') return os.path.join(local_lock_path, name) def external_lock(name, lock_file_prefix=None, lock_path=None): LOG.debug('Attempting to grab external lock "%(lock)s"', {'lock': name}) lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path) return InterProcessLock(lock_file_path) def remove_external_lock_file(name, lock_file_prefix=None): """Remove an external lock file when it's not used anymore This will be helpful when we have a lot of lock files """ with internal_lock(name): lock_file_path = _get_lock_path(name, lock_file_prefix) try: os.remove(lock_file_path) except OSError: LOG.info(_LI('Failed to remove file %(file)s'), {'file': lock_file_path}) def internal_lock(name): with _semaphores_lock: try: sem = _semaphores[name] LOG.debug('Using existing semaphore "%s"', name) except KeyError: sem = threading.Semaphore() _semaphores[name] = sem LOG.debug('Created new semaphore "%s"', name) return sem @contextlib.contextmanager def lock(name, lock_file_prefix=None, external=False, lock_path=None): """Context based lock This function yields a `threading.Semaphore` instance (if we don't use eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is True, in which case, it'll yield an InterProcessLock instance. :param lock_file_prefix: The lock_file_prefix argument is used to provide lock files on disk with a meaningful prefix. :param external: The external keyword argument denotes whether this lock should work across multiple processes. This means that if two different workers both run a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. 
""" int_lock = internal_lock(name) with int_lock: LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name}) try: if external and not CONF.disable_process_locking: ext_lock = external_lock(name, lock_file_prefix, lock_path) with ext_lock: yield ext_lock else: yield int_lock finally: LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name}) def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): """Synchronization decorator. Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one thread will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. """ def wrap(f): @functools.wraps(f) def inner(*args, **kwargs): try: with lock(name, lock_file_prefix, external, lock_path): LOG.debug('Got semaphore / lock "%(function)s"', {'function': f.__name__}) return f(*args, **kwargs) finally: LOG.debug('Semaphore / lock released "%(function)s"', {'function': f.__name__}) return inner return wrap def synchronized_with_prefix(lock_file_prefix): """Partial object generator for the synchronization decorator. Redefine @synchronized in each project like so:: (in nova/utils.py) from nova.openstack.common import lockutils synchronized = lockutils.synchronized_with_prefix('nova-') (in nova/foo.py) from nova import utils @utils.synchronized('mylock') def bar(self, *args): ... The lock_file_prefix argument is used to provide lock files on disk with a meaningful prefix. """ return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) def main(argv): """Create a dir for locks and pass it to command from arguments If you run this: python -m openstack.common.lockutils python setup.py testr <etc> a temporary directory will be created for all your locks and passed to all your tests in an environment variable. The temporary dir will be deleted afterwards and the return value will be preserved. """ lock_dir = tempfile.mkdtemp() os.environ["NEUTRON_LOCK_PATH"] = lock_dir try: ret_val = subprocess.call(argv[1:]) finally: shutil.rmtree(lock_dir, ignore_errors=True) return ret_val if __name__ == '__main__': sys.exit(main(sys.argv))
"""Extend the basic Accessory and Bridge functions.""" from datetime import timedelta from functools import partial, wraps from inspect import getmodule import logging from pyhap.accessory import Accessory, Bridge from pyhap.accessory_driver import AccessoryDriver from pyhap.const import CATEGORY_OTHER from homeassistant.components import cover, vacuum from homeassistant.components.cover import ( DEVICE_CLASS_GARAGE, DEVICE_CLASS_GATE, DEVICE_CLASS_WINDOW, ) from homeassistant.components.media_player import DEVICE_CLASS_TV from homeassistant.const import ( ATTR_BATTERY_CHARGING, ATTR_BATTERY_LEVEL, ATTR_DEVICE_CLASS, ATTR_ENTITY_ID, ATTR_SERVICE, ATTR_SUPPORTED_FEATURES, ATTR_UNIT_OF_MEASUREMENT, CONF_NAME, CONF_TYPE, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_TEMPERATURE, LIGHT_LUX, PERCENTAGE, STATE_ON, STATE_UNAVAILABLE, TEMP_CELSIUS, TEMP_FAHRENHEIT, __version__, ) from homeassistant.core import Context, callback as ha_callback, split_entity_id from homeassistant.helpers.event import ( async_track_state_change_event, track_point_in_utc_time, ) from homeassistant.util import dt as dt_util from homeassistant.util.decorator import Registry from .const import ( ATTR_DISPLAY_NAME, ATTR_INTERGRATION, ATTR_MANUFACTURER, ATTR_MODEL, ATTR_SOFTWARE_VERSION, ATTR_VALUE, BRIDGE_MODEL, BRIDGE_SERIAL_NUMBER, CHAR_BATTERY_LEVEL, CHAR_CHARGING_STATE, CHAR_STATUS_LOW_BATTERY, CONF_FEATURE_LIST, CONF_LINKED_BATTERY_CHARGING_SENSOR, CONF_LINKED_BATTERY_SENSOR, CONF_LOW_BATTERY_THRESHOLD, DEBOUNCE_TIMEOUT, DEFAULT_LOW_BATTERY_THRESHOLD, DEVICE_CLASS_CO, DEVICE_CLASS_CO2, DEVICE_CLASS_PM25, EVENT_HOMEKIT_CHANGED, HK_CHARGING, HK_NOT_CHARGABLE, HK_NOT_CHARGING, MANUFACTURER, SERV_BATTERY_SERVICE, TYPE_FAUCET, TYPE_OUTLET, TYPE_SHOWER, TYPE_SPRINKLER, TYPE_SWITCH, TYPE_VALVE, ) from .util import ( convert_to_float, dismiss_setup_message, format_sw_version, show_setup_message, validate_media_player_features, ) _LOGGER = logging.getLogger(__name__) SWITCH_TYPES = { TYPE_FAUCET: "Valve", TYPE_OUTLET: "Outlet", TYPE_SHOWER: "Valve", TYPE_SPRINKLER: "Valve", TYPE_SWITCH: "Switch", TYPE_VALVE: "Valve", } TYPES = Registry() def debounce(func): """Decorate function to debounce callbacks from HomeKit.""" @ha_callback def call_later_listener(self, *args): """Handle call_later callback.""" debounce_params = self.debounce.pop(func.__name__, None) if debounce_params: self.hass.async_add_executor_job(func, self, *debounce_params[1:]) @wraps(func) def wrapper(self, *args): """Start async timer.""" debounce_params = self.debounce.pop(func.__name__, None) if debounce_params: debounce_params[0]() # remove listener remove_listener = track_point_in_utc_time( self.hass, partial(call_later_listener, self), dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT), ) self.debounce[func.__name__] = (remove_listener, *args) logger.debug( "%s: Start %s timeout", self.entity_id, func.__name__.replace("set_", "") ) name = getmodule(func).__name__ logger = logging.getLogger(name) return wrapper def get_accessory(hass, driver, state, aid, config): """Take state and return an accessory object if supported.""" if not aid: _LOGGER.warning( 'The entity "%s" is not supported, since it ' "generates an invalid aid, please change it", state.entity_id, ) return None a_type = None name = config.get(CONF_NAME, state.name) if state.domain == "alarm_control_panel": a_type = "SecuritySystem" elif state.domain in ("binary_sensor", "device_tracker", "person"): a_type = "BinarySensor" elif state.domain == "climate": a_type = "Thermostat" 
elif state.domain == "cover": device_class = state.attributes.get(ATTR_DEVICE_CLASS) features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) if device_class in (DEVICE_CLASS_GARAGE, DEVICE_CLASS_GATE) and features & ( cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE ): a_type = "GarageDoorOpener" elif ( device_class == DEVICE_CLASS_WINDOW and features & cover.SUPPORT_SET_POSITION ): a_type = "Window" elif features & cover.SUPPORT_SET_POSITION: a_type = "WindowCovering" elif features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE): a_type = "WindowCoveringBasic" elif state.domain == "fan": a_type = "Fan" elif state.domain == "humidifier": a_type = "HumidifierDehumidifier" elif state.domain == "light": a_type = "Light" elif state.domain == "lock": a_type = "Lock" elif state.domain == "media_player": device_class = state.attributes.get(ATTR_DEVICE_CLASS) feature_list = config.get(CONF_FEATURE_LIST, []) if device_class == DEVICE_CLASS_TV: a_type = "TelevisionMediaPlayer" elif validate_media_player_features(state, feature_list): a_type = "MediaPlayer" elif state.domain == "sensor": device_class = state.attributes.get(ATTR_DEVICE_CLASS) unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) if device_class == DEVICE_CLASS_TEMPERATURE or unit in ( TEMP_CELSIUS, TEMP_FAHRENHEIT, ): a_type = "TemperatureSensor" elif device_class == DEVICE_CLASS_HUMIDITY and unit == PERCENTAGE: a_type = "HumiditySensor" elif device_class == DEVICE_CLASS_PM25 or DEVICE_CLASS_PM25 in state.entity_id: a_type = "AirQualitySensor" elif device_class == DEVICE_CLASS_CO: a_type = "CarbonMonoxideSensor" elif device_class == DEVICE_CLASS_CO2 or DEVICE_CLASS_CO2 in state.entity_id: a_type = "CarbonDioxideSensor" elif device_class == DEVICE_CLASS_ILLUMINANCE or unit in ("lm", LIGHT_LUX): a_type = "LightSensor" elif state.domain == "switch": switch_type = config.get(CONF_TYPE, TYPE_SWITCH) a_type = SWITCH_TYPES[switch_type] elif state.domain == "vacuum": features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) if features & (vacuum.SUPPORT_START | vacuum.SUPPORT_RETURN_HOME): a_type = "DockVacuum" else: a_type = "Switch" elif state.domain in ("automation", "input_boolean", "remote", "scene", "script"): a_type = "Switch" elif state.domain == "water_heater": a_type = "WaterHeater" elif state.domain == "camera": a_type = "Camera" if a_type is None: return None _LOGGER.debug('Add "%s" as "%s"', state.entity_id, a_type) return TYPES[a_type](hass, driver, name, state.entity_id, aid, config) class HomeAccessory(Accessory): """Adapter class for Accessory.""" def __init__( self, hass, driver, name, entity_id, aid, config, *args, category=CATEGORY_OTHER, **kwargs, ): """Initialize a Accessory object.""" super().__init__(driver=driver, display_name=name, aid=aid, *args, **kwargs) self.config = config or {} domain = split_entity_id(entity_id)[0].replace("_", " ") if ATTR_MANUFACTURER in self.config: manufacturer = self.config[ATTR_MANUFACTURER] elif ATTR_INTERGRATION in self.config: manufacturer = self.config[ATTR_INTERGRATION].replace("_", " ").title() else: manufacturer = f"{MANUFACTURER} {domain}".title() if ATTR_MODEL in self.config: model = self.config[ATTR_MODEL] else: model = domain.title() if ATTR_SOFTWARE_VERSION in self.config: sw_version = format_sw_version(self.config[ATTR_SOFTWARE_VERSION]) else: sw_version = __version__ self.set_info_service( manufacturer=manufacturer, model=model, serial_number=entity_id, firmware_revision=sw_version, ) self.category = category self.entity_id = entity_id self.hass = hass self.debounce = {} 
self._subscriptions = [] self._char_battery = None self._char_charging = None self._char_low_battery = None self.linked_battery_sensor = self.config.get(CONF_LINKED_BATTERY_SENSOR) self.linked_battery_charging_sensor = self.config.get( CONF_LINKED_BATTERY_CHARGING_SENSOR ) self.low_battery_threshold = self.config.get( CONF_LOW_BATTERY_THRESHOLD, DEFAULT_LOW_BATTERY_THRESHOLD ) """Add battery service if available""" entity_attributes = self.hass.states.get(self.entity_id).attributes battery_found = entity_attributes.get(ATTR_BATTERY_LEVEL) if self.linked_battery_sensor: state = self.hass.states.get(self.linked_battery_sensor) if state is not None: battery_found = state.state else: self.linked_battery_sensor = None _LOGGER.warning( "%s: Battery sensor state missing: %s", self.entity_id, self.linked_battery_sensor, ) if not battery_found: return _LOGGER.debug("%s: Found battery level", self.entity_id) if self.linked_battery_charging_sensor: state = self.hass.states.get(self.linked_battery_charging_sensor) if state is None: self.linked_battery_charging_sensor = None _LOGGER.warning( "%s: Battery charging binary_sensor state missing: %s", self.entity_id, self.linked_battery_charging_sensor, ) else: _LOGGER.debug("%s: Found battery charging", self.entity_id) serv_battery = self.add_preload_service(SERV_BATTERY_SERVICE) self._char_battery = serv_battery.configure_char(CHAR_BATTERY_LEVEL, value=0) self._char_charging = serv_battery.configure_char( CHAR_CHARGING_STATE, value=HK_NOT_CHARGABLE ) self._char_low_battery = serv_battery.configure_char( CHAR_STATUS_LOW_BATTERY, value=0 ) @property def available(self): """Return if accessory is available.""" state = self.hass.states.get(self.entity_id) return state is not None and state.state != STATE_UNAVAILABLE async def run(self): """Handle accessory driver started event. Run inside the HAP-python event loop. """ self.hass.add_job(self.run_handler) async def run_handler(self): """Handle accessory driver started event. Run inside the Home Assistant event loop. 
""" state = self.hass.states.get(self.entity_id) self.async_update_state_callback(state) self._subscriptions.append( async_track_state_change_event( self.hass, [self.entity_id], self.async_update_event_state_callback ) ) battery_charging_state = None battery_state = None if self.linked_battery_sensor: linked_battery_sensor_state = self.hass.states.get( self.linked_battery_sensor ) battery_state = linked_battery_sensor_state.state battery_charging_state = linked_battery_sensor_state.attributes.get( ATTR_BATTERY_CHARGING ) self._subscriptions.append( async_track_state_change_event( self.hass, [self.linked_battery_sensor], self.async_update_linked_battery_callback, ) ) elif state is not None: battery_state = state.attributes.get(ATTR_BATTERY_LEVEL) if self.linked_battery_charging_sensor: state = self.hass.states.get(self.linked_battery_charging_sensor) battery_charging_state = state and state.state == STATE_ON self._subscriptions.append( async_track_state_change_event( self.hass, [self.linked_battery_charging_sensor], self.async_update_linked_battery_charging_callback, ) ) elif battery_charging_state is None and state is not None: battery_charging_state = state.attributes.get(ATTR_BATTERY_CHARGING) if battery_state is not None or battery_charging_state is not None: self.async_update_battery(battery_state, battery_charging_state) @ha_callback def async_update_event_state_callback(self, event): """Handle state change event listener callback.""" self.async_update_state_callback(event.data.get("new_state")) @ha_callback def async_update_state_callback(self, new_state): """Handle state change listener callback.""" _LOGGER.debug("New_state: %s", new_state) if new_state is None: return battery_state = None battery_charging_state = None if ( not self.linked_battery_sensor and ATTR_BATTERY_LEVEL in new_state.attributes ): battery_state = new_state.attributes.get(ATTR_BATTERY_LEVEL) if ( not self.linked_battery_charging_sensor and ATTR_BATTERY_CHARGING in new_state.attributes ): battery_charging_state = new_state.attributes.get(ATTR_BATTERY_CHARGING) if battery_state is not None or battery_charging_state is not None: self.async_update_battery(battery_state, battery_charging_state) self.async_update_state(new_state) @ha_callback def async_update_linked_battery_callback(self, event): """Handle linked battery sensor state change listener callback.""" new_state = event.data.get("new_state") if new_state is None: return if self.linked_battery_charging_sensor: battery_charging_state = None else: battery_charging_state = new_state.attributes.get(ATTR_BATTERY_CHARGING) self.async_update_battery(new_state.state, battery_charging_state) @ha_callback def async_update_linked_battery_charging_callback(self, event): """Handle linked battery charging sensor state change listener callback.""" new_state = event.data.get("new_state") if new_state is None: return self.async_update_battery(None, new_state.state == STATE_ON) @ha_callback def async_update_battery(self, battery_level, battery_charging): """Update battery service if available. Only call this function if self._support_battery_level is True. 
""" if not self._char_battery: # Battery appeared after homekit was started return battery_level = convert_to_float(battery_level) if battery_level is not None: if self._char_battery.value != battery_level: self._char_battery.set_value(battery_level) is_low_battery = 1 if battery_level < self.low_battery_threshold else 0 if self._char_low_battery.value != is_low_battery: self._char_low_battery.set_value(is_low_battery) _LOGGER.debug( "%s: Updated battery level to %d", self.entity_id, battery_level ) # Charging state can appear after homekit was started if battery_charging is None or not self._char_charging: return hk_charging = HK_CHARGING if battery_charging else HK_NOT_CHARGING if self._char_charging.value != hk_charging: self._char_charging.set_value(hk_charging) _LOGGER.debug( "%s: Updated battery charging to %d", self.entity_id, hk_charging ) @ha_callback def async_update_state(self, new_state): """Handle state change to update HomeKit value. Overridden by accessory types. """ raise NotImplementedError() def call_service(self, domain, service, service_data, value=None): """Fire event and call service for changes from HomeKit.""" self.hass.add_job(self.async_call_service, domain, service, service_data, value) async def async_call_service(self, domain, service, service_data, value=None): """Fire event and call service for changes from HomeKit. This method must be run in the event loop. """ event_data = { ATTR_ENTITY_ID: self.entity_id, ATTR_DISPLAY_NAME: self.display_name, ATTR_SERVICE: service, ATTR_VALUE: value, } context = Context() self.hass.bus.async_fire(EVENT_HOMEKIT_CHANGED, event_data, context=context) await self.hass.services.async_call( domain, service, service_data, context=context ) @ha_callback def async_stop(self): """Cancel any subscriptions when the bridge is stopped.""" while self._subscriptions: self._subscriptions.pop(0)() class HomeBridge(Bridge): """Adapter class for Bridge.""" def __init__(self, hass, driver, name): """Initialize a Bridge object.""" super().__init__(driver, name) self.set_info_service( firmware_revision=__version__, manufacturer=MANUFACTURER, model=BRIDGE_MODEL, serial_number=BRIDGE_SERIAL_NUMBER, ) self.hass = hass def setup_message(self): """Prevent print of pyhap setup message to terminal.""" def get_snapshot(self, info): """Get snapshot from accessory if supported.""" acc = self.accessories.get(info["aid"]) if acc is None: raise ValueError("Requested snapshot for missing accessory") if not hasattr(acc, "get_snapshot"): raise ValueError( "Got a request for snapshot, but the Accessory " 'does not define a "get_snapshot" method' ) return acc.get_snapshot(info) class HomeDriver(AccessoryDriver): """Adapter class for AccessoryDriver.""" def __init__(self, hass, entry_id, bridge_name, **kwargs): """Initialize a AccessoryDriver object.""" super().__init__(**kwargs) self.hass = hass self._entry_id = entry_id self._bridge_name = bridge_name def pair(self, client_uuid, client_public): """Override super function to dismiss setup message if paired.""" success = super().pair(client_uuid, client_public) if success: dismiss_setup_message(self.hass, self._entry_id) return success def unpair(self, client_uuid): """Override super function to show setup message if unpaired.""" super().unpair(client_uuid) show_setup_message( self.hass, self._entry_id, self._bridge_name, self.state.pincode, self.accessory.xhm_uri(), )
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Special Math Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from scipy import special from scipy import stats import tensorflow as tf sm = tf.contrib.bayesflow.special_math def _check_strictly_increasing(array_1d): diff = np.diff(array_1d) np.testing.assert_array_less(0, diff) def _make_grid(dtype, grid_spec): """Returns a uniform grid + noise, reshaped to shape argument.""" rng = np.random.RandomState(0) num_points = np.prod(grid_spec.shape) grid = np.linspace( grid_spec.min, grid_spec.max, num=num_points).astype(dtype) grid_spacing = (grid_spec.max - grid_spec.min) / num_points grid += 0.1 * grid_spacing * rng.randn(*grid.shape) # More useful if it's sorted (e.g. for testing monotonicity, or debugging). grid = np.sort(grid) return np.reshape(grid, grid_spec.shape) GridSpec = collections.namedtuple("GridSpec", ["min", "max", "shape"]) ErrorSpec = collections.namedtuple("ErrorSpec", ["rtol", "atol"]) class NdtrTest(tf.test.TestCase): _use_log = False # Grid min/max chosen to ensure 0 < cdf(x) < 1. _grid32 = GridSpec(min=-12.9, max=5., shape=[100]) _grid64 = GridSpec(min=-37.5, max=8., shape=[100]) _error32 = ErrorSpec(rtol=1e-4, atol=0.) _error64 = ErrorSpec(rtol=1e-6, atol=0.) def _test_grid(self, dtype, grid_spec, error_spec): if self._use_log: self._test_grid_log(dtype, grid_spec, error_spec) else: self._test_grid_no_log(dtype, grid_spec, error_spec) def _test_grid_log(self, dtype, grid_spec, error_spec): with self.test_session(): grid = _make_grid(dtype, grid_spec) actual = sm.log_ndtr(grid).eval() # Basic tests. self.assertTrue(np.isfinite(actual).all()) # On the grid, -inf < log_cdf(x) < 0. In this case, we should be able # to use a huge grid because we have used tricks to escape numerical # difficulties. self.assertTrue((actual < 0).all()) _check_strictly_increasing(actual) # Versus scipy. expected = special.log_ndtr(grid) # Scipy prematurely goes to zero at some places that we don't. So don't # include these in the comparison. self.assertAllClose(expected.astype(np.float64)[expected < 0], actual.astype(np.float64)[expected < 0], rtol=error_spec.rtol, atol=error_spec.atol) def _test_grid_no_log(self, dtype, grid_spec, error_spec): with self.test_session(): grid = _make_grid(dtype, grid_spec) actual = sm.ndtr(grid).eval() # Basic tests. self.assertTrue(np.isfinite(actual).all()) # On the grid, 0 < cdf(x) < 1. The grid cannot contain everything due # to numerical limitations of cdf. self.assertTrue((actual > 0).all()) self.assertTrue((actual < 1).all()) _check_strictly_increasing(actual) # Versus scipy. expected = special.ndtr(grid) # Scipy prematurely goes to zero at some places that we don't. So don't # include these in the comparison. 
self.assertAllClose(expected.astype(np.float64)[expected < 0], actual.astype(np.float64)[expected < 0], rtol=error_spec.rtol, atol=error_spec.atol) def test_float32(self): self._test_grid(np.float32, self._grid32, self._error32) def test_float64(self): self._test_grid(np.float64, self._grid64, self._error64) class LogNdtrTestLower(NdtrTest): _use_log = True _grid32 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT32_LOWER, shape=[100]) _grid64 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT64_LOWER, shape=[100]) _error32 = ErrorSpec(rtol=1e-4, atol=0.) _error64 = ErrorSpec(rtol=1e-4, atol=0.) # The errors are quite large when the input is > 6 or so. Also, # scipy.special.log_ndtr becomes zero very early, before 10, # (due to ndtr becoming 1). We approximate Log[1 + epsilon] as epsilon, and # avoid this issue. class LogNdtrTestMid(NdtrTest): _use_log = True _grid32 = GridSpec( min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100]) _grid64 = GridSpec( min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100]) # Differences show up as soon as we're in the tail, so add some atol. _error32 = ErrorSpec(rtol=0.1, atol=1e-7) _error64 = ErrorSpec(rtol=0.1, atol=1e-7) class LogNdtrTestUpper(NdtrTest): _use_log = True _grid32 = GridSpec( min=sm.LOGNDTR_FLOAT32_UPPER, max=12., # Beyond this, log_cdf(x) may be zero. shape=[100]) _grid64 = GridSpec( min=sm.LOGNDTR_FLOAT64_UPPER, max=35., # Beyond this, log_cdf(x) may be zero. shape=[100]) _error32 = ErrorSpec(rtol=1e-6, atol=1e-14) _error64 = ErrorSpec(rtol=1e-6, atol=1e-14) class NdtrGradientTest(tf.test.TestCase): _use_log = False _grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8]) _error32 = ErrorSpec(rtol=1e-4, atol=0) _error64 = ErrorSpec(rtol=1e-7, atol=0) def assert_all_true(self, v): self.assertAllEqual(np.ones_like(v, dtype=np.bool), v) def assert_all_false(self, v): self.assertAllEqual(np.zeros_like(v, dtype=np.bool), v) def _test_grad_finite(self, dtype): with self.test_session(): x = tf.Variable([-100., 0., 100.], dtype=dtype) output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x)) grad_output = tf.gradients(output, x) tf.global_variables_initializer().run() self.assert_all_true(np.isfinite(output.eval())) self.assert_all_true(np.isfinite(grad_output[0].eval())) def _test_grad_accuracy(self, dtype, grid_spec, error_spec): raw_grid = _make_grid(dtype, grid_spec) grid = tf.convert_to_tensor(raw_grid) with self.test_session(): fn = sm.log_ndtr if self._use_log else sm.ndtr # If there are N points in the grid, # grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of # the ith output point w.r.t. the jth grid point. We only expect the # diagonal to be nonzero. # TODO(b/31131137): Replace tf.test.compute_gradient with our own custom # gradient evaluation to ensure we correctly handle small function delta. grad_eval, _ = tf.test.compute_gradient( grid, grid_spec.shape, fn(grid), grid_spec.shape) grad_eval = np.diag(grad_eval) # Check for NaN separately in order to get informative failures. self.assert_all_false(np.isnan(grad_eval)) self.assert_all_true(grad_eval > 0.) self.assert_all_true(np.isfinite(grad_eval)) # Do the same checks but explicitly compute the gradient. # (We did this because we're not sure if we trust # tf.test.compute_gradient.) grad_eval = tf.gradients(fn(grid), grid)[0].eval() self.assert_all_false(np.isnan(grad_eval)) if self._use_log: g = np.reshape(grad_eval, [-1]) half = np.ceil(len(g)/2) self.assert_all_true(g[:half] > 0.) self.assert_all_true(g[half:] >= 0.) 
      else:
        # The ndtr gradient will only be non-zero in the range [-14, 14] for
        # float32 and [-38, 38] for float64.
        self.assert_all_true(grad_eval >= 0.)
      self.assert_all_true(np.isfinite(grad_eval))

      # Versus scipy.
      expected = stats.norm.pdf(raw_grid)
      if self._use_log:
        expected /= special.ndtr(raw_grid)
        expected[np.isnan(expected)] = 0.
      # Scipy prematurely goes to zero at some places that we don't.  So don't
      # include these in the comparison.
      self.assertAllClose(expected.astype(np.float64)[expected > 0],
                          grad_eval.astype(np.float64)[expected > 0],
                          rtol=error_spec.rtol, atol=error_spec.atol)

  def test_float32(self):
    self._test_grad_accuracy(np.float32, self._grid, self._error32)
    self._test_grad_finite(np.float32)

  def test_float64(self):
    self._test_grad_accuracy(np.float64, self._grid, self._error64)
    self._test_grad_finite(np.float64)


class LogNdtrGradientTest(NdtrGradientTest):
  _use_log = True


if __name__ == "__main__":
  tf.test.main()
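# Illustrative aside (not part of the test suite above): why log_ndtr needs
# special handling in the lower tail.  The naive log(ndtr(x)) underflows to
# -inf once ndtr(x) drops below the smallest positive float64 (~1e-308),
# while the asymptotic expansion used by scipy's log_ndtr (and sm.log_ndtr)
# stays finite far deeper into the tail.
import numpy as np
from scipy import special

x = np.array([-5., -20., -40.])
with np.errstate(divide='ignore'):
    naive = np.log(special.ndtr(x))  # finite, finite, -inf (underflow)
stable = special.log_ndtr(x)         # finite at all three points
print(naive)   # approx [-15.06, -203.92, -inf]
print(stable)  # approx [-15.06, -203.92, -804.61]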
#!/usr/bin/env python """Import Google Cloud Platform projects into an organization.""" import argparse import re import sys from lib.google import Google def add_project_to_org(admin, project): """Add the project to the organization.""" output = [] project_id = project['projectId'] body = project.copy() del body['bindings'] del body['google'] del body['owner'] body['parent'] = { 'type': 'organization', 'id': admin.organization_id } params = { 'projectId': project_id, 'body': body, } # connect as service account sa = Google() sa.auth(service_account_file='serviceaccount.json') try: sa.crm.projects().update(**params).execute() text = ' + added project to organization %s.' % ( admin.organization_id, ) output.append(text) except Exception as e: print 'ERROR adding project %s to organization %s!' % ( project_id, admin.organization_id, ) print e return output def add_serviceaccount_owner(admin, project_id, project): """Add our service account as an owner on the project.""" sa_email = admin.credentials.service_account_email newbindings = [] update = False for b in project['bindings']: user = 'serviceAccount:%s' % (sa_email) if b['role'] == 'roles/owner' and user not in b['members']: b['members'].append(user) update = True newbindings.append(b) body = { 'policy': { 'bindings': newbindings } } params = { 'resource': project_id, 'body': body, } output = [] if update: g = project['google'] try: g.crm.projects().setIamPolicy(**params).execute() text = ' + added %s as project owner.' % ( sa_email ) output.append(text) except Exception as e: print 'ERROR: Failed to add service account to %s!' % ( project_id ) print e return output def display_projects(projects): """Display the list of projects to import.""" if projects: print '\nFound %s projects to import:' % (len(projects)) for project_id in projects: owner = projects[project_id]['owner'] print ' * %s (%s)' % (project_id, owner) def get_domain_users(admin): """Return all users in the domain.""" # get users as superadmin print 'Retrieving users from Google Admin SDK Directory API...' users = admin.get_users() print 'Found %s users in domain.' % (len(users)) return users def get_args(): """Return the arguments from argparse.""" parser = argparse.ArgumentParser() parser.add_argument('superadmin') return parser.parse_args() def get_domain_projects(users): """Return a dict of all domain projects that have no parent.""" domain_projects = {} # scan all users and get all their projects print '\nScanning all users for projects without a parent...' 
for user in users: email = user['primaryEmail'] # authenticate as the user g = Google() g.auth( service_account_file='serviceaccount.json', sub_account=email, ) # retrieve the user's projects try: projects = g.get_projects() except Exception as e: print 'ERROR retrieving %s: %s' % ( email, e ) continue # scan the projects user_projects = scan_user_projects(g, email, projects) domain_projects.update(user_projects) return domain_projects def get_organization_id(admin): """Return the organization_id based on the super admin email.""" organization_id = None # get list of organizations organizations = admin.get_organizations() # find the correct organization for o in organizations: domain = o['displayName'] if domain == admin.domain: name = o['name'] organization_id = name.replace('organizations/', '') print 'Organization: %s [%s] (customer: %s)\n' % ( domain, organization_id, o['owner']['directoryCustomerId'], ) return organization_id def move_projects(admin, projects): """Update and move projects into the organization.""" for project_id in sorted(projects): project = projects[project_id] # add service account as project owner output = add_serviceaccount_owner(admin, project_id, project) # add project to organization output += add_project_to_org(admin, project) if output: print ' * %s:' % (project_id) print '\n'.join(output) print def scan_user_projects(g, email, projects): """Return the list of projects to import.""" user_projects = {} output = [] for p in sorted(projects, key=lambda x: x['name']): # skip projects that are not active if p['lifecycleState'] != 'ACTIVE': continue # look for projects that have no parent if 'parent' not in p or not p['parent']: project_id = p['projectId'] params = { 'resource': project_id, 'body': {}, } # retrieve the iam policy for the project policy = g.crm.projects().getIamPolicy(**params).execute() owner = None # scan each of the bindings to see if user is owner bindings = policy.get('bindings', []) for b in bindings: # skip bindings other than owner if b['role'] != 'roles/owner': continue # see if user is one of the owners if 'user:%s' % (email) in b['members']: owner = email # skip projects where the user is not the owner if not owner: continue # create some text to output about the user's project text = ' * %s: %s [%s] (%s)' % ( p['name'], p['projectId'], p['projectNumber'], p['lifecycleState'], ) output.append(text) # add bindings, google auth and owner to the project data p['bindings'] = bindings p['google'] = g p['owner'] = email # add project to list of projects to import user_projects[project_id] = p # display the output for this user if output: print ' %s:' % (email) print '\n'.join(output) return user_projects def main(): """Main function.""" # get arguments args = get_args() # create google class object admin = Google(superadmin=args.superadmin) # authenticate with the service account admin.auth( service_account_file='serviceaccount.json', sub_account=args.superadmin, ) # get domain users users = get_domain_users(admin) # get domain-owned projects that have no parent projects = get_domain_projects(users) # exit if there are no projects to import if not projects: sys.exit(0) # display the list of all projects we can import display_projects(projects) # ask if we are sure we want to continue print '\nPreparing to move %s projects into org: %s...' % ( len(projects), admin.domain, ) prompt = " ---> Are you sure you want to continue? [y/N]: " confirm = raw_input(prompt) # continue if Yes or yes or Y or y if not re.search('y|Y', confirm): print 'Exiting.' 
        sys.exit(1)

    print

    # get the organization ID based on the super admin email
    admin.organization_id = get_organization_id(admin)

    # fail if the organization_id is not set
    if not admin.organization_id:
        print 'ERROR: organization_id not found!'
        sys.exit(1)

    # move projects into the organization
    move_projects(admin, projects)

    print 'Done.'


if __name__ == "__main__":
    main()
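# Hedged sketch: the `Google` helper imported from lib.google above is
# project-local, so its internals are assumptions.  Using the real
# google-auth and google-api-python-client libraries, an equivalent of
# auth(service_account_file=..., sub_account=...) for the Cloud Resource
# Manager might look like this (the function name is illustrative):
from google.oauth2 import service_account
from googleapiclient import discovery

SCOPES = ['https://www.googleapis.com/auth/cloud-platform']


def make_crm_client(service_account_file, sub_account=None):
    """Build a Cloud Resource Manager client, optionally impersonating a
    domain user via domain-wide delegation."""
    creds = service_account.Credentials.from_service_account_file(
        service_account_file, scopes=SCOPES)
    if sub_account:
        # Domain-wide delegation: act on behalf of the given user.
        creds = creds.with_subject(sub_account)
    return discovery.build('cloudresourcemanager', 'v1', credentials=creds)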
# Copyright (c) 2012 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Track resources like memory and disk for a compute host. Provides the scheduler with useful information about availability through the ComputeNode model. """ from nova.compute import claims from nova.compute import instance_types from nova.compute import task_states from nova.compute import vm_states from nova import conductor from nova import context from nova import db from nova import exception from nova import notifications from nova.openstack.common import cfg from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging resource_tracker_opts = [ cfg.IntOpt('reserved_host_disk_mb', default=0, help='Amount of disk in MB to reserve for the host'), cfg.IntOpt('reserved_host_memory_mb', default=512, help='Amount of memory in MB to reserve for the host'), cfg.StrOpt('compute_stats_class', default='nova.compute.stats.Stats', help='Class that will manage stats for the local compute host') ] CONF = cfg.CONF CONF.register_opts(resource_tracker_opts) LOG = logging.getLogger(__name__) COMPUTE_RESOURCE_SEMAPHORE = claims.COMPUTE_RESOURCE_SEMAPHORE class ResourceTracker(object): """Compute helper class for keeping track of resource usage as instances are built and destroyed. """ def __init__(self, host, driver, nodename): self.host = host self.driver = driver self.nodename = nodename self.compute_node = None self.stats = importutils.import_object(CONF.compute_stats_class) self.tracked_instances = {} self.tracked_migrations = {} self.conductor_api = conductor.API() @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-') def instance_claim(self, context, instance_ref, limits=None): """Indicate that some resources are needed for an upcoming compute instance build operation. This should be called before the compute node is about to perform an instance build operation that will consume additional resources. :param context: security context :param instance_ref: instance to reserve resources for :param limits: Dict of oversubscription limits for memory, disk, and CPUs. :returns: A Claim ticket representing the reserved resources. It can be used to revert the resource usage if an error occurs during the instance build. 
""" if self.disabled: # compute_driver doesn't support resource tracking, just # set the 'host' and node fields and continue the build: self._set_instance_host_and_node(context, instance_ref) return claims.NopClaim() # sanity checks: if instance_ref['host']: LOG.warning(_("Host field should not be set on the instance until " "resources have been claimed."), instance=instance_ref) if instance_ref['node']: LOG.warning(_("Node field should be not be set on the instance " "until resources have been claimed."), instance=instance_ref) claim = claims.Claim(instance_ref, self) if claim.test(self.compute_node, limits): self._set_instance_host_and_node(context, instance_ref) # Mark resources in-use and update stats self._update_usage_from_instance(self.compute_node, instance_ref) # persist changes to the compute node: self._update(context, self.compute_node) return claim else: raise exception.ComputeResourcesUnavailable() @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-') def resize_claim(self, context, instance_ref, instance_type, limits=None): """Indicate that resources are needed for a resize operation to this compute host. :param context: security context :param instance_ref: instance to reserve resources for :param instance_type: new instance_type being resized to :param limits: Dict of oversubscription limits for memory, disk, and CPUs. :returns: A Claim ticket representing the reserved resources. This should be turned into finalize a resource claim or free resources after the compute operation is finished. """ if self.disabled: # compute_driver doesn't support resource tracking, just # generate the migration record and continue the resize: migration_ref = self._create_migration(context, instance_ref, instance_type) return claims.NopClaim(migration=migration_ref) claim = claims.ResizeClaim(instance_ref, instance_type, self) if claim.test(self.compute_node, limits): migration_ref = self._create_migration(context, instance_ref, instance_type) claim.migration = migration_ref # Mark the resources in-use for the resize landing on this # compute host: self._update_usage_from_migration(self.compute_node, migration_ref) elevated = context.elevated() self._update(elevated, self.compute_node) return claim else: raise exception.ComputeResourcesUnavailable() def _create_migration(self, context, instance, instance_type): """Create a migration record for the upcoming resize. This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim will not be lost if the audit process starts. """ # TODO(russellb): no-db-compute: Send the old instance type # info that is needed via rpc so db access isn't required # here. old_instance_type_id = instance['instance_type_id'] old_instance_type = instance_types.get_instance_type( old_instance_type_id) return db.migration_create(context.elevated(), {'instance_uuid': instance['uuid'], 'source_compute': instance['host'], 'source_node': instance['node'], 'dest_compute': self.host, 'dest_node': self.nodename, 'dest_host': self.driver.get_host_ip_addr(), 'old_instance_type_id': old_instance_type['id'], 'new_instance_type_id': instance_type['id'], 'status': 'pre-migrating'}) def _set_instance_host_and_node(self, context, instance_ref): """Tag the instance as belonging to this host. This should be done while the COMPUTE_RESOURCES_SEMPAHORE is held so the resource claim will not be lost if the audit process starts. 
""" values = {'host': self.host, 'node': self.nodename, 'launched_on': self.host} self.conductor_api.instance_update(context, instance_ref['uuid'], **values) instance_ref['host'] = self.host instance_ref['launched_on'] = self.host instance_ref['node'] = self.nodename def abort_instance_claim(self, instance): """Remove usage from the given instance""" # flag the instance as deleted to revert the resource usage # and associated stats: instance['vm_state'] = vm_states.DELETED self._update_usage_from_instance(self.compute_node, instance) ctxt = context.get_admin_context() self._update(ctxt, self.compute_node) def abort_resize_claim(self, instance_uuid, instance_type): """Remove usage for an incoming migration""" if instance_uuid in self.tracked_migrations: migration, itype = self.tracked_migrations.pop(instance_uuid) if instance_type['id'] == migration['new_instance_type_id']: self.stats.update_stats_for_migration(itype, sign=-1) self._update_usage(self.compute_node, itype, sign=-1) ctxt = context.get_admin_context() self._update(ctxt, self.compute_node) @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-') def update_usage(self, context, instance): """Update the resource usage and stats after a change in an instance """ if self.disabled: return uuid = instance['uuid'] # don't update usage for this instance unless it submitted a resource # claim first: if uuid in self.tracked_instances: self._update_usage_from_instance(self.compute_node, instance) self._update(context.elevated(), self.compute_node) @property def disabled(self): return self.compute_node is None @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-') def update_available_resource(self, context): """Override in-memory calculations of compute node resource usage based on data audited from the hypervisor layer. Add in resource claims in progress to account for operations that have declared a need for resources, but not necessarily retrieved them from the hypervisor layer yet. 
""" LOG.audit(_("Auditing locally available compute resources")) resources = self.driver.get_available_resource(self.nodename) if not resources: # The virt driver does not support this function LOG.audit(_("Virt driver does not support " "'get_available_resource' Compute tracking is disabled.")) self.compute_node = None return self._verify_resources(resources) self._report_hypervisor_resource_view(resources) # Grab all instances assigned to this node: instances = db.instance_get_all_by_host_and_node(context, self.host, self.nodename) # Now calculate usage based on instance utilization: self._update_usage_from_instances(resources, instances) # Grab all in-progress migrations: migrations = db.migration_get_in_progress_by_host_and_node(context, self.host, self.nodename) self._update_usage_from_migrations(resources, migrations) self._report_final_resource_view(resources) self._sync_compute_node(context, resources) def _sync_compute_node(self, context, resources): """Create or update the compute node DB record""" if not self.compute_node: # we need a copy of the ComputeNode record: service = self._get_service(context) if not service: # no service record, disable resource return compute_node_refs = service['compute_node'] if compute_node_refs: for cn in compute_node_refs: if cn.get('hypervisor_hostname') == self.nodename: self.compute_node = cn break if not self.compute_node: # Need to create the ComputeNode record: resources['service_id'] = service['id'] self._create(context, resources) LOG.info(_('Compute_service record created for %s ') % self.host) else: # just update the record: self._update(context, resources, prune_stats=True) LOG.info(_('Compute_service record updated for %s ') % self.host) def _create(self, context, values): """Create the compute node in the DB""" # initialize load stats from existing instances: compute_node = db.compute_node_create(context, values) self.compute_node = dict(compute_node) def _get_service(self, context): try: return db.service_get_all_compute_by_host(context, self.host)[0] except exception.NotFound: LOG.warn(_("No service record for host %s"), self.host) def _report_hypervisor_resource_view(self, resources): """Log the hypervisor's view of free memory in and free disk. This is just a snapshot of resource usage recorded by the virt driver. """ free_ram_mb = resources['memory_mb'] - resources['memory_mb_used'] free_disk_gb = resources['local_gb'] - resources['local_gb_used'] LOG.debug(_("Hypervisor: free ram (MB): %s") % free_ram_mb) LOG.debug(_("Hypervisor: free disk (GB): %s") % free_disk_gb) vcpus = resources['vcpus'] if vcpus: free_vcpus = vcpus - resources['vcpus_used'] LOG.debug(_("Hypervisor: free VCPUs: %s") % free_vcpus) else: LOG.debug(_("Hypervisor: VCPU information unavailable")) def _report_final_resource_view(self, resources): """Report final calculate of free memory and free disk including instance calculations and in-progress resource claims. These values will be exposed via the compute node table to the scheduler. 
""" LOG.audit(_("Free ram (MB): %s") % resources['free_ram_mb']) LOG.audit(_("Free disk (GB): %s") % resources['free_disk_gb']) vcpus = resources['vcpus'] if vcpus: free_vcpus = vcpus - resources['vcpus_used'] LOG.audit(_("Free VCPUS: %s") % free_vcpus) else: LOG.audit(_("Free VCPU information unavailable")) def _update(self, context, values, prune_stats=False): """Persist the compute node updates to the DB""" compute_node = db.compute_node_update(context, self.compute_node['id'], values, prune_stats) self.compute_node = dict(compute_node) def confirm_resize(self, context, migration, status='confirmed'): """Cleanup usage for a confirmed resize""" elevated = context.elevated() db.migration_update(elevated, migration['id'], {'status': status}) self.update_available_resource(elevated) def revert_resize(self, context, migration, status='reverted'): """Cleanup usage for a reverted resize""" self.confirm_resize(context, migration, status) def _update_usage(self, resources, usage, sign=1): resources['memory_mb_used'] += sign * usage['memory_mb'] resources['local_gb_used'] += sign * usage['root_gb'] resources['local_gb_used'] += sign * usage['ephemeral_gb'] # free ram and disk may be negative, depending on policy: resources['free_ram_mb'] = (resources['memory_mb'] - resources['memory_mb_used']) resources['free_disk_gb'] = (resources['local_gb'] - resources['local_gb_used']) resources['running_vms'] = self.stats.num_instances resources['vcpus_used'] = self.stats.num_vcpus_used def _update_usage_from_migration(self, resources, migration): """Update usage for a single migration. The record may represent an incoming or outbound migration. """ uuid = migration['instance_uuid'] LOG.audit(_("Updating from migration %s") % uuid) incoming = (migration['dest_compute'] == self.host and migration['dest_node'] == self.nodename) outbound = (migration['source_compute'] == self.host and migration['source_node'] == self.nodename) same_node = (incoming and outbound) instance = self.tracked_instances.get(uuid, None) itype = None if same_node: # same node resize. 
            # record usage for whichever instance type the
            # instance is *not* in:
            if (instance['instance_type_id'] ==
                    migration['old_instance_type_id']):
                itype = migration['new_instance_type_id']
            else:
                # instance record already has new flavor, hold space for a
                # possible revert to the old instance type:
                itype = migration['old_instance_type_id']

        elif incoming and not instance:
            # instance has not yet migrated here:
            itype = migration['new_instance_type_id']

        elif outbound and not instance:
            # instance migrated, but record usage for a possible revert:
            itype = migration['old_instance_type_id']

        if itype:
            instance_type = instance_types.get_instance_type(itype)
            self.stats.update_stats_for_migration(instance_type)
            self._update_usage(resources, instance_type)
            resources['stats'] = self.stats
            self.tracked_migrations[uuid] = (migration, instance_type)

    def _update_usage_from_migrations(self, resources, migrations):

        self.tracked_migrations.clear()

        filtered = {}

        # do some defensive filtering against bad migration records in the
        # database:
        for migration in migrations:

            instance = migration['instance']

            if not instance:
                # migration referencing deleted instance
                continue

            uuid = instance['uuid']

            # skip migration if instance isn't in a resize state:
            if not self._instance_in_resize_state(instance):
                LOG.warn(_("Instance not resizing, skipping migration."),
                         instance_uuid=uuid)
                continue

            # filter to most recently updated migration for each instance:
            m = filtered.get(uuid, None)
            if not m or migration['updated_at'] >= m['updated_at']:
                filtered[uuid] = migration

        for migration in filtered.values():
            self._update_usage_from_migration(resources, migration)

    def _update_usage_from_instance(self, resources, instance):
        """Update usage for a single instance."""

        uuid = instance['uuid']
        is_new_instance = uuid not in self.tracked_instances
        is_deleted_instance = instance['vm_state'] == vm_states.DELETED

        if is_new_instance:
            self.tracked_instances[uuid] = jsonutils.to_primitive(instance)
            sign = 1

        if is_deleted_instance:
            self.tracked_instances.pop(uuid)
            sign = -1

        self.stats.update_stats_for_instance(instance)

        # if it's a new or deleted instance:
        if is_new_instance or is_deleted_instance:
            # new instance, update compute node resource usage:
            self._update_usage(resources, instance, sign=sign)

        resources['current_workload'] = self.stats.calculate_workload()
        resources['stats'] = self.stats

    def _update_usage_from_instances(self, resources, instances):
        """Calculate resource usage based on instance utilization.  This is
        different from the hypervisor's view as it will account for all
        instances assigned to the local compute host, even if they are not
        currently powered on.
""" self.tracked_instances.clear() # purge old stats self.stats.clear() # set some intiial values, reserve room for host/hypervisor: resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024 resources['memory_mb_used'] = CONF.reserved_host_memory_mb resources['vcpus_used'] = 0 resources['free_ram_mb'] = (resources['memory_mb'] - resources['memory_mb_used']) resources['free_disk_gb'] = (resources['local_gb'] - resources['local_gb_used']) resources['current_workload'] = 0 resources['running_vms'] = 0 for instance in instances: self._update_usage_from_instance(resources, instance) def _verify_resources(self, resources): resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info", "vcpus_used", "memory_mb_used", "local_gb_used"] missing_keys = [k for k in resource_keys if k not in resources] if missing_keys: reason = _("Missing keys: %s") % missing_keys raise exception.InvalidInput(reason=reason) def _instance_in_resize_state(self, instance): vm = instance['vm_state'] task = instance['task_state'] if vm == vm_states.RESIZED: return True if (vm == vm_states.ACTIVE and task in [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]): return True return False
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import functools as func

import eventlet
import jsonschema
from oslo_log import log as logging
import six

from murano.common.i18n import _, _LE

LOG = logging.getLogger(__name__)


class TraverseHelper(object):
    value_type = (six.string_types, int, float, bool)

    @staticmethod
    def get(path, source):
        """Provides the ability to traverse a data source made up of any
        combination of lists and dicts.

        Has simple rules for selecting an item from a list:

        * each item should have an id property
        * to select an item from the list, specify its id value

        Examples:

            source = {'obj': {'attr': True}}
            value = TraverseHelper.get('/obj/attr', source)

            source = {'obj': [
                {'id': '1', 'value': 1},
                {'id': '2s', 'value': 2},
            ]}
            value = TraverseHelper.get('/obj/2s/value', source)

        :param path: string with path to desired value
        :param source: python object (list or dict)
        :return: object
        :raise: ValueError if object is malformed
        """
        queue = collections.deque(filter(lambda x: x, path.split('/')))

        while len(queue):
            path = queue.popleft()

            if isinstance(source, list):
                idx_source = source
                iterator = (
                    i for i in source
                    if i.get('?', {}).get('id') == path
                )
                source = next(iterator, None)
                if source is None and path.isdigit():
                    source = idx_source[int(path)]
            elif isinstance(source, dict):
                source = source[path]
            elif isinstance(source, TraverseHelper.value_type):
                break
            else:
                raise ValueError(_('Source object or path is malformed'))

        return source

    @staticmethod
    def update(path, value, source):
        """Updates value selected with specified path.

        Warning: Root object cannot be updated.

        :param path: string with path to desired value
        :param value: value
        :param source: python object (list or dict)
        """
        parent_path = '/'.join(path.split('/')[:-1])
        node = TraverseHelper.get(parent_path, source)
        key = path[1:].split('/')[-1]
        node[key] = value

    @staticmethod
    def insert(path, value, source):
        """Inserts new item to selected list.

        :param path: string with path to desired value
        :param value: value
        :param source: List
        """
        node = TraverseHelper.get(path, source)
        node.append(value)

    @staticmethod
    def extend(path, value, source):
        """Extend list by appending elements from the iterable.

        :param path: string with path to desired value
        :param value: value
        :param source: List
        """
        node = TraverseHelper.get(path, source)
        node.extend(value)

    @staticmethod
    def remove(path, source):
        """Removes selected item from source.
        :param path: string with path to desired value
        :param source: python object (list or dict)
        """
        parent_path = '/'.join(path.split('/')[:-1])
        node = TraverseHelper.get(parent_path, source)
        key = path[1:].split('/')[-1]

        if isinstance(node, list):
            iterator = (i for i in node if i.get('?', {}).get('id') == key)
            item = next(iterator, None)
            if item is None and key.isdigit():
                del node[int(key)]
            else:
                node.remove(item)
        elif isinstance(node, dict):
            del node[key]
        else:
            raise ValueError(_('Source object or path is malformed'))


def is_different(obj1, obj2):
    """Stripped-down version of deep.diff comparator

    Compares arbitrary nested objects, handles circular links, but doesn't
    point to the first difference as deep.diff does.
    """

    class Difference(Exception):
        pass

    def is_in(o, st):
        for _o in st:
            if o is _o:
                return True
        return False

    def rec(o1, o2, stack1=(), stack2=()):
        if is_in(o1, stack1) and is_in(o2, stack2):
            # circular reference detected - break the loop
            return
        elif is_in(o1, stack1):
            raise Difference()
        else:
            stack1 += (o1,)
            stack2 += (o2,)

        if o1 is o2:
            return
        elif (isinstance(o1, six.string_types) and
                isinstance(o2, six.string_types)) and o1 == o2:
            return
        elif type(o1) != type(o2):
            raise Difference()
        elif isinstance(o1, dict):
            # check for keys inequality
            rec(o1.keys(), o2.keys(), stack1, stack2)
            for key in o1.keys():
                rec(o1[key], o2[key], stack1, stack2)
        elif isinstance(o1, (list, tuple, set)):
            if len(o1) != len(o2):
                raise Difference()
            else:
                for _o1, _o2 in zip(o1, o2):
                    rec(_o1, _o2, stack1, stack2)
        elif hasattr(o1, '__dict__'):
            return rec(o1.__dict__, o2.__dict__, stack1, stack2)
        elif o1 != o2:
            raise Difference()

    try:
        rec(obj1, obj2)
    except Difference:
        return True
    else:
        return False


def build_entity_map(value):
    def build_entity_map_recursive(value, id_map):
        if isinstance(value, dict):
            if '?' in value and 'id' in value['?']:
                id_map[value['?']['id']] = value
            for v in six.itervalues(value):
                build_entity_map_recursive(v, id_map)
        if isinstance(value, list):
            for item in value:
                build_entity_map_recursive(item, id_map)

    id_map = {}
    build_entity_map_recursive(value, id_map)
    return id_map


def retry(ExceptionToCheck, tries=4, delay=3, backoff=2):
    """Retry calling the decorated function using an exponential backoff.

    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry

    :param ExceptionToCheck: the exception to check. may be a tuple of
        exceptions to check
    :type ExceptionToCheck: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier e.g. value of 2 will double the delay
        each retry
    :type backoff: int
    """
    def deco_retry(f):
        @func.wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay

            forever = mtries == -1

            while forever or mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    LOG.exception(_LE("An exception occurred {exc}. Retrying "
                                      "in {time} seconds").format(
                                          exc=e, time=mdelay))
                    eventlet.sleep(mdelay)

                    if not forever:
                        mtries -= 1

                    if mdelay < 60:
                        mdelay *= backoff

            return f(*args, **kwargs)
        return f_retry
    return deco_retry


def handle(f):
    """Handles exception in wrapped function and writes to LOG."""
    @func.wraps(f)
    def f_handle(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            LOG.exception(e)
    return f_handle


def validate_body(schema):
    def deco_validate_body(f):
        @func.wraps(f)
        def f_validate_body(*args, **kwargs):
            if 'body' in kwargs:
                jsonschema.validate(kwargs['body'], schema)
            return f(*args, **kwargs)
        return f_validate_body
    return deco_validate_body
# Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Controllers for the creator dashboard, notifications, and creating new activities. """ from core.controllers import base from core.domain import collection_domain from core.domain import collection_services from core.domain import config_domain from core.domain import exp_domain from core.domain import exp_services from core.domain import feedback_services from core.domain import subscription_services from core.domain import summary_services from core.domain import user_jobs_continuous from core.domain import user_services import feconf import utils EXPLORATION_ID_KEY = 'explorationId' COLLECTION_ID_KEY = 'collectionId' class NotificationsDashboardPage(base.BaseHandler): """Page with notifications for the user.""" @base.require_user def get(self): if self.username in config_domain.BANNED_USERNAMES.value: raise self.UnauthorizedUserException( 'You do not have the credentials to access this page.') elif user_services.has_fully_registered(self.user_id): self.values.update({ 'meta_description': feconf.DASHBOARD_PAGE_DESCRIPTION, 'nav_mode': feconf.NAV_MODE_DASHBOARD, }) self.render_template( 'dashboard/notifications_dashboard.html', redirect_url_on_logout='/') else: self.redirect(utils.set_url_query_parameter( feconf.SIGNUP_URL, 'return_url', '/notifications_dashboard')) class NotificationsDashboardHandler(base.BaseHandler): """Provides data for the user notifications dashboard.""" PAGE_NAME_FOR_CSRF = 'dashboard' def get(self): """Handles GET requests.""" if self.user_id is None: raise self.PageNotFoundException job_queued_msec, recent_notifications = ( user_jobs_continuous.DashboardRecentUpdatesAggregator.get_recent_notifications( # pylint: disable=line-too-long self.user_id)) last_seen_msec = ( subscription_services.get_last_seen_notifications_msec( self.user_id)) # Replace author_ids with their usernames. author_ids = [ notification['author_id'] for notification in recent_notifications if notification['author_id']] author_usernames = user_services.get_usernames(author_ids) author_id_to_username = { None: '', } for ind, author_id in enumerate(author_ids): author_id_to_username[author_id] = author_usernames[ind] for notification in recent_notifications: notification['author_username'] = ( author_id_to_username[notification['author_id']]) del notification['author_id'] subscription_services.record_user_has_seen_notifications( self.user_id, job_queued_msec if job_queued_msec else 0.0) self.values.update({ # This may be None if no job has ever run for this user. 'job_queued_msec': job_queued_msec, # This may be None if this is the first time the user has seen # the dashboard. 
'last_seen_msec': last_seen_msec, 'recent_notifications': recent_notifications, }) self.render_json(self.values) class DashboardPage(base.BaseHandler): """Page showing the user's creator dashboard.""" PAGE_NAME_FOR_CSRF = 'dashboard' PAGE_HAS_CREATE_EXP_REQUEST = True @base.require_user def get(self): if self.username in config_domain.BANNED_USERNAMES.value: raise self.UnauthorizedUserException( 'You do not have the credentials to access this page.') elif user_services.has_fully_registered(self.user_id): self.values.update({ 'nav_mode': feconf.NAV_MODE_DASHBOARD, 'can_create_collections': ( self.username in config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value ), 'allow_yaml_file_upload': feconf.ALLOW_YAML_FILE_UPLOAD, }) self.render_template( 'dashboard/dashboard.html', redirect_url_on_logout='/') else: self.redirect(utils.set_url_query_parameter( feconf.SIGNUP_URL, 'return_url', feconf.DASHBOARD_URL)) class DashboardHandler(base.BaseHandler): """Provides data for the user's creator dashboard page.""" def get(self): """Handles GET requests.""" if self.user_id is None: raise self.PageNotFoundException def _get_intro_card_color(category): return ( feconf.CATEGORIES_TO_COLORS[category] if category in feconf.CATEGORIES_TO_COLORS else feconf.DEFAULT_COLOR) exploration_ids_subscribed_to = ( subscription_services.get_exploration_ids_subscribed_to( self.user_id)) subscribed_exploration_summaries = filter(None, ( exp_services.get_exploration_summaries_matching_ids( exploration_ids_subscribed_to))) subscribed_collection_summaries = filter(None, ( collection_services.get_collection_summaries_matching_ids( subscription_services.get_collection_ids_subscribed_to( self.user_id)))) explorations_list = summary_services.get_displayable_exp_summary_dicts( subscribed_exploration_summaries) collections_list = [] feedback_thread_analytics = ( feedback_services.get_thread_analytics_multi( exploration_ids_subscribed_to)) for ind, exploration in enumerate(explorations_list): exploration.update(feedback_thread_analytics[ind].to_dict()) explorations_list = sorted( explorations_list, key=lambda x: (x['num_open_threads'], x['last_updated_msec']), reverse=True) if (self.username in config_domain.WHITELISTED_COLLECTION_EDITOR_USERNAMES.value): for collection_summary in subscribed_collection_summaries: # TODO(sll): Reuse _get_displayable_collection_summary_dicts() # in summary_services, instead of replicating it like this. 
                collections_list.append({
                    'id': collection_summary.id,
                    'title': collection_summary.title,
                    'category': collection_summary.category,
                    'objective': collection_summary.objective,
                    'language_code': collection_summary.language_code,
                    'last_updated': utils.get_time_in_millisecs(
                        collection_summary.collection_model_last_updated),
                    'created_on': utils.get_time_in_millisecs(
                        collection_summary.collection_model_created_on),
                    'status': collection_summary.status,
                    'community_owned': collection_summary.community_owned,
                    'thumbnail_icon_url': (
                        utils.get_thumbnail_icon_url_for_category(
                            collection_summary.category)),
                    'thumbnail_bg_color': utils.get_hex_color_for_category(
                        collection_summary.category),
                })

        self.values.update({
            'explorations_list': explorations_list,
            'collections_list': collections_list,
            'dashboard_stats': user_services.get_user_dashboard_stats(
                self.user_id)
        })
        self.render_json(self.values)


class NotificationsHandler(base.BaseHandler):
    """Provides data about unseen notifications."""

    def get(self):
        """Handles GET requests."""
        num_unseen_notifications = 0
        if self.user_id and self.username:
            last_seen_msec = (
                subscription_services.get_last_seen_notifications_msec(
                    self.user_id))
            _, recent_notifications = (
                user_jobs_continuous.DashboardRecentUpdatesAggregator.get_recent_notifications(  # pylint: disable=line-too-long
                    self.user_id))
            for notification in recent_notifications:
                if (notification['last_updated_ms'] > last_seen_msec and
                        notification['author_id'] != self.user_id):
                    num_unseen_notifications += 1

        self.render_json({
            'num_unseen_notifications': num_unseen_notifications,
        })


class NewExploration(base.BaseHandler):
    """Creates a new exploration."""

    PAGE_NAME_FOR_CSRF = feconf.CSRF_PAGE_NAME_CREATE_EXPLORATION

    @base.require_fully_signed_up
    def post(self):
        """Handles POST requests."""
        title = self.payload.get('title', feconf.DEFAULT_EXPLORATION_TITLE)

        new_exploration_id = exp_services.get_new_exploration_id()
        exploration = exp_domain.Exploration.create_default_exploration(
            new_exploration_id, title=title)
        exp_services.save_new_exploration(self.user_id, exploration)

        self.render_json({
            EXPLORATION_ID_KEY: new_exploration_id
        })


class NewCollection(base.BaseHandler):
    """Creates a new collection."""

    PAGE_NAME_FOR_CSRF = 'dashboard'

    @base.require_fully_signed_up
    def post(self):
        """Handles POST requests."""
        new_collection_id = collection_services.get_new_collection_id()
        collection = collection_domain.Collection.create_default_collection(
            new_collection_id)
        collection_services.save_new_collection(self.user_id, collection)

        self.render_json({
            COLLECTION_ID_KEY: new_collection_id
        })


class UploadExploration(base.BaseHandler):
    """Uploads a new exploration."""

    PAGE_NAME_FOR_CSRF = 'dashboard'

    @base.require_fully_signed_up
    def post(self):
        """Handles POST requests."""
        yaml_content = self.request.get('yaml_file')

        new_exploration_id = exp_services.get_new_exploration_id()
        if feconf.ALLOW_YAML_FILE_UPLOAD:
            exp_services.save_new_exploration_from_yaml_and_assets(
                self.user_id, yaml_content, new_exploration_id, [])
            self.render_json({
                EXPLORATION_ID_KEY: new_exploration_id
            })
        else:
            raise self.InvalidInputException(
                'This server does not allow file uploads.')


class DashboardRedirectPage(base.BaseHandler):
    """A page that redirects to the main Dashboard page."""

    def get(self):
        """Handles GET requests."""
        self.redirect(feconf.DASHBOARD_URL)
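# Hedged sketch: utils.set_url_query_parameter used above is Oppia's own
# helper and its implementation is not shown here.  A minimal stand-in with
# the same call shape (Python 2, matching this codebase) might look like:
import urllib
import urlparse


def set_url_query_parameter(url, param_name, param_value):
    """Return `url` with `param_name=param_value` set in its query string."""
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    query_params = urlparse.parse_qs(query)
    query_params[param_name] = [param_value]
    new_query = urllib.urlencode(query_params, doseq=True)
    return urlparse.urlunsplit((scheme, netloc, path, new_query, fragment))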
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=unused-import,g-bad-import-order """Contains the pooling layer classes and their functional aliases. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from six.moves import xrange # pylint: disable=redefined-builtin import numpy as np from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import nn from tensorflow.python.ops import init_ops from tensorflow.python.ops import standard_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.layers import base from tensorflow.python.layers import utils class _Pooling1D(base.Layer): """Pooling layer for arbitrary pooling functions, for 1D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. """ def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(_Pooling1D, self).__init__(name=name, **kwargs) self.pool_function = pool_function self.pool_size = utils.normalize_tuple(pool_size, 1, 'pool_size') self.strides = utils.normalize_tuple(strides, 1, 'strides') self.padding = utils.normalize_padding(padding) self.data_format = utils.normalize_data_format(data_format) def build(self, input_shape): if len(input_shape) != 3: raise ValueError('Inputs should have rank 3. ' 'Received input shape:', str(input_shape)) def call(self, inputs): # There is no TF op for 1D pooling, hence we make the inputs 4D. 
if self.data_format == 'channels_last': inputs = array_ops.expand_dims(inputs, 2) pool_shape = (1,) + self.pool_size + (1, 1) strides = (1,) + self.strides + (1, 1) data_format = 'NHWC' else: inputs = array_ops.expand_dims(inputs, 1) pool_shape = (1, 1) + self.pool_size + (1,) strides = (1, 1) + self.strides + (1,) data_format = 'NCHW' outputs = self.pool_function( inputs, ksize=pool_shape, strides=strides, padding=self.padding.upper(), data_format=data_format) if self.data_format == 'channels_last': return array_ops.squeeze(outputs, 2) else: return array_ops.squeeze(outputs, 1) class AveragePooling1D(_Pooling1D): """Average Pooling layer for 1D inputs. Arguments: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(AveragePooling1D, self).__init__( nn.avg_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) def average_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average Pooling layer for 1D inputs. Arguments: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. """ layer = AveragePooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) class MaxPooling1D(_Pooling1D): """Max Pooling layer for 1D inputs. Arguments: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. 
""" def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(MaxPooling1D, self).__init__( nn.max_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) def max_pooling1d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max Pooling layer for 1D inputs. Arguments: inputs: The tensor over which to pool. Must have rank 3. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, length, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, length)`. name: A string, the name of the layer. Returns: The output tensor, of rank 3. """ layer = MaxPooling1D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) class _Pooling2D(base.Layer): """Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images). This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(_Pooling2D, self).__init__(name=name, **kwargs) self.pool_function = pool_function self.pool_size = utils.normalize_tuple(pool_size, 2, 'pool_size') self.strides = utils.normalize_tuple(strides, 2, 'strides') self.padding = utils.normalize_padding(padding) self.data_format = utils.normalize_data_format(data_format) def build(self, input_shape): if len(input_shape) != 4: raise ValueError('Inputs should have rank 4. ' 'Received input shape:', str(input_shape)) def call(self, inputs): if self.data_format == 'channels_last': pool_shape = (1,) + self.pool_size + (1,) strides = (1,) + self.strides + (1,) else: pool_shape = (1, 1) + self.pool_size strides = (1, 1) + self.strides return self.pool_function( inputs, ksize=pool_shape, strides=strides, padding=self.padding.upper(), data_format=utils.convert_data_format(self.data_format, 4)) class AveragePooling2D(_Pooling2D): """Average pooling layer for 2D inputs (e.g. images). Arguments: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. 
Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(AveragePooling2D, self).__init__( nn.avg_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) def average_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average pooling layer for 2D inputs (e.g. images). Arguments: inputs: The tensor over which to pool. Must have rank 4. pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. """ layer = AveragePooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) class MaxPooling2D(_Pooling2D): """Max pooling layer for 2D inputs (e.g. images). Arguments: pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(MaxPooling2D, self).__init__( nn.max_pool, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) def max_pooling2d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max pooling layer for 2D inputs (e.g. images). Arguments: inputs: The tensor over which to pool. Must have rank 4. 
pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`. name: A string, the name of the layer. Returns: Output tensor. """ layer = MaxPooling2D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) class _Pooling3D(base.Layer): """Pooling layer for arbitrary pooling functions, for 3D inputs. This class only exists for code reuse. It will never be an exposed API. Arguments: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(_Pooling3D, self).__init__(name=name, **kwargs) self.pool_function = pool_function self.pool_size = utils.normalize_tuple(pool_size, 3, 'pool_size') self.strides = utils.normalize_tuple(strides, 3, 'strides') self.padding = utils.normalize_padding(padding) self.data_format = utils.normalize_data_format(data_format) def build(self, input_shape): if len(input_shape) != 5: raise ValueError('Inputs should have rank 5. ' 'Received input shape:', str(input_shape)) def call(self, inputs): pool_shape = (1,) + self.pool_size + (1,) strides = (1,) + self.strides + (1,) if self.data_format == 'channels_first': # TF does not support channels first with 3D pooling operations, # so we must handle this case manually. inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1)) outputs = self.pool_function( inputs, ksize=pool_shape, strides=strides, padding=self.padding.upper()) if self.data_format == 'channels_first': outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3)) return outputs class AveragePooling3D(_Pooling3D): """Average pooling layer for 3D inputs (e.g. volumes). Arguments: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. 
strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(AveragePooling3D, self).__init__( nn.avg_pool3d, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) def average_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Average pooling layer for 3D inputs (e.g. volumes). Arguments: inputs: The tensor over which to pool. Must have rank 5. pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. """ layer = AveragePooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) class MaxPooling3D(_Pooling3D): """Max pooling layer for 3D inputs (e.g. volumes). Arguments: pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. """ def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs): super(MaxPooling3D, self).__init__( nn.max_pool3d, pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs) def max_pooling3d(inputs, pool_size, strides, padding='valid', data_format='channels_last', name=None): """Max pooling layer for 3D inputs (e.g. volumes). Arguments: inputs: The tensor over which to pool. Must have rank 5. 
pool_size: An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string. The ordering of the dimensions in the inputs. `channels_last` (default) and `channels_first` are supported. `channels_last` corresponds to inputs with shape `(batch, depth, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, depth, height, width)`. name: A string, the name of the layer. Returns: Output tensor. """ layer = MaxPooling3D(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name) return layer.apply(inputs) # Aliases AvgPool2D = AveragePooling2D MaxPool2D = MaxPooling2D max_pool2d = max_pooling2d avg_pool2d = average_pooling2d
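# ---------------------------------------------------------------------------
# Editor's usage sketch, not part of the original module. It exercises the
# functional wrappers defined above in a TF 1.x graph-mode program; the
# 28x28x3 input shape and the zero-filled feed value are illustrative only.
if __name__ == '__main__':
    import numpy as np
    import tensorflow as tf

    images = tf.placeholder(tf.float32, [None, 28, 28, 3])  # NHWC layout
    # A 2x2 window with stride 2 halves each spatial dimension:
    # (None, 28, 28, 3) -> (None, 14, 14, 3).
    max_out = max_pooling2d(images, pool_size=2, strides=2, padding='same')
    avg_out = average_pooling2d(images, pool_size=2, strides=2)
    with tf.Session() as sess:
        feed = {images: np.zeros((4, 28, 28, 3), np.float32)}
        m, a = sess.run([max_out, avg_out], feed)
        print(m.shape, a.shape)  # (4, 14, 14, 3) (4, 14, 14, 3)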
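# Editor's note: a freestanding restatement (with a hypothetical helper name)
# of the workaround in _Pooling3D.call above. tf.nn.max_pool3d in this
# generation of TensorFlow only accepts NDHWC ("channels_last") input, so
# channels_first tensors are transposed to NDHWC, pooled, and transposed back.
def _pool3d_channels_first_sketch(inputs, ksize, strides, padding):
    import tensorflow as tf
    x = tf.transpose(inputs, (0, 2, 3, 4, 1))  # NCDHW -> NDHWC
    x = tf.nn.max_pool3d(x, ksize=ksize, strides=strides, padding=padding)
    return tf.transpose(x, (0, 4, 1, 2, 3))    # NDHWC -> NCDHW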
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util


class nstrafficdomain_bridgegroup_binding(base_resource):
    """Binding class showing the bridgegroup that can be bound to nstrafficdomain."""

    def __init__(self):
        self._bridgegroup = 0
        self._td = 0
        self.___count = 0

    @property
    def bridgegroup(self):
        ur"""ID of the configured bridge to bind to this traffic domain. More than
        one bridge group can be bound to a traffic domain, but the same bridge group
        cannot be a part of multiple traffic domains.<br/>Minimum value = 1<br/>Maximum value = 1000.
        """
        try:
            return self._bridgegroup
        except Exception as e:
            raise e

    @bridgegroup.setter
    def bridgegroup(self, bridgegroup):
        ur"""ID of the configured bridge to bind to this traffic domain. More than
        one bridge group can be bound to a traffic domain, but the same bridge group
        cannot be a part of multiple traffic domains.<br/>Minimum value = 1<br/>Maximum value = 1000
        """
        try:
            self._bridgegroup = bridgegroup
        except Exception as e:
            raise e

    @property
    def td(self):
        ur"""Integer value that uniquely identifies a traffic domain.<br/>Minimum value = 1<br/>Maximum value = 4094.
        """
        try:
            return self._td
        except Exception as e:
            raise e

    @td.setter
    def td(self, td):
        ur"""Integer value that uniquely identifies a traffic domain.<br/>Minimum value = 1<br/>Maximum value = 4094
        """
        try:
            self._td = td
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        ur"""Converts nitro response into object and returns the object array
        in case of get request.
        """
        try:
            result = service.payload_formatter.string_to_resource(nstrafficdomain_bridgegroup_binding_response, response, self.__class__.__name__)
            if result.errorcode != 0:
                if result.errorcode == 444:
                    service.clear_session(self)
                if result.severity:
                    if result.severity == "ERROR":
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else:
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.nstrafficdomain_bridgegroup_binding
        except Exception as e:
            raise e

    def _get_object_name(self):
        ur"""Returns the value of object identifier argument."""
        try:
            if self.td is not None:
                return str(self.td)
            return None
        except Exception as e:
            raise e

    @classmethod
    def add(cls, client, resource):
        try:
            if resource and type(resource) is not list:
                updateresource = nstrafficdomain_bridgegroup_binding()
                updateresource.td = resource.td
                updateresource.bridgegroup = resource.bridgegroup
                return updateresource.update_resource(client)
            else:
                if resource and len(resource) > 0:
                    updateresources = [nstrafficdomain_bridgegroup_binding() for _ in range(len(resource))]
                    for i in range(len(resource)):
                        updateresources[i].td = resource[i].td
                        updateresources[i].bridgegroup = resource[i].bridgegroup
                    return cls.update_bulk_request(client, updateresources)
        except Exception as e:
            raise e

    @classmethod
    def delete(cls, client, resource):
        try:
            if resource and type(resource) is not list:
                deleteresource = nstrafficdomain_bridgegroup_binding()
                deleteresource.td = resource.td
                deleteresource.bridgegroup = resource.bridgegroup
                return deleteresource.delete_resource(client)
            else:
                if resource and len(resource) > 0:
                    deleteresources = [nstrafficdomain_bridgegroup_binding() for _ in range(len(resource))]
                    for i in range(len(resource)):
                        deleteresources[i].td = resource[i].td
                        deleteresources[i].bridgegroup = resource[i].bridgegroup
                    return cls.delete_bulk_request(client, deleteresources)
        except Exception as e:
            raise e

    @classmethod
    def get(cls, service, td):
        ur"""Use this API to fetch nstrafficdomain_bridgegroup_binding resources."""
        try:
            obj = nstrafficdomain_bridgegroup_binding()
            obj.td = td
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, td, filter_):
        ur"""Use this API to fetch a filtered set of nstrafficdomain_bridgegroup_binding
        resources. Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try:
            obj = nstrafficdomain_bridgegroup_binding()
            obj.td = td
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, td):
        ur"""Use this API to count nstrafficdomain_bridgegroup_binding resources
        configured on NetScaler.
        """
        try:
            obj = nstrafficdomain_bridgegroup_binding()
            obj.td = td
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, td, filter_):
        ur"""Use this API to count the filtered set of nstrafficdomain_bridgegroup_binding
        resources. Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        try:
            obj = nstrafficdomain_bridgegroup_binding()
            obj.td = td
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response:
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e


class nstrafficdomain_bridgegroup_binding_response(base_response):
    def __init__(self, length=1):
        self.nstrafficdomain_bridgegroup_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.nstrafficdomain_bridgegroup_binding = [nstrafficdomain_bridgegroup_binding() for _ in range(length)]
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # coding=utf-8 # # Copyright (c) 2012 NTT DOCOMO, INC # Copyright (c) 2011 University of Southern California / ISI # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A driver for Bare-metal platform. """ from oslo.config import cfg from nova.compute import power_state from nova import context as nova_context from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import paths from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import db from nova.virt import driver from nova.virt import firewall from nova.virt.libvirt import imagecache opts = [ cfg.BoolOpt('inject_password', default=True, help='Whether baremetal compute injects password or not'), cfg.StrOpt('injected_network_template', default=paths.basedir_def('nova/virt/' 'baremetal/interfaces.template'), help='Template file for injected network'), cfg.StrOpt('vif_driver', default='nova.virt.baremetal.vif_driver.BareMetalVIFDriver', help='Baremetal VIF driver.'), cfg.StrOpt('volume_driver', default='nova.virt.baremetal.volume_driver.LibvirtVolumeDriver', help='Baremetal volume driver.'), cfg.ListOpt('instance_type_extra_specs', default=[], help='a list of additional capabilities corresponding to ' 'instance_type_extra_specs for this compute ' 'host to advertise. Valid entries are name=value, pairs ' 'For example, "key1:val1, key2:val2"'), cfg.StrOpt('driver', default='nova.virt.baremetal.pxe.PXE', help='Baremetal driver back-end (pxe or tilera)'), cfg.StrOpt('power_manager', default='nova.virt.baremetal.ipmi.IPMI', help='Baremetal power management method'), cfg.StrOpt('tftp_root', default='/tftpboot', help='Baremetal compute node\'s tftp root path'), ] LOG = logging.getLogger(__name__) baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(opts, baremetal_group) CONF.import_opt('host', 'nova.netconf') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( firewall.__name__, firewall.NoopFirewallDriver.__name__) def _get_baremetal_node_by_instance_uuid(instance_uuid): ctx = nova_context.get_admin_context() node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid) if node['service_host'] != CONF.host: LOG.error(_("Request for baremetal node %s " "sent to wrong service host") % instance_uuid) raise exception.InstanceNotFound(instance_id=instance_uuid) return node def _update_state(context, node, instance, state): """Update the node state in baremetal DB If instance is not supplied, reset the instance_uuid field for this node. 
""" values = {'task_state': state} if not instance: values['instance_uuid'] = None values['instance_name'] = None db.bm_node_update(context, node['id'], values) def get_power_manager(**kwargs): cls = importutils.import_class(CONF.baremetal.power_manager) return cls(**kwargs) class BareMetalDriver(driver.ComputeDriver): """BareMetal hypervisor driver.""" capabilities = { "has_imagecache": True, } def __init__(self, virtapi, read_only=False): super(BareMetalDriver, self).__init__(virtapi) self.driver = importutils.import_object( CONF.baremetal.driver, virtapi) self.vif_driver = importutils.import_object( CONF.baremetal.vif_driver) self.firewall_driver = firewall.load_driver( default=DEFAULT_FIREWALL_DRIVER) self.volume_driver = importutils.import_object( CONF.baremetal.volume_driver, virtapi) self.image_cache_manager = imagecache.ImageCacheManager() extra_specs = {} extra_specs["baremetal_driver"] = CONF.baremetal.driver for pair in CONF.baremetal.instance_type_extra_specs: keyval = pair.split(':', 1) keyval[0] = keyval[0].strip() keyval[1] = keyval[1].strip() extra_specs[keyval[0]] = keyval[1] if 'cpu_arch' not in extra_specs: LOG.warning( _('cpu_arch is not found in instance_type_extra_specs')) extra_specs['cpu_arch'] = '' self.extra_specs = extra_specs self.supported_instances = [ (extra_specs['cpu_arch'], 'baremetal', 'baremetal'), ] @classmethod def instance(cls): if not hasattr(cls, '_instance'): cls._instance = cls() return cls._instance def init_host(self, host): return def get_hypervisor_type(self): return 'baremetal' def get_hypervisor_version(self): # TODO(deva): define the version properly elsewhere return 1 def legacy_nwinfo(self): return False def list_instances(self): l = [] context = nova_context.get_admin_context() for node in db.bm_node_get_associated(context, service_host=CONF.host): l.append(node['instance_name']) return l def _require_node(self, instance): """Get a node's uuid out of a manager instance dict. The compute manager is meant to know the node uuid, so missing uuid a significant issue - it may mean we've been passed someone elses data. 
""" node_uuid = instance.get('node') if not node_uuid: raise exception.NovaException(_( "Baremetal node id not supplied to driver for %r") % instance['uuid']) return node_uuid def _attach_block_devices(self, instance, block_device_info): block_device_mapping = driver.\ block_device_info_get_mapping(block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] mountpoint = vol['mount_device'] self.attach_volume( connection_info, instance['name'], mountpoint) def _detach_block_devices(self, instance, block_device_info): block_device_mapping = driver.\ block_device_info_get_mapping(block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] mountpoint = vol['mount_device'] self.detach_volume( connection_info, instance['name'], mountpoint) def _start_firewall(self, instance, network_info): self.firewall_driver.setup_basic_filtering( instance, network_info) self.firewall_driver.prepare_instance_filter( instance, network_info) self.firewall_driver.apply_instance_filter( instance, network_info) def _stop_firewall(self, instance, network_info): self.firewall_driver.unfilter_instance( instance, network_info) def macs_for_instance(self, instance): context = nova_context.get_admin_context() node_uuid = self._require_node(instance) node = db.bm_node_get_by_node_uuid(context, node_uuid) ifaces = db.bm_interface_get_all_by_bm_node_id(context, node['id']) return set(iface['address'] for iface in ifaces) def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): node_uuid = self._require_node(instance) # NOTE(deva): this db method will raise an exception if the node is # already in use. We call it here to ensure no one else # allocates this node before we begin provisioning it. node = db.bm_node_associate_and_update(context, node_uuid, {'instance_uuid': instance['uuid'], 'instance_name': instance['hostname'], 'task_state': baremetal_states.BUILDING}) try: self._plug_vifs(instance, network_info, context=context) self._attach_block_devices(instance, block_device_info) self._start_firewall(instance, network_info) self.driver.cache_images( context, node, instance, admin_password=admin_password, image_meta=image_meta, injected_files=injected_files, network_info=network_info, ) self.driver.activate_bootloader(context, node, instance, network_info=network_info) # NOTE(deva): ensure node is really off before we turn it on # fixes bug https://code.launchpad.net/bugs/1178919 self.power_off(instance, node) self.power_on(context, instance, network_info, block_device_info, node) self.driver.activate_node(context, node, instance) _update_state(context, node, instance, baremetal_states.ACTIVE) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_("Error deploying instance %(instance)s " "on baremetal node %(node)s.") % {'instance': instance['uuid'], 'node': node['uuid']}) # Do not set instance=None yet. This prevents another # spawn() while we are cleaning up. 
_update_state(context, node, instance, baremetal_states.ERROR) self.driver.deactivate_node(context, node, instance) self.power_off(instance, node) self.driver.deactivate_bootloader(context, node, instance) self.driver.destroy_images(context, node, instance) self._detach_block_devices(instance, block_device_info) self._stop_firewall(instance, network_info) self._unplug_vifs(instance, network_info) _update_state(context, node, None, baremetal_states.DELETED) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): node = _get_baremetal_node_by_instance_uuid(instance['uuid']) ctx = nova_context.get_admin_context() pm = get_power_manager(node=node, instance=instance) state = pm.reboot_node() if pm.state != baremetal_states.ACTIVE: raise exception.InstanceRebootFailure(_( "Baremetal power manager failed to restart node " "for instance %r") % instance['uuid']) _update_state(ctx, node, instance, state) def destroy(self, instance, network_info, block_device_info=None): context = nova_context.get_admin_context() try: node = _get_baremetal_node_by_instance_uuid(instance['uuid']) except exception.InstanceNotFound: LOG.warning(_("Destroy called on non-existing instance %s") % instance['uuid']) return try: self.driver.deactivate_node(context, node, instance) self.power_off(instance, node) self.driver.deactivate_bootloader(context, node, instance) self.driver.destroy_images(context, node, instance) self._detach_block_devices(instance, block_device_info) self._stop_firewall(instance, network_info) self._unplug_vifs(instance, network_info) _update_state(context, node, None, baremetal_states.DELETED) except Exception as e: with excutils.save_and_reraise_exception(): try: LOG.error(_("Error from baremetal driver " "during destroy: %s") % e) _update_state(context, node, instance, baremetal_states.ERROR) except Exception: LOG.error(_("Error while recording destroy failure in " "baremetal database: %s") % e) def power_off(self, instance, node=None): """Power off the specified instance.""" if not node: node = _get_baremetal_node_by_instance_uuid(instance['uuid']) pm = get_power_manager(node=node, instance=instance) pm.deactivate_node() if pm.state != baremetal_states.DELETED: raise exception.InstancePowerOffFailure(_( "Baremetal power manager failed to stop node " "for instance %r") % instance['uuid']) pm.stop_console() def power_on(self, context, instance, network_info, block_device_info=None, node=None): """Power on the specified instance.""" if not node: node = _get_baremetal_node_by_instance_uuid(instance['uuid']) pm = get_power_manager(node=node, instance=instance) pm.activate_node() if pm.state != baremetal_states.ACTIVE: raise exception.InstancePowerOnFailure(_( "Baremetal power manager failed to start node " "for instance %r") % instance['uuid']) pm.start_console() def get_volume_connector(self, instance): return self.volume_driver.get_volume_connector(instance) def attach_volume(self, connection_info, instance, mountpoint): return self.volume_driver.attach_volume(connection_info, instance, mountpoint) def detach_volume(self, connection_info, instance_name, mountpoint): return self.volume_driver.detach_volume(connection_info, instance_name, mountpoint) def get_info(self, instance): inst_uuid = instance.get('uuid') node = _get_baremetal_node_by_instance_uuid(inst_uuid) pm = get_power_manager(node=node, instance=instance) # NOTE(deva): Power manager may not be able to determine power state # in which case it may return "None" here. 
ps = pm.is_power_on() if ps: pstate = power_state.RUNNING elif ps is False: pstate = power_state.SHUTDOWN else: pstate = power_state.NOSTATE return {'state': pstate, 'max_mem': node['memory_mb'], 'mem': node['memory_mb'], 'num_cpu': node['cpus'], 'cpu_time': 0} def refresh_security_group_rules(self, security_group_id): self.firewall_driver.refresh_security_group_rules(security_group_id) return True def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) return True def refresh_provider_fw_rules(self): self.firewall_driver.refresh_provider_fw_rules() def _node_resource(self, node): vcpus_used = 0 memory_mb_used = 0 local_gb_used = 0 vcpus = node['cpus'] memory_mb = node['memory_mb'] local_gb = node['local_gb'] if node['instance_uuid']: vcpus_used = node['cpus'] memory_mb_used = node['memory_mb'] local_gb_used = node['local_gb'] dic = {'vcpus': vcpus, 'memory_mb': memory_mb, 'local_gb': local_gb, 'vcpus_used': vcpus_used, 'memory_mb_used': memory_mb_used, 'local_gb_used': local_gb_used, 'hypervisor_type': self.get_hypervisor_type(), 'hypervisor_version': self.get_hypervisor_version(), 'hypervisor_hostname': str(node['uuid']), 'cpu_info': 'baremetal cpu', } return dic def refresh_instance_security_rules(self, instance): self.firewall_driver.refresh_instance_security_rules(instance) def get_available_resource(self, nodename): context = nova_context.get_admin_context() resource = {} try: node = db.bm_node_get_by_node_uuid(context, nodename) resource = self._node_resource(node) except exception.NodeNotFoundByUUID: pass return resource def ensure_filtering_rules_for_instance(self, instance_ref, network_info): self.firewall_driver.setup_basic_filtering(instance_ref, network_info) self.firewall_driver.prepare_instance_filter(instance_ref, network_info) def unfilter_instance(self, instance_ref, network_info): self.firewall_driver.unfilter_instance(instance_ref, network_info=network_info) def get_host_stats(self, refresh=False): caps = [] context = nova_context.get_admin_context() nodes = db.bm_node_get_all(context, service_host=CONF.host) for node in nodes: res = self._node_resource(node) nodename = str(node['uuid']) data = {} data['vcpus'] = res['vcpus'] data['vcpus_used'] = res['vcpus_used'] data['cpu_info'] = res['cpu_info'] data['disk_total'] = res['local_gb'] data['disk_used'] = res['local_gb_used'] data['disk_available'] = res['local_gb'] - res['local_gb_used'] data['host_memory_total'] = res['memory_mb'] data['host_memory_free'] = res['memory_mb'] - res['memory_mb_used'] data['hypervisor_type'] = res['hypervisor_type'] data['hypervisor_version'] = res['hypervisor_version'] data['hypervisor_hostname'] = nodename data['supported_instances'] = self.supported_instances data.update(self.extra_specs) data['host'] = CONF.host data['node'] = nodename # TODO(NTTdocomo): put node's extra specs here caps.append(data) return caps def plug_vifs(self, instance, network_info): """Plugin VIFs into networks.""" self._plug_vifs(instance, network_info) def _plug_vifs(self, instance, network_info, context=None): if not context: context = nova_context.get_admin_context() node = _get_baremetal_node_by_instance_uuid(instance['uuid']) if node: pifs = db.bm_interface_get_all_by_bm_node_id(context, node['id']) for pif in pifs: if pif['vif_uuid']: db.bm_interface_set_vif_uuid(context, pif['id'], None) for vif in network_info: self.vif_driver.plug(instance, vif) def _unplug_vifs(self, instance, network_info): for vif in network_info: 
self.vif_driver.unplug(instance, vif) def manage_image_cache(self, context, all_instances): """Manage the local cache of images.""" self.image_cache_manager.verify_base_images(context, all_instances) def get_console_output(self, instance): node = _get_baremetal_node_by_instance_uuid(instance['uuid']) return self.driver.get_console_output(node, instance) def get_available_nodes(self): context = nova_context.get_admin_context() return [str(n['uuid']) for n in db.bm_node_get_all(context, service_host=CONF.host)]
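# Editor's note: an illustration (not nova code) of the dynamic-loading
# pattern behind get_power_manager() and BareMetalDriver.__init__ above. A
# dotted class path taken from configuration is resolved to a class object at
# runtime, so the power manager, VIF driver, or volume driver can be swapped
# by editing nova.conf rather than the code.
import importlib

def import_class_sketch(dotted_path):
    # 'nova.virt.baremetal.ipmi.IPMI' -> the IPMI class object
    module_name, _, class_name = dotted_path.rpartition('.')
    return getattr(importlib.import_module(module_name), class_name)

# e.g.: cls = import_class_sketch(CONF.baremetal.power_manager)
#       pm = cls(node=node, instance=instance)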
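# Editor's note: get_info() above folds the power manager's tri-state
# is_power_on() answer into a nova power state. This pure-function
# restatement is an editor's illustration (the constants mirror the values
# defined in nova.compute.power_state) that makes the None-vs-False case
# explicit.
NOSTATE, RUNNING, SHUTDOWN = 0, 1, 4  # mirrors nova.compute.power_state

def map_power_state_sketch(is_on):
    if is_on:                  # True -> the node is powered on
        return RUNNING
    if is_on is False:         # explicit False -> powered off
        return SHUTDOWN
    return NOSTATE             # None -> the manager could not tell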
# Copyright (C) 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from oslo.utils import units from nova.compute import arch from nova import test from nova.tests.unit import matchers from nova.virt.libvirt import config class LibvirtConfigBaseTest(test.NoDBTestCase): def assertXmlEqual(self, expectedXmlstr, actualXmlstr): self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr)) class LibvirtConfigTest(LibvirtConfigBaseTest): def test_config_plain(self): obj = config.LibvirtConfigObject(root_name="demo") xml = obj.to_xml() self.assertXmlEqual(xml, "<demo/>") def test_config_ns(self): obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo", ns_uri="http://example.com/foo") xml = obj.to_xml() self.assertXmlEqual(xml, """ <foo:demo xmlns:foo="http://example.com/foo"/>""") def test_config_text(self): obj = config.LibvirtConfigObject(root_name="demo") root = obj.format_dom() root.append(obj._text_node("foo", "bar")) xml = etree.tostring(root) self.assertXmlEqual(xml, "<demo><foo>bar</foo></demo>") def test_config_text_unicode(self): obj = config.LibvirtConfigObject(root_name='demo') root = obj.format_dom() root.append(obj._text_node('foo', u'\xF0\x9F\x92\xA9')) self.assertXmlEqual('<demo><foo>&#240;&#159;&#146;&#169;</foo></demo>', etree.tostring(root)) def test_config_parse(self): inxml = "<demo><foo/></demo>" obj = config.LibvirtConfigObject(root_name="demo") obj.parse_str(inxml) class LibvirtConfigCapsTest(LibvirtConfigBaseTest): def test_config_host(self): xmlin = """ <capabilities> <host> <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid> <cpu> <arch>x86_64</arch> <model>Opteron_G3</model> <vendor>AMD</vendor> <topology sockets='1' cores='4' threads='1'/> <feature name='ibs'/> <feature name='osvw'/> </cpu> <topology> <cells num='2'> <cell id='0'> <memory unit='KiB'>4048280</memory> <pages unit='KiB' size='4'>1011941</pages> <pages unit='KiB' size='2048'>0</pages> <cpus num='4'> <cpu id='0' socket_id='0' core_id='0' siblings='0'/> <cpu id='1' socket_id='0' core_id='1' siblings='1'/> <cpu id='2' socket_id='0' core_id='2' siblings='2'/> <cpu id='3' socket_id='0' core_id='3' siblings='3'/> </cpus> </cell> <cell id='1'> <memory unit='KiB'>4127684</memory> <pages unit='KiB' size='4'>1031921</pages> <pages unit='KiB' size='2048'>0</pages> <cpus num='4'> <cpu id='4' socket_id='1' core_id='0' siblings='4'/> <cpu id='5' socket_id='1' core_id='1' siblings='5'/> <cpu id='6' socket_id='1' core_id='2' siblings='6'/> <cpu id='7' socket_id='1' core_id='3' siblings='7'/> </cpus> </cell> </cells> </topology> </host> <guest> <os_type>hvm</os_type> <arch name='x86_64'/> </guest> <guest> <os_type>hvm</os_type> <arch name='i686'/> </guest> </capabilities>""" obj = config.LibvirtConfigCaps() obj.parse_str(xmlin) self.assertIsInstance(obj.host, config.LibvirtConfigCapsHost) self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809") xmlout = obj.to_xml() self.assertXmlEqual(xmlin, xmlout) class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest): def 
test_config_platform(self): obj = config.LibvirtConfigGuestTimer() obj.track = "host" xml = obj.to_xml() self.assertXmlEqual(xml, """ <timer name="platform" track="host"/> """) def test_config_pit(self): obj = config.LibvirtConfigGuestTimer() obj.name = "pit" obj.tickpolicy = "discard" xml = obj.to_xml() self.assertXmlEqual(xml, """ <timer name="pit" tickpolicy="discard"/> """) def test_config_hpet(self): obj = config.LibvirtConfigGuestTimer() obj.name = "hpet" obj.present = False xml = obj.to_xml() self.assertXmlEqual(xml, """ <timer name="hpet" present="no"/> """) class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest): def test_config_utc(self): obj = config.LibvirtConfigGuestClock() xml = obj.to_xml() self.assertXmlEqual(xml, """ <clock offset="utc"/> """) def test_config_localtime(self): obj = config.LibvirtConfigGuestClock() obj.offset = "localtime" xml = obj.to_xml() self.assertXmlEqual(xml, """ <clock offset="localtime"/> """) def test_config_timezone(self): obj = config.LibvirtConfigGuestClock() obj.offset = "timezone" obj.timezone = "EDT" xml = obj.to_xml() self.assertXmlEqual(xml, """ <clock offset="timezone" timezone="EDT"/> """) def test_config_variable(self): obj = config.LibvirtConfigGuestClock() obj.offset = "variable" obj.adjustment = "123456" xml = obj.to_xml() self.assertXmlEqual(xml, """ <clock offset="variable" adjustment="123456"/> """) def test_config_timers(self): obj = config.LibvirtConfigGuestClock() tmpit = config.LibvirtConfigGuestTimer() tmpit.name = "pit" tmpit.tickpolicy = "discard" tmrtc = config.LibvirtConfigGuestTimer() tmrtc.name = "rtc" tmrtc.tickpolicy = "merge" obj.add_timer(tmpit) obj.add_timer(tmrtc) xml = obj.to_xml() self.assertXmlEqual(xml, """ <clock offset="utc"> <timer name="pit" tickpolicy="discard"/> <timer name="rtc" tickpolicy="merge"/> </clock> """) class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigCPUFeature("mtrr") xml = obj.to_xml() self.assertXmlEqual(xml, """ <feature name="mtrr"/> """) class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestCPUFeature("mtrr") obj.policy = "force" xml = obj.to_xml() self.assertXmlEqual(xml, """ <feature name="mtrr" policy="force"/> """) class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest): def test_parse_dom(self): xml = """ <numa> <cell id="0" cpus="0-1" memory="1000000"/> <cell id="1" cpus="2-3" memory="1500000"/> </numa> """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestCPUNUMA() obj.parse_dom(xmldoc) self.assertEqual(2, len(obj.cells)) def test_config_simple(self): obj = config.LibvirtConfigGuestCPUNUMA() cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 0 cell.cpus = set([0, 1]) cell.memory = 1000000 obj.cells.append(cell) cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 1 cell.cpus = set([2, 3]) cell.memory = 1500000 obj.cells.append(cell) xml = obj.to_xml() self.assertXmlEqual(xml, """ <numa> <cell id="0" cpus="0-1" memory="1000000"/> <cell id="1" cpus="2-3" memory="1500000"/> </numa> """) class LibvirtConfigCPUTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu> <model>Penryn</model> </cpu> """) def test_config_complex(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" obj.vendor = "Intel" obj.arch = arch.X86_64 obj.add_feature(config.LibvirtConfigCPUFeature("mtrr")) 
obj.add_feature(config.LibvirtConfigCPUFeature("apic")) xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <feature name="apic"/> <feature name="mtrr"/> </cpu> """) def test_only_uniq_cpu_featues(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" obj.vendor = "Intel" obj.arch = arch.X86_64 obj.add_feature(config.LibvirtConfigCPUFeature("mtrr")) obj.add_feature(config.LibvirtConfigCPUFeature("apic")) obj.add_feature(config.LibvirtConfigCPUFeature("apic")) obj.add_feature(config.LibvirtConfigCPUFeature("mtrr")) xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <feature name="apic"/> <feature name="mtrr"/> </cpu> """) def test_config_topology(self): obj = config.LibvirtConfigCPU() obj.model = "Penryn" obj.sockets = 4 obj.cores = 4 obj.threads = 2 xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu> <model>Penryn</model> <topology sockets="4" cores="4" threads="2"/> </cpu> """) class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestCPU() obj.model = "Penryn" xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu match="exact"> <model>Penryn</model> </cpu> """) def test_config_complex(self): obj = config.LibvirtConfigGuestCPU() obj.model = "Penryn" obj.vendor = "Intel" obj.arch = arch.X86_64 obj.mode = "custom" obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr")) obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic")) xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu mode="custom" match="exact"> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <feature name="apic" policy="require"/> <feature name="mtrr" policy="require"/> </cpu> """) def test_config_host(self): obj = config.LibvirtConfigGuestCPU() obj.mode = "host-model" obj.match = "exact" xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu mode="host-model" match="exact"/> """) def test_config_host_with_numa(self): obj = config.LibvirtConfigGuestCPU() obj.mode = "host-model" obj.match = "exact" numa = config.LibvirtConfigGuestCPUNUMA() cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 0 cell.cpus = set([0, 1]) cell.memory = 1000000 numa.cells.append(cell) cell = config.LibvirtConfigGuestCPUNUMACell() cell.id = 1 cell.cpus = set([2, 3]) cell.memory = 1500000 numa.cells.append(cell) obj.numa = numa xml = obj.to_xml() self.assertXmlEqual(xml, """ <cpu mode="host-model" match="exact"> <numa> <cell id="0" cpus="0-1" memory="1000000"/> <cell id="1" cpus="2-3" memory="1500000"/> </numa> </cpu> """) class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestSMBIOS() xml = obj.to_xml() self.assertXmlEqual(xml, """ <smbios mode="sysinfo"/> """) class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest): def test_config_simple(self): obj = config.LibvirtConfigGuestSysinfo() xml = obj.to_xml() self.assertXmlEqual(xml, """ <sysinfo type="smbios"/> """) def test_config_bios(self): obj = config.LibvirtConfigGuestSysinfo() obj.bios_vendor = "Acme" obj.bios_version = "6.6.6" xml = obj.to_xml() self.assertXmlEqual(xml, """ <sysinfo type="smbios"> <bios> <entry name="vendor">Acme</entry> <entry name="version">6.6.6</entry> </bios> </sysinfo> """) def test_config_system(self): obj = config.LibvirtConfigGuestSysinfo() obj.system_manufacturer = "Acme" obj.system_product = "Wile Coyote" obj.system_version = "6.6.6" obj.system_serial = "123456" obj.system_uuid = 
"c7a5fdbd-edaf-9455-926a-d65c16db1809" xml = obj.to_xml() self.assertXmlEqual(xml, """ <sysinfo type="smbios"> <system> <entry name="manufacturer">Acme</entry> <entry name="product">Wile Coyote</entry> <entry name="version">6.6.6</entry> <entry name="serial">123456</entry> <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry> </system> </sysinfo> """) def test_config_mixed(self): obj = config.LibvirtConfigGuestSysinfo() obj.bios_vendor = "Acme" obj.system_manufacturer = "Acme" obj.system_product = "Wile Coyote" obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809" xml = obj.to_xml() self.assertXmlEqual(xml, """ <sysinfo type="smbios"> <bios> <entry name="vendor">Acme</entry> </bios> <system> <entry name="manufacturer">Acme</entry> <entry name="product">Wile Coyote</entry> <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry> </system> </sysinfo> """) class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest): def test_config_file(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> </disk>""") def test_config_file_parse(self): xml = """<disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> </disk>""" xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.source_path, '/tmp/hello') self.assertEqual(obj.target_dev, '/dev/hda') self.assertEqual(obj.target_bus, 'ide') def test_config_file_serial(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9" xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial> </disk>""") def test_config_file_serial_parse(self): xml = """<disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial> </disk>""" xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.serial, '7a97c4a3-6f59-41d4-bf47-191d7f97f8e9') def test_config_file_discard(self): obj = config.LibvirtConfigGuestDisk() obj.driver_name = "qemu" obj.driver_format = "qcow2" obj.driver_cache = "none" obj.driver_discard = "unmap" obj.source_type = "file" obj.source_path = "/tmp/hello.qcow2" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9" xml = obj.to_xml() self.assertXmlEqual(""" <disk type="file" device="disk"> <driver name="qemu" type="qcow2" cache="none" discard="unmap"/> <source file="/tmp/hello.qcow2"/> <target bus="ide" dev="/dev/hda"/> <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial> </disk>""", xml) def test_config_file_discard_parse(self): xml = """ <disk type="file" device="disk"> <driver name="qemu" type="qcow2" cache="none" discard="unmap"/> <source file="/tmp/hello.qcow2"/> <target bus="ide" dev="/dev/hda"/> <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial> </disk>""" xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) 
self.assertEqual('unmap', obj.driver_discard) def test_config_block(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "block" obj.source_path = "/tmp/hello" obj.source_device = "cdrom" obj.driver_name = "qemu" obj.target_dev = "/dev/hdc" obj.target_bus = "ide" xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="block" device="cdrom"> <driver name="qemu"/> <source dev="/tmp/hello"/> <target bus="ide" dev="/dev/hdc"/> </disk>""") def test_config_block_parse(self): xml = """<disk type="block" device="cdrom"> <driver name="qemu"/> <source dev="/tmp/hello"/> <target bus="ide" dev="/dev/hdc"/> </disk>""" xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'block') self.assertEqual(obj.source_path, '/tmp/hello') self.assertEqual(obj.target_dev, '/dev/hdc') self.assertEqual(obj.target_bus, 'ide') def test_config_network(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "network" obj.source_protocol = "iscsi" obj.source_name = "foo.bar.com" obj.driver_name = "qemu" obj.driver_format = "qcow2" obj.target_dev = "/dev/hda" obj.target_bus = "ide" xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="network" device="disk"> <driver name="qemu" type="qcow2"/> <source name="foo.bar.com" protocol="iscsi"/> <target bus="ide" dev="/dev/hda"/> </disk>""") def test_config_network_parse(self): xml = """<disk type="network" device="disk"> <driver name="qemu" type="qcow2"/> <source name="foo.bar.com" protocol="iscsi"/> <target bus="ide" dev="/dev/hda"/> </disk>""" xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'network') self.assertEqual(obj.source_protocol, 'iscsi') self.assertEqual(obj.source_name, 'foo.bar.com') self.assertEqual(obj.driver_name, 'qemu') self.assertEqual(obj.driver_format, 'qcow2') self.assertEqual(obj.target_dev, '/dev/hda') self.assertEqual(obj.target_bus, 'ide') def test_config_network_no_name(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = 'network' obj.source_protocol = 'nbd' obj.source_hosts = ['foo.bar.com'] obj.source_ports = [None] obj.driver_name = 'qemu' obj.driver_format = 'raw' obj.target_dev = '/dev/vda' obj.target_bus = 'virtio' xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="network" device="disk"> <driver name="qemu" type="raw"/> <source protocol="nbd"> <host name="foo.bar.com"/> </source> <target bus="virtio" dev="/dev/vda"/> </disk>""") def test_config_network_multihost(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = 'network' obj.source_protocol = 'rbd' obj.source_name = 'pool/image' obj.source_hosts = ['foo.bar.com', '::1', '1.2.3.4'] obj.source_ports = [None, '123', '456'] obj.driver_name = 'qemu' obj.driver_format = 'raw' obj.target_dev = '/dev/vda' obj.target_bus = 'virtio' xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="network" device="disk"> <driver name="qemu" type="raw"/> <source name="pool/image" protocol="rbd"> <host name="foo.bar.com"/> <host name="::1" port="123"/> <host name="1.2.3.4" port="456"/> </source> <target bus="virtio" dev="/dev/vda"/> </disk>""") def test_config_network_auth(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "network" obj.source_protocol = "rbd" obj.source_name = "pool/image" obj.driver_name = "qemu" obj.driver_format = "raw" obj.target_dev = "/dev/vda" obj.target_bus = "virtio" obj.auth_username = "foo" obj.auth_secret_type = "ceph" obj.auth_secret_uuid = 
"b38a3f43-4be2-4046-897f-b67c2f5e0147" xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="network" device="disk"> <driver name="qemu" type="raw"/> <source name="pool/image" protocol="rbd"/> <auth username="foo"> <secret type="ceph" uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/> </auth> <target bus="virtio" dev="/dev/vda"/> </disk>""") def test_config_iotune(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.disk_read_bytes_sec = 1024000 obj.disk_read_iops_sec = 1000 obj.disk_total_bytes_sec = 2048000 obj.disk_write_bytes_sec = 1024000 obj.disk_write_iops_sec = 1000 obj.disk_total_iops_sec = 2000 xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> <iotune> <read_bytes_sec>1024000</read_bytes_sec> <read_iops_sec>1000</read_iops_sec> <write_bytes_sec>1024000</write_bytes_sec> <write_iops_sec>1000</write_iops_sec> <total_bytes_sec>2048000</total_bytes_sec> <total_iops_sec>2000</total_iops_sec> </iotune> </disk>""") def test_config_blockio(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" obj.logical_block_size = "4096" obj.physical_block_size = "4096" xml = obj.to_xml() self.assertXmlEqual(""" <disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> <blockio logical_block_size="4096" physical_block_size="4096"/> </disk>""", xml) class LibvirtConfigGuestSnapshotDiskTest(LibvirtConfigBaseTest): def test_config_file(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "file" obj.source_path = "/tmp/hello" obj.target_dev = "/dev/hda" obj.target_bus = "ide" xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> </disk>""") def test_config_file_parse(self): xml = """<disk type="file" device="disk"> <source file="/tmp/hello"/> <target bus="ide" dev="/dev/hda"/> </disk>""" xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDisk() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.source_path, '/tmp/hello') self.assertEqual(obj.target_dev, '/dev/hda') self.assertEqual(obj.target_bus, 'ide') class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest): def test_config_file_parse(self): xml = """<backingStore type='file'> <driver name='qemu' type='qcow2'/> <source file='/var/lib/libvirt/images/mid.qcow2'/> <backingStore type='file'> <driver name='qemu' type='qcow2'/> <source file='/var/lib/libvirt/images/base.qcow2'/> <backingStore/> </backingStore> </backingStore> """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDiskBackingStore() obj.parse_dom(xmldoc) self.assertEqual(obj.driver_name, 'qemu') self.assertEqual(obj.driver_format, 'qcow2') self.assertEqual(obj.source_type, 'file') self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2') self.assertEqual(obj.backing_store.driver_name, 'qemu') self.assertEqual(obj.backing_store.source_type, 'file') self.assertEqual(obj.backing_store.source_file, '/var/lib/libvirt/images/base.qcow2') self.assertIsNone(obj.backing_store.backing_store) def test_config_network_parse(self): xml = """<backingStore type='network' index='1'> <format type='qcow2'/> <source protocol='gluster' name='volume1/img1'> <host name='host1' port='24007'/> </source> 
<backingStore type='network' index='2'> <format type='qcow2'/> <source protocol='gluster' name='volume1/img2'> <host name='host1' port='24007'/> </source> <backingStore/> </backingStore> </backingStore> """ xmldoc = etree.fromstring(xml) obj = config.LibvirtConfigGuestDiskBackingStore() obj.parse_dom(xmldoc) self.assertEqual(obj.source_type, 'network') self.assertEqual(obj.source_protocol, 'gluster') self.assertEqual(obj.source_name, 'volume1/img1') self.assertEqual(obj.source_hosts[0], 'host1') self.assertEqual(obj.source_ports[0], '24007') self.assertEqual(obj.index, '1') self.assertEqual(obj.backing_store.source_name, 'volume1/img2') self.assertEqual(obj.backing_store.index, '2') self.assertEqual(obj.backing_store.source_hosts[0], 'host1') self.assertEqual(obj.backing_store.source_ports[0], '24007') self.assertIsNone(obj.backing_store.backing_store) class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest): def test_config_mount(self): obj = config.LibvirtConfigGuestFilesys() obj.source_type = "mount" obj.source_dir = "/tmp/hello" obj.target_dir = "/mnt" xml = obj.to_xml() self.assertXmlEqual(xml, """ <filesystem type="mount"> <source dir="/tmp/hello"/> <target dir="/mnt"/> </filesystem>""") class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest): def test_config_tablet(self): obj = config.LibvirtConfigGuestInput() xml = obj.to_xml() self.assertXmlEqual(xml, """ <input type="tablet" bus="usb"/>""") class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest): def test_config_graphics(self): obj = config.LibvirtConfigGuestGraphics() obj.type = "vnc" obj.autoport = True obj.keymap = "en_US" obj.listen = "127.0.0.1" xml = obj.to_xml() self.assertXmlEqual(xml, """ <graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/> """) class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest): def test_config_pci_guest_host_dev(self): obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci') xml = obj.to_xml() expected = """ <hostdev mode="subsystem" type="pci" managed="yes"/> """ self.assertXmlEqual(xml, expected) def test_parse_GuestHostdev(self): xmldoc = """<hostdev mode="subsystem" type="pci" managed="yes"/>""" obj = config.LibvirtConfigGuestHostdev() obj.parse_str(xmldoc) self.assertEqual(obj.mode, 'subsystem') self.assertEqual(obj.type, 'pci') self.assertEqual(obj.managed, 'yes') def test_parse_GuestHostdev_non_pci(self): xmldoc = """<hostdev mode="subsystem" type="usb" managed="no"/>""" obj = config.LibvirtConfigGuestHostdev() obj.parse_str(xmldoc) self.assertEqual(obj.mode, 'subsystem') self.assertEqual(obj.type, 'usb') self.assertEqual(obj.managed, 'no') class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest): expected = """ <hostdev mode="subsystem" type="pci" managed="yes"> <source> <address bus="0x11" domain="0x1234" function="0x3" slot="0x22" /> </source> </hostdev> """ def test_config_guest_hosdev_pci(self): hostdev = config.LibvirtConfigGuestHostdevPCI() hostdev.domain = "1234" hostdev.bus = "11" hostdev.slot = "22" hostdev.function = "3" xml = hostdev.to_xml() self.assertXmlEqual(self.expected, xml) def test_parse_guest_hosdev_pci(self): xmldoc = self.expected obj = config.LibvirtConfigGuestHostdevPCI() obj.parse_str(xmldoc) self.assertEqual(obj.mode, 'subsystem') self.assertEqual(obj.type, 'pci') self.assertEqual(obj.managed, 'yes') self.assertEqual(obj.domain, '0x1234') self.assertEqual(obj.bus, '0x11') self.assertEqual(obj.slot, '0x22') self.assertEqual(obj.function, '0x3') def test_parse_guest_hosdev_usb(self): xmldoc = """<hostdev mode='subsystem' 
type='usb'> <source startupPolicy='optional'> <vendor id='0x1234'/> <product id='0xbeef'/> </source> <boot order='2'/> </hostdev>""" obj = config.LibvirtConfigGuestHostdevPCI() obj.parse_str(xmldoc) self.assertEqual(obj.mode, 'subsystem') self.assertEqual(obj.type, 'usb') class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest): def test_config_file(self): obj = config.LibvirtConfigGuestSerial() obj.type = "file" obj.source_path = "/tmp/vm.log" xml = obj.to_xml() self.assertXmlEqual(xml, """ <serial type="file"> <source path="/tmp/vm.log"/> </serial>""") def test_config_serial_port(self): obj = config.LibvirtConfigGuestSerial() obj.type = "tcp" obj.listen_port = 11111 obj.listen_host = "0.0.0.0" xml = obj.to_xml() self.assertXmlEqual(xml, """ <serial type="tcp"> <source host="0.0.0.0" service="11111" mode="bind"/> </serial>""") class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest): def test_config_pty(self): obj = config.LibvirtConfigGuestConsole() obj.type = "pty" xml = obj.to_xml() self.assertXmlEqual(xml, """ <console type="pty"/>""") class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest): def test_config_spice_minimal(self): obj = config.LibvirtConfigGuestChannel() obj.type = "spicevmc" xml = obj.to_xml() self.assertXmlEqual(xml, """ <channel type="spicevmc"> <target type='virtio'/> </channel>""") def test_config_spice_full(self): obj = config.LibvirtConfigGuestChannel() obj.type = "spicevmc" obj.target_name = "com.redhat.spice.0" xml = obj.to_xml() self.assertXmlEqual(xml, """ <channel type="spicevmc"> <target type='virtio' name='com.redhat.spice.0'/> </channel>""") def test_config_qga_full(self): obj = config.LibvirtConfigGuestChannel() obj.type = "unix" obj.target_name = "org.qemu.guest_agent.0" obj.source_path = "/var/lib/libvirt/qemu/%s.%s.sock" % ( obj.target_name, "instance-name") xml = obj.to_xml() self.assertXmlEqual(xml, """ <channel type="unix"> <source path="%s" mode="bind"/> <target type="virtio" name="org.qemu.guest_agent.0"/> </channel>""" % obj.source_path) class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest): def test_config_ethernet(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "ethernet" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "vnet0" obj.driver_name = "vhost" obj.vif_inbound_average = 1024000 obj.vif_inbound_peak = 10240000 obj.vif_inbound_burst = 1024000 obj.vif_outbound_average = 1024000 obj.vif_outbound_peak = 10240000 obj.vif_outbound_burst = 1024000 xml = obj.to_xml() self.assertXmlEqual(xml, """ <interface type="ethernet"> <mac address="DE:AD:BE:EF:CA:FE"/> <model type="virtio"/> <driver name="vhost"/> <target dev="vnet0"/> <bandwidth> <inbound average="1024000" peak="10240000" burst="1024000"/> <outbound average="1024000" peak="10240000" burst="1024000"/> </bandwidth> </interface>""") def test_config_bridge(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "bridge" obj.source_dev = "br0" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "tap12345678" obj.filtername = "clean-traffic" obj.filterparams.append({"key": "IP", "value": "192.168.122.1"}) obj.vif_inbound_average = 1024000 obj.vif_inbound_peak = 10240000 obj.vif_inbound_burst = 1024000 obj.vif_outbound_average = 1024000 obj.vif_outbound_peak = 10240000 obj.vif_outbound_burst = 1024000 xml = obj.to_xml() self.assertXmlEqual(xml, """ <interface type="bridge"> <mac address="DE:AD:BE:EF:CA:FE"/> <model type="virtio"/> <source bridge="br0"/> <target dev="tap12345678"/> <filterref 
filter="clean-traffic"> <parameter name="IP" value="192.168.122.1"/> </filterref> <bandwidth> <inbound average="1024000" peak="10240000" burst="1024000"/> <outbound average="1024000" peak="10240000" burst="1024000"/> </bandwidth> </interface>""") def test_config_bridge_ovs(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "bridge" obj.source_dev = "br0" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "tap12345678" obj.vporttype = "openvswitch" obj.vportparams.append({"key": "instanceid", "value": "foobar"}) xml = obj.to_xml() self.assertXmlEqual(xml, """ <interface type="bridge"> <mac address="DE:AD:BE:EF:CA:FE"/> <model type="virtio"/> <source bridge="br0"/> <target dev="tap12345678"/> <virtualport type="openvswitch"> <parameters instanceid="foobar"/> </virtualport> </interface>""") def test_config_8021Qbh(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "direct" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.target_dev = "tap12345678" obj.source_dev = "eth0" obj.vporttype = "802.1Qbh" xml = obj.to_xml() self.assertXmlEqual(xml, """ <interface type="direct"> <mac address="DE:AD:BE:EF:CA:FE"/> <model type="virtio"/> <source dev="eth0" mode="private"/> <target dev="tap12345678"/> <virtualport type="802.1Qbh"/> </interface>""") def test_config_direct(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "direct" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.model = "virtio" obj.source_dev = "eth0" obj.source_mode = "passthrough" xml = obj.to_xml() self.assertXmlEqual(xml, """ <interface type="direct"> <mac address="DE:AD:BE:EF:CA:FE"/> <model type="virtio"/> <source dev="eth0" mode="passthrough"/> </interface>""") def test_config_8021Qbh_hostdev(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "hostdev" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.source_dev = "0000:0a:00.1" obj.vporttype = "802.1Qbh" obj.add_vport_param("profileid", "MyPortProfile") xml = obj.to_xml() self.assertXmlEqual(xml, """ <interface type="hostdev" managed="yes"> <mac address="DE:AD:BE:EF:CA:FE"/> <source> <address type="pci" domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/> </source> <virtualport type="802.1Qbh"> <parameters profileid="MyPortProfile"/> </virtualport> </interface>""") def test_config_hw_veb_hostdev(self): obj = config.LibvirtConfigGuestInterface() obj.net_type = "hostdev" obj.mac_addr = "DE:AD:BE:EF:CA:FE" obj.source_dev = "0000:0a:00.1" obj.vlan = "100" xml = obj.to_xml() self.assertXmlEqual(xml, """ <interface type="hostdev" managed="yes"> <mac address="DE:AD:BE:EF:CA:FE"/> <source> <address type="pci" domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/> </source> <vlan> <tag id="100"/> </vlan> </interface>""") class LibvirtConfigGuestTest(LibvirtConfigBaseTest): def test_config_lxc(self): obj = config.LibvirtConfigGuest() obj.virt_type = "lxc" obj.memory = 100 * units.Mi obj.vcpus = 2 obj.cpuset = set([0, 1, 3, 4, 5]) obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "exe" obj.os_init_path = "/sbin/init" fs = config.LibvirtConfigGuestFilesys() fs.source_dir = "/root/lxc" fs.target_dir = "/" obj.add_device(fs) xml = obj.to_xml() self.assertXmlEqual(xml, """ <domain type="lxc"> <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid> <name>demo</name> <memory>104857600</memory> <vcpu cpuset="0-1,3-5">2</vcpu> <os> <type>exe</type> <init>/sbin/init</init> </os> <devices> <filesystem type="mount"> <source dir="/root/lxc"/> <target dir="/"/> </filesystem> </devices> </domain>""") def 
test_config_lxc_with_idmap(self): obj = config.LibvirtConfigGuest() obj.virt_type = "lxc" obj.memory = 100 * units.Mi obj.vcpus = 2 obj.cpuset = set([0, 1, 3, 4, 5]) obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "exe" obj.os_init_path = "/sbin/init" uidmap = config.LibvirtConfigGuestUIDMap() uidmap.target = "10000" uidmap.count = "1" obj.idmaps.append(uidmap) gidmap = config.LibvirtConfigGuestGIDMap() gidmap.target = "10000" gidmap.count = "1" obj.idmaps.append(gidmap) fs = config.LibvirtConfigGuestFilesys() fs.source_dir = "/root/lxc" fs.target_dir = "/" obj.add_device(fs) xml = obj.to_xml() self.assertXmlEqual(""" <domain type="lxc"> <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid> <name>demo</name> <memory>104857600</memory> <vcpu cpuset="0-1,3-5">2</vcpu> <os> <type>exe</type> <init>/sbin/init</init> </os> <devices> <filesystem type="mount"> <source dir="/root/lxc"/> <target dir="/"/> </filesystem> </devices> <idmap> <uid start="0" target="10000" count="1"/> <gid start="0" target="10000" count="1"/> </idmap> </domain>""", xml) def test_config_xen_pv(self): obj = config.LibvirtConfigGuest() obj.virt_type = "xen" obj.memory = 100 * units.Mi obj.vcpus = 2 obj.cpuset = set([0, 1, 3, 4, 5]) obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "linux" obj.os_kernel = "/tmp/vmlinuz" obj.os_initrd = "/tmp/ramdisk" obj.os_cmdline = "console=xvc0" disk = config.LibvirtConfigGuestDisk() disk.source_type = "file" disk.source_path = "/tmp/img" disk.target_dev = "/dev/xvda" disk.target_bus = "xen" obj.add_device(disk) xml = obj.to_xml() self.assertXmlEqual(xml, """ <domain type="xen"> <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid> <name>demo</name> <memory>104857600</memory> <vcpu cpuset="0-1,3-5">2</vcpu> <os> <type>linux</type> <kernel>/tmp/vmlinuz</kernel> <initrd>/tmp/ramdisk</initrd> <cmdline>console=xvc0</cmdline> </os> <devices> <disk type="file" device="disk"> <source file="/tmp/img"/> <target bus="xen" dev="/dev/xvda"/> </disk> </devices> </domain>""") def test_config_xen_hvm(self): obj = config.LibvirtConfigGuest() obj.virt_type = "xen" obj.memory = 100 * units.Mi obj.vcpus = 2 obj.cpuset = set([0, 1, 3, 4, 5]) obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "hvm" obj.os_loader = '/usr/lib/xen/boot/hvmloader' obj.os_root = "root=xvda" obj.os_cmdline = "console=xvc0" obj.pae = True obj.acpi = True obj.apic = True disk = config.LibvirtConfigGuestDisk() disk.source_type = "file" disk.source_path = "/tmp/img" disk.target_dev = "/dev/xvda" disk.target_bus = "xen" obj.add_device(disk) xml = obj.to_xml() self.assertXmlEqual(xml, """ <domain type="xen"> <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid> <name>demo</name> <memory>104857600</memory> <vcpu cpuset="0-1,3-5">2</vcpu> <os> <type>hvm</type> <loader>/usr/lib/xen/boot/hvmloader</loader> <cmdline>console=xvc0</cmdline> <root>root=xvda</root> </os> <features> <acpi/> <apic/> <pae/> </features> <devices> <disk type="file" device="disk"> <source file="/tmp/img"/> <target bus="xen" dev="/dev/xvda"/> </disk> </devices> </domain>""") def test_config_kvm(self): obj = config.LibvirtConfigGuest() obj.virt_type = "kvm" obj.memory = 100 * units.Mi obj.vcpus = 2 obj.cpuset = set([0, 1, 3, 4, 5]) obj.cputune = config.LibvirtConfigGuestCPUTune() obj.cputune.shares = 100 obj.cputune.quota = 50000 obj.cputune.period = 25000 obj.membacking = config.LibvirtConfigGuestMemoryBacking() obj.membacking.hugepages = True obj.memtune = 
config.LibvirtConfigGuestMemoryTune() obj.memtune.hard_limit = 496 obj.memtune.soft_limit = 672 obj.memtune.swap_hard_limit = 1638 obj.memtune.min_guarantee = 2970 obj.numatune = config.LibvirtConfigGuestNUMATune() numamemory = config.LibvirtConfigGuestNUMATuneMemory() numamemory.mode = "preferred" numamemory.nodeset = [0, 1, 2, 3, 8] obj.numatune.memory = numamemory numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode() numamemnode0.cellid = 0 numamemnode0.mode = "preferred" numamemnode0.nodeset = [0, 1] numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode() numamemnode1.cellid = 1 numamemnode1.mode = "preferred" numamemnode1.nodeset = [2, 3] numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode() numamemnode2.cellid = 2 numamemnode2.mode = "preferred" numamemnode2.nodeset = [8] obj.numatune.memnodes.extend([numamemnode0, numamemnode1, numamemnode2]) obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "linux" obj.os_boot_dev = ["hd", "cdrom", "fd"] obj.os_smbios = config.LibvirtConfigGuestSMBIOS() obj.pae = True obj.acpi = True obj.apic = True obj.sysinfo = config.LibvirtConfigGuestSysinfo() obj.sysinfo.bios_vendor = "Acme" obj.sysinfo.system_version = "1.0.0" disk = config.LibvirtConfigGuestDisk() disk.source_type = "file" disk.source_path = "/tmp/img" disk.target_dev = "/dev/vda" disk.target_bus = "virtio" obj.add_device(disk) xml = obj.to_xml() self.assertXmlEqual(xml, """ <domain type="kvm"> <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid> <name>demo</name> <memory>104857600</memory> <memoryBacking> <hugepages/> </memoryBacking> <memtune> <hard_limit units="K">496</hard_limit> <soft_limit units="K">672</soft_limit> <swap_hard_limit units="K">1638</swap_hard_limit> <min_guarantee units="K">2970</min_guarantee> </memtune> <numatune> <memory mode="preferred" nodeset="0-3,8"/> <memnode cellid="0" mode="preferred" nodeset="0-1"/> <memnode cellid="1" mode="preferred" nodeset="2-3"/> <memnode cellid="2" mode="preferred" nodeset="8"/> </numatune> <vcpu cpuset="0-1,3-5">2</vcpu> <sysinfo type='smbios'> <bios> <entry name="vendor">Acme</entry> </bios> <system> <entry name="version">1.0.0</entry> </system> </sysinfo> <os> <type>linux</type> <boot dev="hd"/> <boot dev="cdrom"/> <boot dev="fd"/> <smbios mode="sysinfo"/> </os> <features> <acpi/> <apic/> <pae/> </features> <cputune> <shares>100</shares> <quota>50000</quota> <period>25000</period> </cputune> <devices> <disk type="file" device="disk"> <source file="/tmp/img"/> <target bus="virtio" dev="/dev/vda"/> </disk> </devices> </domain>""") def test_config_machine_type(self): obj = config.LibvirtConfigGuest() obj.virt_type = "kvm" obj.memory = 100 * units.Mi obj.vcpus = 2 obj.name = "demo" obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147" obj.os_type = "hvm" obj.os_mach_type = "fake_machine_type" xml = obj.to_xml() self.assertXmlEqual(xml, """ <domain type="kvm"> <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid> <name>demo</name> <memory>104857600</memory> <vcpu>2</vcpu> <os> <type machine="fake_machine_type">hvm</type> </os> </domain>""") def test_ConfigGuest_parse_devices(self): xmldoc = """ <domain type="kvm"> <devices> <hostdev mode="subsystem" type="pci" managed="no"> </hostdev> </devices> </domain> """ obj = config.LibvirtConfigGuest() obj.parse_str(xmldoc) self.assertEqual(len(obj.devices), 1) self.assertIsInstance(obj.devices[0], config.LibvirtConfigGuestHostdevPCI) self.assertEqual(obj.devices[0].mode, 'subsystem') self.assertEqual(obj.devices[0].managed, 'no') def 
test_ConfigGuest_parse_devices_wrong_type(self):
        xmldoc = """
                 <domain type="kvm">
                   <devices>
                     <hostdev mode="subsystem" type="xxxx" managed="no">
                     </hostdev>
                   </devices>
                 </domain>
                 """
        obj = config.LibvirtConfigGuest()
        obj.parse_str(xmldoc)

        self.assertEqual(len(obj.devices), 0)

    def test_ConfigGuest_parse_cpu(self):
        xmldoc = """
                 <domain>
                   <cpu mode='custom' match='exact'>
                     <model>kvm64</model>
                   </cpu>
                 </domain>
                 """
        obj = config.LibvirtConfigGuest()
        obj.parse_str(xmldoc)

        self.assertEqual(obj.cpu.mode, 'custom')
        self.assertEqual(obj.cpu.match, 'exact')
        self.assertEqual(obj.cpu.model, 'kvm64')


class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):

    def test_config_snapshot(self):
        obj = config.LibvirtConfigGuestSnapshot()
        obj.name = "Demo"

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domainsnapshot>
              <name>Demo</name>
              <disks/>
            </domainsnapshot>""")

    def test_config_snapshot_with_disks(self):
        obj = config.LibvirtConfigGuestSnapshot()
        obj.name = "Demo"

        disk = config.LibvirtConfigGuestSnapshotDisk()
        disk.name = 'vda'
        disk.source_path = 'source-path'
        disk.source_type = 'file'
        disk.snapshot = 'external'
        disk.driver_name = 'qcow2'
        obj.add_disk(disk)

        disk2 = config.LibvirtConfigGuestSnapshotDisk()
        disk2.name = 'vdb'
        disk2.snapshot = 'no'
        obj.add_disk(disk2)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domainsnapshot>
              <name>Demo</name>
              <disks>
                <disk name='vda' snapshot='external' type='file'>
                  <source file='source-path'/>
                </disk>
                <disk name='vdb' snapshot='no'/>
              </disks>
            </domainsnapshot>""")

    def test_config_snapshot_with_network_disks(self):
        obj = config.LibvirtConfigGuestSnapshot()
        obj.name = "Demo"

        disk = config.LibvirtConfigGuestSnapshotDisk()
        disk.name = 'vda'
        disk.source_name = 'source-file'
        disk.source_type = 'network'
        disk.source_hosts = ['host1']
        disk.source_ports = ['12345']
        disk.source_protocol = 'glusterfs'
        disk.snapshot = 'external'
        disk.driver_name = 'qcow2'
        obj.add_disk(disk)

        disk2 = config.LibvirtConfigGuestSnapshotDisk()
        disk2.name = 'vdb'
        disk2.snapshot = 'no'
        obj.add_disk(disk2)

        xml = obj.to_xml()
        self.assertXmlEqual(xml, """
            <domainsnapshot>
              <name>Demo</name>
              <disks>
                <disk name='vda' snapshot='external' type='network'>
                  <source protocol='glusterfs' name='source-file'>
                    <host name='host1' port='12345'/>
                  </source>
                </disk>
                <disk name='vdb' snapshot='no'/>
              </disks>
            </domainsnapshot>""")


class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):

    def test_config_virt_usb_device(self):
        xmlin = """
        <device>
          <name>usb_0000_09_00_0</name>
          <parent>pci_0000_00_1c_0</parent>
          <driver>
            <name>vxge</name>
          </driver>
          <capability type="usb">
            <domain>0</domain>
            <capability type="fake_usb">
              <address fake_usb="fake"/>
            </capability>
          </capability>
        </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsNone(obj.pci_capability)

    def test_config_virt_device(self):
        xmlin = """
        <device>
          <name>pci_0000_09_00_0</name>
          <parent>pci_0000_00_1c_0</parent>
          <driver>
            <name>vxge</name>
          </driver>
          <capability type="pci">
            <domain>0</domain>
            <bus>9</bus>
            <slot>0</slot>
            <function>0</function>
            <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
            <vendor id="0x17d5">Neterion Inc.</vendor>
            <capability type="virt_functions">
              <address domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
              <address domain="0x0000" bus="0x0a" slot="0x00" function="0x2"/>
              <address domain="0x0000" bus="0x0a" slot="0x00" function="0x3"/>
            </capability>
          </capability>
        </device>"""

        obj = config.LibvirtConfigNodeDevice()
        obj.parse_str(xmlin)

        self.assertIsInstance(obj.pci_capability,
config.LibvirtConfigNodeDevicePciCap) self.assertIsInstance(obj.pci_capability.fun_capability[0], config.LibvirtConfigNodeDevicePciSubFunctionCap) self.assertEqual(obj.pci_capability.fun_capability[0].type, "virt_functions") self.assertEqual(len(obj.pci_capability.fun_capability[0]. device_addrs), 3) self.assertEqual(obj.pci_capability.bus, 9) def test_config_phy_device(self): xmlin = """ <device> <name>pci_0000_33_00_0</name> <parent>pci_0000_22_1c_0</parent> <driver> <name>vxx</name> </driver> <capability type="pci"> <domain>0</domain> <bus>9</bus> <slot>0</slot> <function>0</function> <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product> <vendor id="0x17d5">Neterion Inc.</vendor> <capability type="phys_function"> <address domain='0x0000' bus='0x09' slot='0x00' function='0x0'/> </capability> </capability> </device>""" obj = config.LibvirtConfigNodeDevice() obj.parse_str(xmlin) self.assertIsInstance(obj.pci_capability, config.LibvirtConfigNodeDevicePciCap) self.assertIsInstance(obj.pci_capability.fun_capability[0], config.LibvirtConfigNodeDevicePciSubFunctionCap) self.assertEqual(obj.pci_capability.fun_capability[0].type, "phys_function") self.assertEqual(len(obj.pci_capability.fun_capability[0]. device_addrs), 1) def test_config_non_device(self): xmlin = """ <device> <name>pci_0000_33_00_0</name> <parent>pci_0000_22_1c_0</parent> <driver> <name>vxx</name> </driver> <capability type="pci"> <domain>0</domain> <bus>9</bus> <slot>0</slot> <function>0</function> <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product> <vendor id="0x17d5">Neterion Inc.</vendor> <capability type="virt_functions"/> </capability> </device>""" obj = config.LibvirtConfigNodeDevice() obj.parse_str(xmlin) self.assertIsInstance(obj.pci_capability, config.LibvirtConfigNodeDevicePciCap) self.assertIsInstance(obj.pci_capability.fun_capability[0], config.LibvirtConfigNodeDevicePciSubFunctionCap) self.assertEqual(obj.pci_capability.fun_capability[0].type, "virt_functions") def test_config_fail_device(self): xmlin = """ <device> <name>pci_0000_33_00_0</name> <parent>pci_0000_22_1c_0</parent> <driver> <name>vxx</name> </driver> <capability type="pci"> <domain>0</domain> <bus>9</bus> <slot>0</slot> <function>0</function> <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product> <vendor id="0x17d5">Neterion Inc.</vendor> <capability type="virt_functions"> </capability> </capability> </device>""" obj = config.LibvirtConfigNodeDevice() obj.parse_str(xmlin) self.assertIsInstance(obj.pci_capability, config.LibvirtConfigNodeDevicePciCap) self.assertIsInstance(obj.pci_capability.fun_capability[0], config.LibvirtConfigNodeDevicePciSubFunctionCap) self.assertEqual(obj.pci_capability.fun_capability[0].type, "virt_functions") def test_config_2cap_device(self): xmlin = """ <device> <name>pci_0000_04_10_7</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igbvf</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>16</slot> <function>7</function> <product id='0x1520'>I350 Ethernet Controller Virtual</product> <vendor id='0x8086'>Intel Corporation</vendor> <capability type='phys_function'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> <capability type='virt_functions'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> </capability> </device>""" obj = config.LibvirtConfigNodeDevice() obj.parse_str(xmlin) self.assertIsInstance(obj.pci_capability, config.LibvirtConfigNodeDevicePciCap) 
self.assertIsInstance(obj.pci_capability.fun_capability[0], config.LibvirtConfigNodeDevicePciSubFunctionCap) self.assertEqual(obj.pci_capability.fun_capability[0].type, "phys_function") self.assertEqual(obj.pci_capability.fun_capability[1].type, "virt_functions") class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest): def test_config_device_pci_cap(self): xmlin = """ <capability type="pci"> <domain>0</domain> <bus>10</bus> <slot>1</slot> <function>5</function> <product id="0x10bd">Intel 10 Gigabit Ethernet</product> <vendor id="0x8086">Intel Inc.</vendor> <capability type="virt_functions"> <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/> <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/> </capability> </capability>""" obj = config.LibvirtConfigNodeDevicePciCap() obj.parse_str(xmlin) self.assertEqual(obj.domain, 0) self.assertEqual(obj.bus, 10) self.assertEqual(obj.slot, 1) self.assertEqual(obj.function, 5) self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet") self.assertEqual(obj.product_id, 0x10bd) self.assertEqual(obj.vendor, "Intel Inc.") self.assertEqual(obj.vendor_id, 0x8086) self.assertIsInstance(obj.fun_capability[0], config.LibvirtConfigNodeDevicePciSubFunctionCap) self.assertEqual(obj.fun_capability[0].type, 'virt_functions') self.assertEqual(obj.fun_capability[0].device_addrs, [(0, 10, 1, 1), (1, 10, 2, 3), ]) def test_config_device_pci_2cap(self): xmlin = """ <capability type="pci"> <domain>0</domain> <bus>10</bus> <slot>1</slot> <function>5</function> <product id="0x10bd">Intel 10 Gigabit Ethernet</product> <vendor id="0x8086">Intel Inc.</vendor> <capability type="virt_functions"> <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/> <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/> </capability> <capability type="phys_function"> <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/> </capability> </capability>""" obj = config.LibvirtConfigNodeDevicePciCap() obj.parse_str(xmlin) self.assertEqual(obj.domain, 0) self.assertEqual(obj.bus, 10) self.assertEqual(obj.slot, 1) self.assertEqual(obj.function, 5) self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet") self.assertEqual(obj.product_id, 0x10bd) self.assertEqual(obj.vendor, "Intel Inc.") self.assertEqual(obj.vendor_id, 0x8086) self.assertIsInstance(obj.fun_capability[0], config.LibvirtConfigNodeDevicePciSubFunctionCap) self.assertEqual(obj.fun_capability[0].type, 'virt_functions') self.assertEqual(obj.fun_capability[0].device_addrs, [(0, 10, 1, 1), (1, 10, 2, 3), ]) self.assertEqual(obj.fun_capability[1].type, 'phys_function') self.assertEqual(obj.fun_capability[1].device_addrs, [(0, 10, 1, 1), ]) def test_config_read_only_disk(self): obj = config.LibvirtConfigGuestDisk() obj.source_type = "disk" obj.source_device = "disk" obj.driver_name = "kvm" obj.target_dev = "/dev/hdc" obj.target_bus = "virtio" obj.readonly = True xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="disk" device="disk"> <driver name="kvm"/> <target bus="virtio" dev="/dev/hdc"/> <readonly/> </disk>""") obj.readonly = False xml = obj.to_xml() self.assertXmlEqual(xml, """ <disk type="disk" device="disk"> <driver name="kvm"/> <target bus="virtio" dev="/dev/hdc"/> </disk>""") class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest): def test_config_device_pci_subfunction(self): xmlin = """ <capability type="virt_functions"> <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/> <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/> </capability>""" 
fun_capability = config.LibvirtConfigNodeDevicePciSubFunctionCap() fun_capability.parse_str(xmlin) self.assertEqual('virt_functions', fun_capability.type) self.assertEqual([(0, 10, 1, 1), (1, 10, 2, 3)], fun_capability.device_addrs) class LibvirtConfigGuestVideoTest(LibvirtConfigBaseTest): def test_config_video_driver(self): obj = config.LibvirtConfigGuestVideo() obj.type = 'qxl' xml = obj.to_xml() self.assertXmlEqual(xml, """ <video> <model type='qxl'/> </video>""") def test_config_video_driver_vram_heads(self): obj = config.LibvirtConfigGuestVideo() obj.type = 'qxl' obj.vram = '9216' obj.heads = '1' xml = obj.to_xml() self.assertXmlEqual(xml, """ <video> <model type='qxl' vram='9216' heads='1'/> </video>""") class LibvirtConfigGuestSeclabel(LibvirtConfigBaseTest): def test_config_seclabel_config(self): obj = config.LibvirtConfigSeclabel() xml = obj.to_xml() self.assertXmlEqual(xml, """ <seclabel type='dynamic'/>""") def test_config_seclabel_baselabel(self): obj = config.LibvirtConfigSeclabel() obj.type = 'dynamic' obj.baselabel = 'system_u:system_r:my_svirt_t:s0' xml = obj.to_xml() self.assertXmlEqual(xml, """ <seclabel type='dynamic'> <baselabel>system_u:system_r:my_svirt_t:s0</baselabel> </seclabel>""") class LibvirtConfigGuestRngTest(LibvirtConfigBaseTest): def test_config_rng_driver(self): obj = config.LibvirtConfigGuestRng() xml = obj.to_xml() self.assertXmlEqual(xml, """ <rng model='virtio'> <backend model='random'/> </rng>""") def test_config_rng_driver_with_rate(self): obj = config.LibvirtConfigGuestRng() obj.backend = '/dev/random' obj.rate_period = '12' obj.rate_bytes = '34' xml = obj.to_xml() self.assertXmlEqual(xml, """ <rng model='virtio'> <rate period='12' bytes='34'/> <backend model='random'>/dev/random</backend> </rng>""") class LibvirtConfigGuestControllerTest(LibvirtConfigBaseTest): def test_config_guest_contoller(self): obj = config.LibvirtConfigGuestController() obj.type = 'scsi' obj.index = 0 obj.model = 'virtio-scsi' xml = obj.to_xml() self.assertXmlEqual(xml, """ <controller type='scsi' index='0' model='virtio-scsi'/>""") class LibvirtConfigGuestWatchdogTest(LibvirtConfigBaseTest): def test_config_watchdog(self): obj = config.LibvirtConfigGuestWatchdog() obj.action = 'none' xml = obj.to_xml() self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='none'/>") def test_config_watchdog_default_action(self): obj = config.LibvirtConfigGuestWatchdog() xml = obj.to_xml() self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='reset'/>") class LibvirtConfigGuestCPUTuneTest(LibvirtConfigBaseTest): def test_config_cputune_timeslice(self): cputune = config.LibvirtConfigGuestCPUTune() cputune.shares = 100 cputune.quota = 50000 cputune.period = 25000 xml = cputune.to_xml() self.assertXmlEqual(xml, """ <cputune> <shares>100</shares> <quota>50000</quota> <period>25000</period> </cputune>""") def test_config_cputune_vcpus(self): cputune = config.LibvirtConfigGuestCPUTune() vcpu0 = config.LibvirtConfigGuestCPUTuneVCPUPin() vcpu0.id = 0 vcpu0.cpuset = set([0, 1]) vcpu1 = config.LibvirtConfigGuestCPUTuneVCPUPin() vcpu1.id = 1 vcpu1.cpuset = set([2, 3]) vcpu2 = config.LibvirtConfigGuestCPUTuneVCPUPin() vcpu2.id = 2 vcpu2.cpuset = set([4, 5]) vcpu3 = config.LibvirtConfigGuestCPUTuneVCPUPin() vcpu3.id = 3 vcpu3.cpuset = set([6, 7]) cputune.vcpupin.extend([vcpu0, vcpu1, vcpu2, vcpu3]) xml = cputune.to_xml() self.assertXmlEqual(xml, """ <cputune> <vcpupin vcpu="0" cpuset="0-1"/> <vcpupin vcpu="1" cpuset="2-3"/> <vcpupin vcpu="2" cpuset="4-5"/> <vcpupin vcpu="3" cpuset="6-7"/> 
</cputune>""") class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest): def test_config_memory_backing_none(self): obj = config.LibvirtConfigGuestMemoryBacking() xml = obj.to_xml() self.assertXmlEqual(xml, "<memoryBacking/>") def test_config_memory_backing_all(self): obj = config.LibvirtConfigGuestMemoryBacking() obj.locked = True obj.sharedpages = False obj.hugepages = True xml = obj.to_xml() self.assertXmlEqual(xml, """ <memoryBacking> <hugepages/> <nosharedpages/> <locked/> </memoryBacking>""") class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest): def test_config_memory_backing_none(self): obj = config.LibvirtConfigGuestMemoryTune() xml = obj.to_xml() self.assertXmlEqual(xml, "<memtune/>") def test_config_memory_backing_all(self): obj = config.LibvirtConfigGuestMemoryTune() obj.soft_limit = 6 obj.hard_limit = 28 obj.swap_hard_limit = 140 obj.min_guarantee = 270 xml = obj.to_xml() self.assertXmlEqual(xml, """ <memtune> <hard_limit units="K">28</hard_limit> <soft_limit units="K">6</soft_limit> <swap_hard_limit units="K">140</swap_hard_limit> <min_guarantee units="K">270</min_guarantee> </memtune>""") class LibvirtConfigGuestNUMATuneTest(LibvirtConfigBaseTest): def test_config_numa_tune_none(self): obj = config.LibvirtConfigGuestNUMATune() xml = obj.to_xml() self.assertXmlEqual("<numatune/>", xml) def test_config_numa_tune_memory(self): obj = config.LibvirtConfigGuestNUMATune() numamemory = config.LibvirtConfigGuestNUMATuneMemory() numamemory.nodeset = [0, 1, 2, 3, 8] obj.memory = numamemory xml = obj.to_xml() self.assertXmlEqual(""" <numatune> <memory mode="strict" nodeset="0-3,8"/> </numatune>""", xml) def test_config_numa_tune_memnodes(self): obj = config.LibvirtConfigGuestNUMATune() numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode() numamemnode0.cellid = 0 numamemnode0.nodeset = [0, 1] numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode() numamemnode1.cellid = 1 numamemnode1.nodeset = [2, 3] numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode() numamemnode2.cellid = 2 numamemnode2.nodeset = [8] obj.memnodes.extend([numamemnode0, numamemnode1, numamemnode2]) xml = obj.to_xml() self.assertXmlEqual(""" <numatune> <memnode cellid="0" mode="strict" nodeset="0-1"/> <memnode cellid="1" mode="strict" nodeset="2-3"/> <memnode cellid="2" mode="strict" nodeset="8"/> </numatune>""", xml) class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest): def test_config_metadata(self): meta = config.LibvirtConfigGuestMetaNovaInstance() meta.package = "2014.2.3" meta.name = "moonbuggy" meta.creationTime = 1234567890 meta.roottype = "image" meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426" owner = config.LibvirtConfigGuestMetaNovaOwner() owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670" owner.username = "buzz" owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021" owner.projectname = "moonshot" meta.owner = owner flavor = config.LibvirtConfigGuestMetaNovaFlavor() flavor.name = "m1.lowgravity" flavor.vcpus = 8 flavor.memory = 2048 flavor.swap = 10 flavor.disk = 50 flavor.ephemeral = 10 meta.flavor = flavor xml = meta.to_xml() self.assertXmlEqual(xml, """ <nova:instance xmlns:nova='http://openstack.org/xmlns/libvirt/nova/1.0'> <nova:package version="2014.2.3"/> <nova:name>moonbuggy</nova:name> <nova:creationTime>2009-02-13 23:31:30</nova:creationTime> <nova:flavor name="m1.lowgravity"> <nova:memory>2048</nova:memory> <nova:disk>50</nova:disk> <nova:swap>10</nova:swap> <nova:ephemeral>10</nova:ephemeral> <nova:vcpus>8</nova:vcpus> </nova:flavor> <nova:owner> 
<nova:user uuid="3472c2a6-de91-4fb5-b618-42bc781ef670">buzz</nova:user> <nova:project uuid="f241e906-010e-4917-ae81-53f4fb8aa021">moonshot</nova:project> </nova:owner> <nova:root type="image" uuid="fe55c69a-8b2e-4bbc-811a-9ad2023a0426"/> </nova:instance> """) class LibvirtConfigGuestIDMap(LibvirtConfigBaseTest): def test_config_id_map_parse_start_not_int(self): xmlin = "<uid start='a' target='20000' count='5'/>" obj = config.LibvirtConfigGuestIDMap() self.assertRaises(ValueError, obj.parse_str, xmlin) def test_config_id_map_parse_target_not_int(self): xmlin = "<uid start='2' target='a' count='5'/>" obj = config.LibvirtConfigGuestIDMap() self.assertRaises(ValueError, obj.parse_str, xmlin) def test_config_id_map_parse_count_not_int(self): xmlin = "<uid start='2' target='20000' count='a'/>" obj = config.LibvirtConfigGuestIDMap() self.assertRaises(ValueError, obj.parse_str, xmlin) def test_config_uid_map(self): obj = config.LibvirtConfigGuestUIDMap() obj.start = 1 obj.target = 10000 obj.count = 2 xml = obj.to_xml() self.assertXmlEqual("<uid start='1' target='10000' count='2'/>", xml) def test_config_uid_map_parse(self): xmlin = "<uid start='2' target='20000' count='5'/>" obj = config.LibvirtConfigGuestUIDMap() obj.parse_str(xmlin) self.assertEqual(2, obj.start) self.assertEqual(20000, obj.target) self.assertEqual(5, obj.count) def test_config_gid_map(self): obj = config.LibvirtConfigGuestGIDMap() obj.start = 1 obj.target = 10000 obj.count = 2 xml = obj.to_xml() self.assertXmlEqual("<gid start='1' target='10000' count='2'/>", xml) def test_config_gid_map_parse(self): xmlin = "<gid start='2' target='20000' count='5'/>" obj = config.LibvirtConfigGuestGIDMap() obj.parse_str(xmlin) self.assertEqual(2, obj.start) self.assertEqual(20000, obj.target) self.assertEqual(5, obj.count) class LibvirtConfigMemoryBalloonTest(LibvirtConfigBaseTest): def test_config_memory_balloon_period(self): balloon = config.LibvirtConfigMemoryBalloon() balloon.model = 'fake_virtio' balloon.period = 11 xml = balloon.to_xml() expected_xml = """ <memballoon model='fake_virtio'> <stats period='11'/> </memballoon>""" self.assertXmlEqual(expected_xml, xml)
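# ---------------------------------------------------------------------------
# The assertions above all go through assertXmlEqual, which compares XML
# structurally rather than as raw strings, so indentation and attribute
# quoting in the expected documents never matter. The helper below is a
# minimal standalone sketch of that kind of whitespace-insensitive
# comparison; it is an illustrative assumption using only the standard
# library, not Nova's actual assertXmlEqual implementation (child order is
# still significant here).
# ---------------------------------------------------------------------------
import xml.etree.ElementTree as ET


def xml_trees_equal(xml_a, xml_b):
    """Return True if two XML strings have the same tag/attrib/text tree."""
    def same(x, y):
        if x.tag != y.tag or x.attrib != y.attrib:
            return False
        if (x.text or '').strip() != (y.text or '').strip():
            return False
        if len(x) != len(y):
            return False
        return all(same(cx, cy) for cx, cy in zip(x, y))
    return same(ET.fromstring(xml_a), ET.fromstring(xml_b))


# Usage: formatting differences are ignored, structural ones are not.
assert xml_trees_equal('<console type="pty"/>',
                       '<console    type="pty"   />')
assert not xml_trees_equal('<console type="pty"/>',
                           '<console type="file"/>')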
from __future__ import unicode_literals, division, absolute_import
import os
import logging
import re
import urlparse
import xmlrpclib

from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.template import RenderError
from flexget.plugin import get_plugin_by_name
from socket import error as socket_error

log = logging.getLogger('aria2')

# TODO: stop using torrent_info_hash[0:16] as the GID

# for RENAME_CONTENT_FILES:
# to rename TV episodes, content_is_episodes must be set to yes


class OutputAria2(object):
    """
    aria2 output plugin
    Version 1.0.0

    Configuration:

    server:     Where aria2 daemon is running. default 'localhost'
    port:       Port of that server. default '6800'
    username:   XML-RPC username set in aria2. default ''
    password:   XML-RPC password set in aria2. default ''
    do:         [add-new|remove-completed] What action to take with incoming
                entries.
    uri:        URI of file to download. Can include inline Basic Auth
                parameters and use jinja2 templating with any fields available
                in the entry. If you are using any of the dynamic renaming
                options below, the filename can be included in this setting
                using {{filename}}.
    exclude_samples:
                [yes|no] Exclude any files that include the word 'sample' in
                their name. default 'no'
    exclude_non_content:
                [yes|no] Exclude any non-content files, as defined by filename
                extensions not listed in file_exts. (See below.) default 'yes'
    rename_content_files:
                [yes|no] If set, rename all content files (as defined by
                extensions listed in file_exts). default 'no'
    rename_template:
                If set, and rename_content_files is yes, all content files
                will be renamed using the value of this field as a template.
                Will be parsed with jinja2 and can include any fields
                available in the entry. default ''
    parse_filename:
                [yes|no] If yes, filenames will be parsed with either the
                series parser (if content_is_episodes is set to yes) or the
                movie parser. default 'no'
    content_is_episodes:
                [yes|no] If yes, files will be parsed by the series plugin
                parser to attempt to determine series name and series_id. If
                no, files will be treated as movies. Note this has no effect
                unless parse_filename is set to yes. default 'no'
    keep_parent_folders:
                [yes|no] If yes, any parent folders within the torrent itself
                will be kept and created within the download directory. For
                example, if a torrent has this structure:
                    MyTorrent/
                        MyFile.mkv
                If this is set to yes, the MyTorrent folder will be created in
                the download directory. If set to no, the folder will be
                ignored and the file will be downloaded directly into the
                download directory. default: 'no'
    fix_year:   [yes|no] If yes, and the last four characters of the series
                name are numbers, enclose them in parentheses as they are
                likely a year. Example: Show Name 1995 S01E01.mkv would become
                Show Name (1995) S01E01.mkv. default 'yes'
    file_exts:  [list] File extensions of all files considered to be content
                files. Used to determine which files to rename or which files
                to exclude from download, with appropriate options set. (See
                above.)
                default: ['.mkv', '.avi', '.mp4', '.wmv', '.asf', '.divx',
                          '.mov', '.mpg', '.rm']
    aria_config:
                "Parent folder" for any options to be passed directly to aria.
                Any command line option listed at
                http://aria2.sourceforge.net/manual/en/html/aria2c.html#options
                can be used by removing the two dashes (--) in front of the
                command name, and changing key=value to key: value. All
                options will be treated as jinja2 templates and rendered prior
                to passing to aria2. default ''

    Sample configuration:

    aria2:
      server: myserver
      port: 6802
      do: add-new
      exclude_samples: yes
      exclude_non_content: yes
      parse_filename: yes
      content_is_episodes: yes
      rename_content_files: yes
      rename_template: '{{series_name}} - {{series_id|lower}}'
      aria_config:
        max-connection-per-server: 4
        max-concurrent-downloads: 4
        split: 4
        file-allocation: none
        dir: "/Volumes/all_my_tv/{{series_name}}"
    """

    schema = {
        'type': 'object',
        'properties': {
            'server': {'type': 'string', 'default': 'localhost'},
            'port': {'type': 'integer', 'default': 6800},
            'username': {'type': 'string', 'default': ''},
            'password': {'type': 'string', 'default': ''},
            'do': {'type': 'string', 'enum': ['add-new', 'remove-completed']},
            'uri': {'type': 'string'},
            'exclude_samples': {'type': 'boolean', 'default': False},
            'exclude_non_content': {'type': 'boolean', 'default': True},
            'rename_content_files': {'type': 'boolean', 'default': False},
            'content_is_episodes': {'type': 'boolean', 'default': False},
            'keep_parent_folders': {'type': 'boolean', 'default': False},
            'parse_filename': {'type': 'boolean', 'default': False},
            'fix_year': {'type': 'boolean', 'default': True},
            'rename_template': {'type': 'string', 'default': ''},
            'file_exts': {
                'type': 'array',
                'items': {'type': 'string'},
                'default': ['.mkv', '.avi', '.mp4', '.wmv', '.asf', '.divx',
                            '.mov', '.mpg', '.rm']
            },
            'aria_config': {
                'type': 'object',
                'additionalProperties': {
                    'oneOf': [{'type': 'string'}, {'type': 'integer'}]
                }
            }
        },
        'required': ['do'],
        'additionalProperties': False
    }

    def on_task_output(self, task, config):
        if 'aria_config' not in config:
            config['aria_config'] = {}

        if 'uri' not in config and config['do'] == 'add-new':
            raise plugin.PluginError('uri (path to folder containing file(s) on server) is required when adding new '
                                     'downloads.', log)
        if 'dir' not in config['aria_config']:
            if config['do'] == 'add-new':
                raise plugin.PluginError('dir (destination directory) is required.', log)
            else:
                config['aria_config']['dir'] = ''
        if config['keep_parent_folders'] and config['aria_config']['dir'].find('{{parent_folders}}') == -1:
            raise plugin.PluginError('When using keep_parent_folders, you must specify {{parent_folders}} in the dir '
                                     'option to show where it goes.', log)
        if config['rename_content_files'] and not config['rename_template']:
            raise plugin.PluginError('When using rename_content_files, you must specify a rename_template.', log)
        if config['username'] and not config['password']:
            raise plugin.PluginError('If you specify an aria2 username, you must specify a password.')

        try:
            userpass = ''
            if config['username']:
                userpass = '%s:%s@' % (config['username'], config['password'])
            baseurl = 'http://%s%s:%s/rpc' % (userpass, config['server'], config['port'])
            log.debug('base url: %s' % baseurl)
            s = xmlrpclib.ServerProxy(baseurl)
            log.info('Connected to daemon at ' + baseurl + '.')
        except xmlrpclib.ProtocolError as err:
            raise plugin.PluginError('Could not connect to aria2 at %s. Protocol error %s: %s'
                                     % (baseurl, err.errcode, err.errmsg), log)
        except xmlrpclib.Fault as err:
            raise plugin.PluginError('XML-RPC fault: Unable to connect to aria2 daemon at %s: %s'
                                     % (baseurl, err.faultString), log)
        except socket_error as (error, msg):
            raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s' % (baseurl, msg), log)
        except:
            raise plugin.PluginError('Unidentified error during connection to aria2 daemon at %s' % baseurl, log)

        # loop entries
        for entry in task.accepted:
            config['aria_dir'] = config['aria_config']['dir']
            if 'aria_gid' in entry:
                config['aria_config']['gid'] = entry['aria_gid']
            elif 'torrent_info_hash' in entry:
                config['aria_config']['gid'] = entry['torrent_info_hash'][0:16]
            elif 'gid' in config['aria_config']:
                del(config['aria_config']['gid'])

            if 'content_files' not in entry:
                if entry['url']:
                    entry['content_files'] = [entry['url']]
                else:
                    entry['content_files'] = [entry['title']]
            else:
                if not isinstance(entry['content_files'], list):
                    entry['content_files'] = [entry['content_files']]

            counter = 0
            for cur_file in entry['content_files']:
                entry['parent_folders'] = ''
                # reset the 'dir' or it will only be rendered on the first loop
                config['aria_config']['dir'] = config['aria_dir']

                cur_filename = cur_file.split('/')[-1]
                if cur_file.split('/')[0] != cur_filename and config['keep_parent_folders']:
                    lastSlash = cur_file.rfind('/')
                    cur_path = cur_file[:lastSlash]
                    if cur_path[0:1] == '/':
                        cur_path = cur_path[1:]
                    entry['parent_folders'] = cur_path
                    log.debug('parent folders: %s' % entry['parent_folders'])

                file_dot = cur_filename.rfind(".")
                file_ext = cur_filename[file_dot:]

                if len(entry['content_files']) > 1 and 'gid' in config['aria_config']:
                    # if there is more than 1 file, need to give unique gids, this will work up to 999 files
                    counter += 1
                    strCounter = str(counter)
                    if len(entry['content_files']) > 99:
                        # sorry not sorry if you have more than 999 files
                        config['aria_config']['gid'] = ''.join([config['aria_config']['gid'][0:-3],
                                                                strCounter.rjust(3, str('0'))])
                    else:
                        config['aria_config']['gid'] = ''.join([config['aria_config']['gid'][0:-2],
                                                                strCounter.rjust(2, str('0'))])

                if config['exclude_samples'] == True:
                    # remove sample files from download list
                    if cur_filename.lower().find('sample') > -1:
                        continue

                if file_ext not in config['file_exts']:
                    if config['exclude_non_content'] == True:
                        # don't download non-content files, like nfos - definable in file_exts
                        continue

                if config['parse_filename']:
                    if config['content_is_episodes']:
                        metainfo_series = plugin.get_plugin_by_name('metainfo_series')
                        guess_series = metainfo_series.instance.guess_series
                        if guess_series(cur_filename):
                            parser = guess_series(cur_filename)
                            entry['series_name'] = parser.name
                            # if the last four chars are numbers, REALLY good chance it's actually a year...
                            # fix it if so desired
                            log.verbose(entry['series_name'])
                            if re.search(r'\d{4}', entry['series_name'][-4:]) is not None and config['fix_year']:
                                entry['series_name'] = ''.join([entry['series_name'][0:-4], '(',
                                                                entry['series_name'][-4:], ')'])
                                log.verbose(entry['series_name'])
                            parser.data = cur_filename
                            parser.parse()
                            log.debug(parser.id_type)
                            if parser.id_type == 'ep':
                                entry['series_id'] = ''.join(['S', str(parser.season).rjust(2, str('0')), 'E',
                                                              str(parser.episode).rjust(2, str('0'))])
                            elif parser.id_type == 'sequence':
                                entry['series_id'] = parser.episode
                            elif parser.id_type and parser.id:
                                entry['series_id'] = parser.id
                    else:
                        parser = get_plugin_by_name('parsing').instance.parse_movie(cur_filename)
                        parser.parse()
                        log.info(parser)
                        testname = parser.name
                        testyear = parser.year
                        parser.data = entry['title']
                        parser.parse()
                        log.info(parser)
                        if len(parser.name) > len(testname):
                            entry['name'] = parser.name
                            entry['movie_name'] = parser.name
                        else:
                            entry['name'] = testname
                            entry['movie_name'] = testname
                        if parser.year:
                            entry['year'] = parser.year
                            entry['movie_year'] = parser.year
                        else:
                            entry['year'] = testyear
                            entry['movie_year'] = testyear

                if config['rename_content_files']:
                    if config['content_is_episodes']:
                        try:
                            config['aria_config']['out'] = entry.render(config['rename_template']) + file_ext
                            log.verbose(config['aria_config']['out'])
                        except RenderError as e:
                            log.error('Could not rename file %s: %s.' % (cur_filename, e))
                            continue
                    else:
                        try:
                            config['aria_config']['out'] = entry.render(config['rename_template']) + file_ext
                            log.verbose(config['aria_config']['out'])
                        except RenderError as e:
                            log.error('Could not rename file %s: %s. Try enabling imdb_lookup in this task'
                                      ' to assist.' % (cur_filename, e))
                            continue
                else:
                    config['aria_config']['out'] = cur_filename

                if config['do'] == 'add-new':
                    log.debug('Adding new file')
                    new_download = 0
                    if 'gid' in config['aria_config']:
                        try:
                            r = s.aria2.tellStatus(config['aria_config']['gid'], ['gid', 'status'])
                            log.info('Download status for %s (gid %s): %s' % (config['aria_config']['out'],
                                                                              r['gid'], r['status']))
                            if r['status'] == 'paused':
                                try:
                                    if not task.manager.options.test:
                                        s.aria2.unpause(r['gid'])
                                    log.info(' Unpaused download.')
                                except xmlrpclib.Fault as err:
                                    raise plugin.PluginError(
                                        'aria2 response to unpause request: %s' % err.faultString, log)
                            else:
                                log.info(' Therefore, not re-adding.')
                        except xmlrpclib.Fault as err:
                            if err.faultString[-12:] == 'is not found':
                                new_download = 1
                            else:
                                raise plugin.PluginError(
                                    'aria2 response to download status request: %s' % err.faultString, log)
                        except xmlrpclib.ProtocolError as err:
                            raise plugin.PluginError('Could not connect to aria2 at %s. Protocol error %s: %s'
                                                     % (baseurl, err.errcode, err.errmsg), log)
                        except socket_error as (error, msg):
                            raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                     % (baseurl, msg), log)
                    else:
                        new_download = 1

                    if new_download == 1:
                        try:
                            entry['filename'] = cur_file
                            cur_uri = entry.render(config['uri'])
                            log.verbose('uri: %s' % cur_uri)
                        except RenderError as e:
                            raise plugin.PluginError('Unable to render uri: %s' % e)
                        try:
                            for key, value in config['aria_config'].iteritems():
                                log.trace('rendering %s: %s' % (key, value))
                                config['aria_config'][key] = entry.render(unicode(value))
                            log.debug('dir: %s' % config['aria_config']['dir'])
                            if not task.manager.options.test:
                                r = s.aria2.addUri([cur_uri], config['aria_config'])
                            else:
                                if 'gid' not in config['aria_config']:
                                    r = '1234567890123456'
                                else:
                                    r = config['aria_config']['gid']
                            log.info('%s successfully added to aria2 with gid %s.' % (config['aria_config']['out'], r))
                        except xmlrpclib.Fault as err:
                            raise plugin.PluginError('aria2 response to add URI request: %s' % err.faultString, log)
                        except socket_error as (error, msg):
                            raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                     % (baseurl, msg), log)
                        except RenderError as e:
                            raise plugin.PluginError('Unable to render one of the fields being passed to aria2: '
                                                     '%s' % e)

                elif config['do'] == 'remove-completed':
                    try:
                        r = s.aria2.tellStatus(config['aria_config']['gid'], ['gid', 'status'])
                        log.info('Status of download with gid %s: %s' % (r['gid'], r['status']))
                        if r['status'] in ['complete', 'removed']:
                            if not task.manager.options.test:
                                try:
                                    a = s.aria2.removeDownloadResult(r['gid'])
                                    if a == 'OK':
                                        log.info('Download with gid %s removed from memory' % r['gid'])
                                except xmlrpclib.Fault as err:
                                    raise plugin.PluginError(
                                        'aria2 response to remove request: %s' % err.faultString, log)
                                except socket_error as (error, msg):
                                    raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                             % (baseurl, msg), log)
                        else:
                            log.info('Download with gid %s could not be removed because of its status: %s'
                                     % (r['gid'], r['status']))
                    except xmlrpclib.Fault as err:
                        if err.faultString[-12:] == 'is not found':
                            log.warning('Download with gid %s could not be removed because it was not found. It was '
                                        'possibly previously removed or never added.' % config['aria_config']['gid'])
                        else:
                            raise plugin.PluginError('aria2 response to status request: %s' % err.faultString, log)
                    except socket_error as (error, msg):
                        raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s'
                                                 % (baseurl, msg), log)


@event('plugin.register')
def register_plugin():
    plugin.register(OutputAria2, 'aria2', api_ver=2)
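# ---------------------------------------------------------------------------
# For reference, the XML-RPC surface the plugin above drives can be exercised
# by hand. This is a minimal sketch under stated assumptions: an aria2c
# daemon started with --enable-rpc is listening on localhost:6800, and the
# download URI below is a placeholder.
# ---------------------------------------------------------------------------
import xmlrpclib

s = xmlrpclib.ServerProxy('http://localhost:6800/rpc')

# aria2.addUri queues a download and returns the GID aria2 assigned to it.
gid = s.aria2.addUri(['http://example.com/file.iso'],
                     {'dir': '/tmp', 'out': 'file.iso'})

# aria2.tellStatus returns only the keys asked for, as the plugin does above.
r = s.aria2.tellStatus(gid, ['gid', 'status'])
print('download %s is %s' % (r['gid'], r['status']))

# Once finished, aria2.removeDownloadResult drops aria2's bookkeeping entry,
# which is what the do: remove-completed action relies on.
if r['status'] in ('complete', 'removed'):
    s.aria2.removeDownloadResult(gid)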
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import unittest from telemetry import page as page_module from telemetry.page import page_set from telemetry import value class TestBase(unittest.TestCase): def setUp(self): ps = page_set.PageSet(base_dir=os.path.dirname(__file__)) ps.AddStory(page_module.Page("http://www.bar.com/", ps, ps.base_dir)) ps.AddStory(page_module.Page("http://www.baz.com/", ps, ps.base_dir)) ps.AddStory(page_module.Page("http://www.foo.com/", ps, ps.base_dir)) self.page_set = ps @property def pages(self): return self.page_set.pages class ValueForTest(value.Value): @classmethod def MergeLikeValuesFromSamePage(cls, values): pass @classmethod def MergeLikeValuesFromDifferentPages(cls, values, group_by_name_suffix=False): pass def GetBuildbotDataType(self, output_context): pass def GetBuildbotValue(self): pass def GetChartAndTraceNameForComputedSummaryResult( self, trace_tag): pass def GetRepresentativeNumber(self): pass def GetRepresentativeString(self): pass @staticmethod def GetJSONTypeName(): pass class ValueForAsDictTest(ValueForTest): @staticmethod def GetJSONTypeName(): return 'baz' class ValueForFromDictTest(ValueForTest): @staticmethod def FromDict(value_dict, page_dict): kwargs = value.Value.GetConstructorKwArgs(value_dict, page_dict) return ValueForFromDictTest(**kwargs) @staticmethod def GetJSONTypeName(): return 'value_for_from_dict_test' class ValueTest(TestBase): def testCompat(self): page0 = self.pages[0] page1 = self.pages[0] a = value.Value(page0, 'x', 'unit', important=False, description=None, tir_label='foo') b = value.Value(page1, 'x', 'unit', important=False, description=None, tir_label='foo') self.assertTrue(b.IsMergableWith(a)) a = value.Value(page0, 'x', 'unit', important=False, description=None, tir_label='foo') b = value.Value(page0, 'x', 'unit', important=False, description=None, tir_label='bar') self.assertTrue(b.IsMergableWith(a)) def testIncompat(self): page0 = self.pages[0] a = value.Value(page0, 'x', 'unit', important=False, description=None, tir_label=None) b = value.Value(page0, 'x', 'incompatUnit', important=False, tir_label=None, description=None) self.assertFalse(b.IsMergableWith(a)) a = value.Value(page0, 'x', 'unit', important=False, description=None, tir_label=None) b = value.Value(page0, 'x', 'unit', important=True, description=None, tir_label=None) self.assertFalse(b.IsMergableWith(a)) a = value.Value(page0, 'x', 'unit', important=False, description=None, tir_label=None) b = ValueForTest(page0, 'x', 'unit', important=True, description=None, tir_label=None) self.assertFalse(b.IsMergableWith(a)) def testNameMustBeString(self): with self.assertRaises(ValueError): value.Value(None, 42, 'unit', important=False, description=None, tir_label=None) def testUnitsMustBeString(self): with self.assertRaises(ValueError): value.Value(None, 'x', 42, important=False, description=None, tir_label=None) def testImportantMustBeBool(self): with self.assertRaises(ValueError): value.Value(None, 'x', 'unit', important='foo', description=None, tir_label=None) def testDescriptionMustBeStringOrNone(self): with self.assertRaises(ValueError): value.Value(None, 'x', 'unit', important=False, description=42, tir_label=None) def testInteractionRecordMustBeStringOrNone(self): with self.assertRaises(ValueError): value.Value(None, 'x', 'unit', important=False, description=None, tir_label=42) def testAsDictBaseKeys(self): v = 
ValueForAsDictTest(None, 'x', 'unit', important=True, description=None, tir_label='bar') d = v.AsDict() self.assertEquals(d, { 'name': 'x', 'type': 'baz', 'units': 'unit', 'important': True, 'tir_label': 'bar' }) def testAsDictWithPage(self): page0 = self.pages[0] v = ValueForAsDictTest(page0, 'x', 'unit', important=False, description=None, tir_label=None) d = v.AsDict() self.assertIn('page_id', d) def testAsDictWithoutPage(self): v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None, tir_label=None) d = v.AsDict() self.assertNotIn('page_id', d) def testAsDictWithDescription(self): v = ValueForAsDictTest(None, 'x', 'unit', important=False, description='Some description.', tir_label=None) d = v.AsDict() self.assertEqual('Some description.', d['description']) def testAsDictWithoutDescription(self): v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None, tir_label=None) self.assertNotIn('description', v.AsDict()) def testAsDictWithInteractionRecord(self): v = ValueForAsDictTest(None, 'x', 'unit', important=False, description='Some description.', tir_label='foo') d = v.AsDict() self.assertEqual('foo', d['tir_label']) def testAsDictWithoutInteractionRecord(self): v = ValueForAsDictTest(None, 'x', 'unit', important=False, description=None, tir_label=None) self.assertNotIn('tir_label', v.AsDict()) def testFromDictBaseKeys(self): d = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit' } v = value.Value.FromDict(d, None) self.assertEquals(v.name, 'x') self.assertTrue(isinstance(v, ValueForFromDictTest)) self.assertEquals(v.units, 'unit') def testFromDictWithPage(self): page0 = self.pages[0] page_dict = {page0.id: page0} d = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit', 'page_id': page0.id } v = value.Value.FromDict(d, page_dict) self.assertEquals(v.page.id, page0.id) def testFromDictWithoutPage(self): d = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit' } v = value.Value.FromDict(d, {}) self.assertEquals(v.page, None) def testFromDictWithDescription(self): d = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit', 'description': 'foo' } v = value.Value.FromDict(d, {}) self.assertEquals(v.description, 'foo') def testFromDictWithoutDescription(self): d = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit' } v = value.Value.FromDict(d, {}) self.assertEquals(v.description, None) def testFromDictWithInteractionRecord(self): d = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit', 'description': 'foo', 'tir_label': 'bar' } v = value.Value.FromDict(d, {}) self.assertEquals(v.tir_label, 'bar') def testFromDictWithoutInteractionRecord(self): d = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit' } v = value.Value.FromDict(d, {}) self.assertEquals(v.tir_label, None) def testListOfValuesFromListOfDicts(self): d0 = { 'type': 'value_for_from_dict_test', 'name': 'x', 'units': 'unit' } d1 = { 'type': 'value_for_from_dict_test', 'name': 'y', 'units': 'unit' } vs = value.Value.ListOfValuesFromListOfDicts([d0, d1], {}) self.assertEquals(vs[0].name, 'x') self.assertEquals(vs[1].name, 'y')
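# ---------------------------------------------------------------------------
# The FromDict tests above depend on a type-name registry: each Value
# subclass reports a JSON type string via GetJSONTypeName, and Value.FromDict
# dispatches on the dict's 'type' key to rebuild the right subclass. The
# sketch below shows that dispatch pattern in isolation; the class and method
# names here are illustrative assumptions, not telemetry's internals.
# ---------------------------------------------------------------------------
class SerializableBase(object):
  _registry = {}

  @classmethod
  def Register(cls, type_name):
    def Decorator(subclass):
      cls._registry[type_name] = subclass
      return subclass
    return Decorator

  @classmethod
  def FromDict(cls, d):
    # Look up the subclass by its type name, then rebuild it from the
    # remaining keys.
    kwargs = dict((k, v) for k, v in d.items() if k != 'type')
    return cls._registry[d['type']](**kwargs)


@SerializableBase.Register('sample')
class SampleValue(SerializableBase):
  def __init__(self, name, units):
    self.name = name
    self.units = units


v = SerializableBase.FromDict({'type': 'sample', 'name': 'x', 'units': 'unit'})
assert (v.name, v.units) == ('x', 'unit')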
import fnmatch import os.path import re import unittest import pydev_runfiles_unittest from pydevd_constants import * #@UnusedWildImport import time from pydev_runfiles_coverage import StartCoverageSupport #======================================================================================================================= # Configuration #======================================================================================================================= class Configuration: def __init__( self, files_or_dirs='', verbosity=2, include_tests=None, tests=None, port=None, files_to_tests=None, jobs=1, split_jobs='tests', coverage_output_dir=None, coverage_include=None, coverage_output_file=None, exclude_files=None, exclude_tests=None, include_files=None, ): self.files_or_dirs = files_or_dirs self.verbosity = verbosity self.include_tests = include_tests self.tests = tests self.port = port self.files_to_tests = files_to_tests self.jobs = jobs self.split_jobs = split_jobs if include_tests: assert isinstance(include_tests, (list, tuple)) if exclude_files: assert isinstance(exclude_files, (list, tuple)) if exclude_tests: assert isinstance(exclude_tests, (list, tuple)) self.exclude_files = exclude_files self.include_files = include_files self.exclude_tests = exclude_tests self.coverage_output_dir = coverage_output_dir self.coverage_include = coverage_include self.coverage_output_file = coverage_output_file def __str__(self): return '''Configuration - files_or_dirs: %s - verbosity: %s - tests: %s - port: %s - files_to_tests: %s - jobs: %s - split_jobs: %s - include_files: %s - include_tests: %s - exclude_files: %s - exclude_tests: %s - coverage_output_dir: %s - coverage_include_dir: %s - coverage_output_file: %s ''' % ( self.files_or_dirs, self.verbosity, self.tests, self.port, self.files_to_tests, self.jobs, self.split_jobs, self.include_files, self.include_tests, self.exclude_files, self.exclude_tests, self.coverage_output_dir, self.coverage_include, self.coverage_output_file, ) #======================================================================================================================= # parse_cmdline #======================================================================================================================= def parse_cmdline(argv=None): """ Parses command line and returns test directories, verbosity, test filter and test suites usage: runfiles.py -v|--verbosity <level> -t|--tests <Test.test1,Test2> dirs|files Multiprocessing options: jobs=number (with the number of jobs to be used to run the tests) split_jobs='module'|'tests' if == module, a given job will always receive all the tests from a module if == tests, the tests will be split independently of their originating module (default) --exclude_files = comma-separated list of patterns with files to exclude (fnmatch style) --include_files = comma-separated list of patterns with files to include (fnmatch style) --exclude_tests = comma-separated list of patterns with test names to exclude (fnmatch style) Note: if --tests is given, --exclude_files, --include_files and --exclude_tests are ignored! 
""" if argv is None: argv = sys.argv verbosity = 2 include_tests = None tests = None port = None jobs = 1 split_jobs = 'tests' files_to_tests = {} coverage_output_dir = None coverage_include = None exclude_files = None exclude_tests = None include_files = None from _pydev_getopt import gnu_getopt optlist, dirs = gnu_getopt( argv[1:], "", [ "verbosity=", "tests=", "port=", "config_file=", "jobs=", "split_jobs=", "include_tests=", "include_files=", "exclude_files=", "exclude_tests=", "coverage_output_dir=", "coverage_include=", ] ) for opt, value in optlist: if opt in ("-v", "--verbosity"): verbosity = value elif opt in ("-p", "--port"): port = int(value) elif opt in ("-j", "--jobs"): jobs = int(value) elif opt in ("-s", "--split_jobs"): split_jobs = value if split_jobs not in ('module', 'tests'): raise AssertionError('Expected split to be either "module" or "tests". Was :%s' % (split_jobs,)) elif opt in ("-d", "--coverage_output_dir",): coverage_output_dir = value.strip() elif opt in ("-i", "--coverage_include",): coverage_include = value.strip() elif opt in ("-I", "--include_tests"): include_tests = value.split(',') elif opt in ("-E", "--exclude_files"): exclude_files = value.split(',') elif opt in ("-F", "--include_files"): include_files = value.split(',') elif opt in ("-e", "--exclude_tests"): exclude_tests = value.split(',') elif opt in ("-t", "--tests"): tests = value.split(',') elif opt in ("-c", "--config_file"): config_file = value.strip() if os.path.exists(config_file): f = open(config_file, 'rU') try: config_file_contents = f.read() finally: f.close() if config_file_contents: config_file_contents = config_file_contents.strip() if config_file_contents: for line in config_file_contents.splitlines(): file_and_test = line.split('|') if len(file_and_test) == 2: file, test = file_and_test if DictContains(files_to_tests, file): files_to_tests[file].append(test) else: files_to_tests[file] = [test] else: sys.stderr.write('Could not find config file: %s\n' % (config_file,)) if type([]) != type(dirs): dirs = [dirs] ret_dirs = [] for d in dirs: if '|' in d: #paths may come from the ide separated by | ret_dirs.extend(d.split('|')) else: ret_dirs.append(d) verbosity = int(verbosity) if tests: if verbosity > 4: sys.stdout.write('--tests provided. 
Ignoring --exclude_files, --exclude_tests and --include_files\n') exclude_files = exclude_tests = include_files = None config = Configuration( ret_dirs, verbosity, include_tests, tests, port, files_to_tests, jobs, split_jobs, coverage_output_dir, coverage_include, exclude_files=exclude_files, exclude_tests=exclude_tests, include_files=include_files, ) if verbosity > 5: sys.stdout.write(str(config)+'\n') return config #======================================================================================================================= # PydevTestRunner #======================================================================================================================= class PydevTestRunner(object): """ finds and runs a file or directory of files as a unit test """ __py_extensions = ["*.py", "*.pyw"] __exclude_files = ["__init__.*"] #Just to check that only this attributes will be written to this file __slots__= [ 'verbosity', #Always used 'files_to_tests', #If this one is given, the ones below are not used 'files_or_dirs', #Files or directories received in the command line 'include_tests', #The filter used to collect the tests 'tests', #Strings with the tests to be run 'jobs', #Integer with the number of jobs that should be used to run the test cases 'split_jobs', #String with 'tests' or 'module' (how should the jobs be split) 'configuration', 'coverage', ] def __init__(self, configuration): self.verbosity = configuration.verbosity self.jobs = configuration.jobs self.split_jobs = configuration.split_jobs files_to_tests = configuration.files_to_tests if files_to_tests: self.files_to_tests = files_to_tests self.files_or_dirs = list(files_to_tests.keys()) self.tests = None else: self.files_to_tests = {} self.files_or_dirs = configuration.files_or_dirs self.tests = configuration.tests self.configuration = configuration self.__adjust_path() def __adjust_path(self): """ add the current file or directory to the python path """ path_to_append = None for n in xrange(len(self.files_or_dirs)): dir_name = self.__unixify(self.files_or_dirs[n]) if os.path.isdir(dir_name): if not dir_name.endswith("/"): self.files_or_dirs[n] = dir_name + "/" path_to_append = os.path.normpath(dir_name) elif os.path.isfile(dir_name): path_to_append = os.path.dirname(dir_name) else: msg = ("unknown type. \n%s\nshould be file or a directory.\n" % (dir_name)) raise RuntimeError(msg) if path_to_append is not None: #Add it as the last one (so, first things are resolved against the default dirs and #if none resolves, then we try a relative import). sys.path.append(path_to_append) def __is_valid_py_file(self, fname): """ tests that a particular file contains the proper file extension and is not in the list of files to exclude """ is_valid_fname = 0 for invalid_fname in self.__class__.__exclude_files: is_valid_fname += int(not fnmatch.fnmatch(fname, invalid_fname)) if_valid_ext = 0 for ext in self.__class__.__py_extensions: if_valid_ext += int(fnmatch.fnmatch(fname, ext)) return is_valid_fname > 0 and if_valid_ext > 0 def __unixify(self, s): """ stupid windows. converts the backslash to forwardslash for consistency """ return os.path.normpath(s).replace(os.sep, "/") def __importify(self, s, dir=False): """ turns directory separators into dots and removes the ".py*" extension so the string can be used as import statement """ if not dir: dirname, fname = os.path.split(s) if fname.count('.') > 1: #if there's a file named xxx.xx.py, it is not a valid module, so, let's not load it... 
return imp_stmt_pieces = [dirname.replace("\\", "/").replace("/", "."), os.path.splitext(fname)[0]] if len(imp_stmt_pieces[0]) == 0: imp_stmt_pieces = imp_stmt_pieces[1:] return ".".join(imp_stmt_pieces) else: #handle dir return s.replace("\\", "/").replace("/", ".") def __add_files(self, pyfiles, root, files): """ if files match, appends them to pyfiles. used by os.path.walk fcn """ for fname in files: if self.__is_valid_py_file(fname): name_without_base_dir = self.__unixify(os.path.join(root, fname)) pyfiles.append(name_without_base_dir) def find_import_files(self): """ return a list of files to import """ if self.files_to_tests: pyfiles = self.files_to_tests.keys() else: pyfiles = [] for base_dir in self.files_or_dirs: if os.path.isdir(base_dir): if hasattr(os, 'walk'): for root, dirs, files in os.walk(base_dir): self.__add_files(pyfiles, root, files) else: # jython2.1 is too old for os.walk! os.path.walk(base_dir, self.__add_files, pyfiles) elif os.path.isfile(base_dir): pyfiles.append(base_dir) if self.configuration.exclude_files or self.configuration.include_files: ret = [] for f in pyfiles: add = True basename = os.path.basename(f) if self.configuration.include_files: add = False for pat in self.configuration.include_files: if fnmatch.fnmatchcase(basename, pat): add = True break if not add: if self.verbosity > 3: sys.stdout.write('Skipped file: %s (did not match any include_files pattern: %s)\n' % (f, self.configuration.include_files)) elif self.configuration.exclude_files: for pat in self.configuration.exclude_files: if fnmatch.fnmatchcase(basename, pat): if self.verbosity > 3: sys.stdout.write('Skipped file: %s (matched exclude_files pattern: %s)\n' % (f, pat)) elif self.verbosity > 2: sys.stdout.write('Skipped file: %s\n' % (f,)) add = False break if add: if self.verbosity > 3: sys.stdout.write('Adding file: %s for test discovery.\n' % (f,)) ret.append(f) pyfiles = ret return pyfiles def __get_module_from_str(self, modname, print_exception, pyfile): """ Import the module in the given import path. * Returns the "final" module, so importing "coilib40.subject.visu" returns the "visu" module, not the "coilib40" as returned by __import__ """ try: mod = __import__(modname) for part in modname.split('.')[1:]: mod = getattr(mod, part) return mod except: if print_exception: import pydev_runfiles_xml_rpc import pydevd_io buf_err = pydevd_io.StartRedirect(keep_original_redirection=True, std='stderr') buf_out = pydevd_io.StartRedirect(keep_original_redirection=True, std='stdout') try: import traceback;traceback.print_exc() sys.stderr.write('ERROR: Module: %s could not be imported (file: %s).\n' % (modname, pyfile)) finally: pydevd_io.EndRedirect('stderr') pydevd_io.EndRedirect('stdout') pydev_runfiles_xml_rpc.notifyTest( 'error', buf_out.getvalue(), buf_err.getvalue(), pyfile, modname, 0) return None def find_modules_from_files(self, pyfiles): """ returns a list of modules given a list of files """ #let's make sure that the paths we want are in the pythonpath... 
imports = [(s, self.__importify(s)) for s in pyfiles] system_paths = [] for s in sys.path: system_paths.append(self.__importify(s, True)) ret = [] for pyfile, imp in imports: if imp is None: continue #can happen if a file is not a valid module choices = [] for s in system_paths: if imp.startswith(s): add = imp[len(s) + 1:] if add: choices.append(add) #sys.stdout.write(' ' + add + ' ') if not choices: sys.stdout.write('PYTHONPATH not found for file: %s\n' % imp) else: for i, import_str in enumerate(choices): print_exception = i == len(choices) - 1 mod = self.__get_module_from_str(import_str, print_exception, pyfile) if mod is not None: ret.append((pyfile, mod, import_str)) break return ret #=================================================================================================================== # GetTestCaseNames #=================================================================================================================== class GetTestCaseNames: """Yes, we need a class for that (cannot use outer context on jython 2.1)""" def __init__(self, accepted_classes, accepted_methods): self.accepted_classes = accepted_classes self.accepted_methods = accepted_methods def __call__(self, testCaseClass): """Return a sorted sequence of method names found within testCaseClass""" testFnNames = [] className = testCaseClass.__name__ if DictContains(self.accepted_classes, className): for attrname in dir(testCaseClass): #If a class is chosen, we select all the 'test' methods' if attrname.startswith('test') and hasattr(getattr(testCaseClass, attrname), '__call__'): testFnNames.append(attrname) else: for attrname in dir(testCaseClass): #If we have the class+method name, we must do a full check and have an exact match. if DictContains(self.accepted_methods, className + '.' + attrname): if hasattr(getattr(testCaseClass, attrname), '__call__'): testFnNames.append(attrname) #sorted() is not available in jython 2.1 testFnNames.sort() return testFnNames def _decorate_test_suite(self, suite, pyfile, module_name): if isinstance(suite, unittest.TestSuite): add = False suite.__pydev_pyfile__ = pyfile suite.__pydev_module_name__ = module_name for t in suite._tests: t.__pydev_pyfile__ = pyfile t.__pydev_module_name__ = module_name if self._decorate_test_suite(t, pyfile, module_name): add = True return add elif isinstance(suite, unittest.TestCase): return True else: return False def find_tests_from_modules(self, file_and_modules_and_module_name): """ returns the unittests given a list of modules """ #Use our own suite! 
        unittest.TestLoader.suiteClass = pydev_runfiles_unittest.PydevTestSuite
        loader = unittest.TestLoader()

        ret = []
        if self.files_to_tests:
            for pyfile, m, module_name in file_and_modules_and_module_name:
                accepted_classes = {}
                accepted_methods = {}
                tests = self.files_to_tests[pyfile]
                for t in tests:
                    accepted_methods[t] = t

                loader.getTestCaseNames = self.GetTestCaseNames(accepted_classes, accepted_methods)

                suite = loader.loadTestsFromModule(m)
                if self._decorate_test_suite(suite, pyfile, module_name):
                    ret.append(suite)

            return ret

        if self.tests:
            accepted_classes = {}
            accepted_methods = {}

            for t in self.tests:
                splitted = t.split('.')
                if len(splitted) == 1:
                    accepted_classes[t] = t

                elif len(splitted) == 2:
                    accepted_methods[t] = t

            loader.getTestCaseNames = self.GetTestCaseNames(accepted_classes, accepted_methods)

        for pyfile, m, module_name in file_and_modules_and_module_name:
            suite = loader.loadTestsFromModule(m)
            if self._decorate_test_suite(suite, pyfile, module_name):
                ret.append(suite)

        return ret

    def filter_tests(self, test_objs, internal_call=False):
        """ based on a filter name, only return those tests that have
            the test case names that match """
        if not internal_call:
            if not self.configuration.include_tests and not self.tests and not self.configuration.exclude_tests:
                #No need to filter if we have nothing to filter!
                return test_objs

            if self.verbosity > 1:
                if self.configuration.include_tests:
                    sys.stdout.write('Tests to include: %s\n' % (self.configuration.include_tests,))

                if self.tests:
                    sys.stdout.write('Tests to run: %s\n' % (self.tests,))

                if self.configuration.exclude_tests:
                    sys.stdout.write('Tests to exclude: %s\n' % (self.configuration.exclude_tests,))

        test_suite = []
        for test_obj in test_objs:

            if isinstance(test_obj, unittest.TestSuite):
                #Note: keep the suites as they are and just 'fix' the tests (so, don't use the iter_tests).
                if test_obj._tests:
                    test_obj._tests = self.filter_tests(test_obj._tests, True)
                    if test_obj._tests: #Only add the suite if we still have tests there.
                        test_suite.append(test_obj)

            elif isinstance(test_obj, unittest.TestCase):
                try:
                    testMethodName = test_obj._TestCase__testMethodName
                except AttributeError:
                    #changed in python 2.5
                    testMethodName = test_obj._testMethodName

                add = True
                if self.configuration.exclude_tests:
                    for pat in self.configuration.exclude_tests:
                        if fnmatch.fnmatchcase(testMethodName, pat):
                            if self.verbosity > 3:
                                sys.stdout.write('Skipped test: %s (matched exclude_tests pattern: %s)\n' % (testMethodName, pat))

                            elif self.verbosity > 2:
                                sys.stdout.write('Skipped test: %s\n' % (testMethodName,))

                            add = False
                            break

                if add:
                    if self.__match_tests(self.tests, test_obj, testMethodName):
                        include = True
                        if self.configuration.include_tests:
                            include = False
                            for pat in self.configuration.include_tests:
                                if fnmatch.fnmatchcase(testMethodName, pat):
                                    include = True
                                    break
                        if include:
                            test_suite.append(test_obj)
                        else:
                            if self.verbosity > 3:
                                sys.stdout.write('Skipped test: %s (did not match any include_tests pattern: %s)\n' % (testMethodName, self.configuration.include_tests))
        return test_suite

    def iter_tests(self, test_objs):
        #Note: not using yield because of Jython 2.1.
        tests = []
        for test_obj in test_objs:
            if isinstance(test_obj, unittest.TestSuite):
                tests.extend(self.iter_tests(test_obj._tests))

            elif isinstance(test_obj, unittest.TestCase):
                tests.append(test_obj)
        return tests

    def list_test_names(self, test_objs):
        names = []
        for tc in self.iter_tests(test_objs):
            try:
                testMethodName = tc._TestCase__testMethodName
            except AttributeError:
                #changed in python 2.5
                testMethodName = tc._testMethodName
            names.append(testMethodName)
        return names

    def __match_tests(self, tests, test_case, test_method_name):
        if not tests:
            return 1

        for t in tests:
            class_and_method = t.split('.')
            if len(class_and_method) == 1:
                #only class name
                if class_and_method[0] == test_case.__class__.__name__:
                    return 1

            elif len(class_and_method) == 2:
                if class_and_method[0] == test_case.__class__.__name__ and class_and_method[1] == test_method_name:
                    return 1

        return 0

    def __match(self, filter_list, name):
        """ returns whether a test name matches the test filter """
        if filter_list is None:
            return 1
        for f in filter_list:
            if re.match(f, name):
                return 1
        return 0

    def run_tests(self, handle_coverage=True):
        """ runs all tests """
        sys.stdout.write("Finding files... ")
        files = self.find_import_files()
        if self.verbosity > 3:
            sys.stdout.write('%s ... done.\n' % (self.files_or_dirs))
        else:
            sys.stdout.write('done.\n')
        sys.stdout.write("Importing test modules ... ")

        coverage_files, coverage = None, None #keep the names bound even when coverage is disabled
        if handle_coverage:
            coverage_files, coverage = StartCoverageSupport(self.configuration)

        file_and_modules_and_module_name = self.find_modules_from_files(files)
        sys.stdout.write("done.\n")

        all_tests = self.find_tests_from_modules(file_and_modules_and_module_name)
        all_tests = self.filter_tests(all_tests)

        test_suite = pydev_runfiles_unittest.PydevTestSuite(all_tests)
        import pydev_runfiles_xml_rpc
        pydev_runfiles_xml_rpc.notifyTestsCollected(test_suite.countTestCases())

        executed_in_parallel = False
        start_time = time.time()
        if self.jobs > 1:
            import pydev_runfiles_parallel

            #What may happen is that the number of jobs needed is lower than the number of jobs requested
            #(e.g.: 2 jobs were requested for running 1 test) -- in which case ExecuteTestsInParallel will
            #return False and won't run any tests.
            executed_in_parallel = pydev_runfiles_parallel.ExecuteTestsInParallel(
                all_tests, self.jobs, self.split_jobs, self.verbosity, coverage_files, self.configuration.coverage_include)

        if not executed_in_parallel:
            #If in coverage, we don't need to pass anything here (coverage is already enabled for this execution).
            runner = pydev_runfiles_unittest.PydevTextTestRunner(stream=sys.stdout, descriptions=1, verbosity=self.verbosity)
            sys.stdout.write('\n')
            runner.run(test_suite)

        if handle_coverage:
            coverage.stop()
            coverage.save()

        total_time = 'Finished in: %.2f secs.' % (time.time() - start_time,)
        pydev_runfiles_xml_rpc.notifyTestRunFinished(total_time)


#=======================================================================================================================
# main
#=======================================================================================================================
def main(configuration):
    PydevTestRunner(configuration).run_tests()
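
# The discovery and filter code above applies fnmatch.fnmatchcase twice: first to file
# basenames (find_import_files), then to test method names (filter_tests), with exclude
# patterns taking precedence over include patterns. A minimal standalone sketch of that
# double filter, using only the standard library (the names and patterns below are made
# up for illustration, not part of the runner):
import fnmatch

def filter_names(names, include=None, exclude=None):
    # exclude wins over include; include=None means "accept everything"
    kept = []
    for name in names:
        if exclude and any(fnmatch.fnmatchcase(name, pat) for pat in exclude):
            continue
        if include and not any(fnmatch.fnmatchcase(name, pat) for pat in include):
            continue
        kept.append(name)
    return kept

if __name__ == '__main__':
    names = ['test_login', 'test_logout', 'test_perf_download']
    # prints ['test_login', 'test_logout']
    print(filter_names(names, include=['test_*'], exclude=['*_perf_*']))
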
# -*- coding: utf-8 -*- from bda.plone.orders.interfaces import IGlobalNotificationText from bda.plone.orders.interfaces import IItemNotificationText from bda.plone.orders.interfaces import IOrdersExtensionLayer from bda.plone.shop import message_factory as _ from collective.z3cform.datagridfield import DataGridFieldFactory from collective.z3cform.datagridfield.registry import DictRow from plone.autoform.directives import widget from plone.supermodel import model from z3c.form.browser.checkbox import CheckBoxFieldWidget from zope import schema from zope.interface import Attribute from zope.interface import Interface from zope.interface import provider import zope.deferredimport zope.deferredimport.deprecated( "Import from bda.plone.orders.interfaces instead", IBuyable='bda.plone.orders:interfaces.IBuyable' ) class IShopExtensionLayer(IOrdersExtensionLayer): """Browser layer for bda.plone.shop """ class IPotentiallyBuyable(Interface): """Mark item as potentially buyable. Considered for providing action in UI. """ class IBuyablePeriod(Interface): """Define period in which an item is buyable. """ effective = Attribute(u"Buyable effective date") expires = Attribute(u"Buyable expires date") class IShopSettingsProvider(Interface): """A marker interface for plone.registry configuration interfaces """ class IShopSettings(model.Schema): """Shop controlpanel schema. """ admin_email = schema.ASCIILine( title=_(u"label_admin_email", default=u'Shop Admin E-Mail'), description=_(u"help_admin_email", default=u'No typos please....'), required=True, default="" ) # XXX: change value type to schema.TextLine (needs migration) admin_name = schema.ASCIILine( title=_(u"label_admin_name", default=u'Shop Admin Name'), description=_(u"help_admin_name", default=u'Name used for Shop E-Mails.'), required=True, default="" ) add_customer_role_to_new_users = schema.Bool( title=_( u'label_add_customer_role_to_new_users', default=u'Add Customer role to new Users' ), required=False, default=True ) # XXX: this is an article setting, move to IShopArticleSettings default_item_display_gross = schema.Bool( title=_( u'label_default_item_display_gross', default=u'Display Gross by default' ), required=False ) currency = schema.Choice( title=_(u"label_currency", default="Currency"), description=_( u"help_currency", default=u"Choose the default currency" ), vocabulary='bda.plone.shop.vocabularies.AvailableCurrenciesVocabulary' ) show_currency = schema.Choice( title=_( u"label_show_currency", default=u"Show the currency for items" ), description=_(u"help_show_currency", default=u""), vocabulary='bda.plone.shop.vocabularies.' 'CurrencyDisplayOptionsVocabulary' ) @provider(IShopSettingsProvider) class IShopCartSettings(model.Schema): """Shop controlpanel schema for cart settings. 
""" model.fieldset( 'cart', label=_(u'Cart', default=u'Cart'), fields=[ 'hide_cart_if_empty', 'max_artice_count', 'disable_max_article', 'summary_total_only', 'show_checkout', 'show_to_cart', ], ) hide_cart_if_empty = schema.Bool( title=_(u"label_hide_cart_if_empty", default=u"Hide Cart if empty"), description=_( u"help_hide_cart_if_empty", default=u"Hide cart if no items contained" ), default=False ) max_artice_count = schema.Int( title=_( u'label_max_artice_count', default=u'Maximum number articles in cart' ), description=_( u"help_max_artice_count", default=u"Maximum number of articles in cart if disable max " u"article flag set" ), required=False ) disable_max_article = schema.Bool( title=_(u"label_disable_max_article", default=u"Disable max article"), description=_( u"help_disable_max_article", default=u"No total number of items in cart limit" ), default=True ) summary_total_only = schema.Bool( title=_( u"label_summary_total_only", default=u"Cart Summary total only" ), description=_( u"help_summary_total_only", default=u"Show only total value in cart summary" ), default=False ) show_checkout = schema.Bool( title=_( u"label_show_checkout", default=u"Show checkout link in portlet" ), description=_(u"help_show_checkout", default=""), default=False ) show_to_cart = schema.Bool( title=_( u"label_show_to_cart", default=u"Show link to cart in portlet" ), description=_(u"help_show_to_cart", default=u""), default=True ) @provider(IShopSettingsProvider) class IShopArticleSettings(model.Schema): """Shop controlpanel schema for article settings. """ model.fieldset( 'article', label=_(u'Article', default=u'Article'), fields=[ 'quantity_units', 'default_item_net', 'default_item_quantity_unit', 'default_item_comment_enabled', 'default_item_comment_required', 'default_item_quantity_unit_float', 'default_item_cart_count_limit', 'default_item_stock_warning_threshold' ], ) widget('quantity_units', CheckBoxFieldWidget) quantity_units = schema.List( title=_( u"label_quantity_units", default=u"Specify quantity units allowed in shop." ), description=_( u"help_quantity_units", default=u'Quantity units (what the buyable items are measured in)' ), required=True, missing_value=set(), value_type=schema.Choice( vocabulary='bda.plone.shop.vocabularies.' 'AvailableQuantityUnitVocabulary' ) ) default_item_quantity_unit = schema.Choice( title=_( u"label_default_quantity_units", default=u"Specify default quantity name." ), description=_( u"help_default_quantity_unit", default=u'default measurement' ), vocabulary='bda.plone.shop.vocabularies.QuantityUnitVocabulary' ) default_item_net = schema.Float( title=_( u'label_default_item_net', default=u'Default Item net price' ), required=False ) default_item_comment_enabled = schema.Bool( title=_( u'label_default_item_comment_enabled', default='Comment enabled by default' ), required=False ) default_item_comment_required = schema.Bool( title=_( u'label_default_item_comment_required', default='Comment required by default' ), required=False ) default_item_quantity_unit_float = schema.Bool( title=_( u'label_default_item_quantity_unit_float', default='Quantity as float as default' ), required=False ) default_item_cart_count_limit = schema.Float( title=_( u'label_default_item_cart_count_limit', default='Quantity limit of an item in the cart.' 
), required=False ) default_item_stock_warning_threshold = schema.Float( title=_( u'label_default_item_stock_warning_threshold', default='Item stock warning threshold.', ), description=_( 'help_default_item_stock_warning_threshold', default=u'Shop administrator will be notified if stock is less ' u'than the specified threshold.' ), required=False ) @provider(IShopSettingsProvider) class IShopShippingSettings(model.Schema): """Shop controlpanel schema for article settings. """ model.fieldset( 'shipping', label=_(u'Shipping', default=u'Shipping'), fields=[ 'default_shipping_item_shippable', 'available_shipping_methods', 'shipping_method', 'shipping_vat', 'shipping_limit_from_gross', 'free_shipping_limit', 'flat_shipping_cost', 'item_shipping_cost', ], ) default_shipping_item_shippable = schema.Bool( title=_( u'label_default_shipping_item_shippable', default=u'Item Shippable by default'), description=_('help_default_shipping_item_shippable', default=u'Flag whether item is shippable by default, ' u'i.e. downloads are not') ) available_shipping_methods = schema.List( title=_(u"label_available_shipping_methods", default=u"Available Shipping Methods"), description=_(u"help_available_shipping_methods", default=u"Available shipping methods in checkout"), required=True, min_length=1, value_type=schema.Choice( vocabulary='bda.plone.shop.vocabularies.' 'AvailableShippingMethodsVocabulary' ) ) shipping_method = schema.Choice( title=_(u"label_shipping_method", default=u"Shipping Method"), description=_(u"help_shipping_method", default=u"Default shipping method in checkout"), vocabulary='bda.plone.shop.vocabularies.' 'ShippingMethodsVocabulary' ) shipping_vat = schema.Choice( title=_(u"label_shipping_vat", default=u'Shipping VAT'), description=_( u"help_shipping_vat", default=u"VAT used to calculate shipping costs" ), vocabulary='bda.plone.shop.vocabularies.VatVocabulary' ) # default shipping related settings free_shipping_limit = schema.Float( title=_(u"label_free_shipping_limit", default=u"Free Shipping Limit"), description=_(u"help_free_shipping_limit", default=u"Do not add shipping costs to orders " u"with price bigger than limit. If limit " u"applies to gross or net purchase price " u"depends on 'Calculate shipping limit from " u"gross' setting"), required=True, default=200.0 ) shipping_limit_from_gross = schema.Bool( title=_(u"label_shipping_limit_from_gross", default=u"Calculate shipping limit from gross"), description=_(u"help_shipping_limit_from_gross", default=u"If set to False, shipping limit gets " u"calculated from net price instead of gross.") ) flat_shipping_cost = schema.Float( title=_(u"label_flat_shipping_cost", default=u"Flat shipping cost"), description=_(u"help_flat_shipping_cost", default=u"Net flat shipping cost"), required=True, default=10.0 ) item_shipping_cost = schema.Float( title=_(u"label_item_shipping_cost", default=u"Item shipping cost"), description=_(u"help_item_shipping_cost", default=u"Net shipping cost per item in cart. If flat " u"shipping cost set and item shipping cost " u"below flat shipping cost, flat shipping cost " u"is used"), required=True, default=0.0 ) @provider(IShopSettingsProvider) class IShopTaxSettings(model.Schema): """Shop controlpanel schema for tax settings. 
""" model.fieldset( 'tax', label=_(u'Tax Settings'), fields=[ 'vat', 'default_item_vat', ], ) widget('vat', CheckBoxFieldWidget) vat = schema.List( title=_(u"label_vat", default=u'VAT in %'), description=_( u"help_vat", default=u"Specify all allowed vat settings" ), required=True, missing_value=set(), value_type=schema.Choice( vocabulary='bda.plone.shop.vocabularies.AvailableVatVocabulary' ) ) default_item_vat = schema.Choice( title=_(u"label_default_vat", default=u'Default Value added tax name'), description=_( u"help_default_vat", default=u"Specify default vat name" ), vocabulary='bda.plone.shop.vocabularies.VatVocabulary' ) class ILanguageAwareTextRow(model.Schema): lang = schema.Choice( title=_(u'language', default=u'Language'), vocabulary='plone.app.vocabularies.SupportedContentLanguages', required=False ) text = schema.Text( title=_(u'text', default=u'Text'), required=False ) @provider(IShopSettingsProvider) class INotificationTextSettings( model.Schema, IGlobalNotificationText, IItemNotificationText ): model.fieldset( 'notifications', label=_(u'Notifications', default=u'Notifications'), fields=[ 'global_order_text', 'global_overbook_text', 'order_text', 'overbook_text', ], ) widget('order_text', DataGridFieldFactory) order_text = schema.List( title=_( u"label_site_item_notification_text", default=u"Default notification text for items in order " u"confirmation mail" ), value_type=DictRow( title=_(u'order_text', default='Order Text'), schema=ILanguageAwareTextRow), required=False ) widget('overbook_text', DataGridFieldFactory) overbook_text = schema.List( title=_( u"label_site_item_overbook_notification_text", default=u"Default notification text for items in order " u"confirmation mail if item out of stock." ), value_type=DictRow( title=_(u'overbook_text', default='Overbook Text'), schema=ILanguageAwareTextRow), required=False ) widget('global_order_text', DataGridFieldFactory) global_order_text = schema.List( title=_( u"label_site_global_notification_text", default=u"Overall notification text for order confirmation mail" ), value_type=DictRow( title=_(u'order_text', default='Order Text'), schema=ILanguageAwareTextRow), required=False ) widget('global_overbook_text', DataGridFieldFactory) global_overbook_text = schema.List( title=_( u"label_site_global_overbook_notification_text", default=u"Overall notification text for order confirmation mail " u"if order contains items out of stock" ), value_type=DictRow( title=_(u'overbook_text', default='Overbook Text'), schema=ILanguageAwareTextRow), required=False ) class ILanguageAndPaymentAwareTextRow(model.Schema): payment = schema.Choice( title=_(u'payment', default=u'Payment'), vocabulary='bda.plone.shop.vocabularies.PaymentMethodsVocabulary', required=False ) lang = schema.Choice( title=_(u'language', default=u'Language'), vocabulary='plone.app.vocabularies.SupportedContentLanguages', required=False ) text = schema.Text( title=_(u'text', default=u'Text'), required=False ) @provider(IShopSettingsProvider) class IPaymentTextSettings(model.Schema): # XXX: rename to IPaymentSettings model.fieldset( 'payment', label=_(u'Payment', default=u'Payment'), fields=[ 'available_payment_methods', 'payment_method', 'skip_payment_if_order_contains_reservations', 'payment_text', 'cash_on_delivery_costs', 'percent_surcharge', 'fixed_surcharge', 'surchargeable_payment_methods', 'surcharge_vat', ], ) available_payment_methods = schema.List( title=_(u"label_available_payment_methods", default=u"Available Payment Methods"), 
description=_(u"help_available_payment_methods", default=u"Available payment methods in checkout"), required=True, min_length=1, value_type=schema.Choice( vocabulary='bda.plone.shop.vocabularies.' 'AvailablePaymentMethodsVocabulary' ) ) payment_method = schema.Choice( title=_(u"label_payment_method", default=u"Payment Method"), description=_(u"help_payment_method", default=u"Default payment method in checkout"), vocabulary='bda.plone.shop.vocabularies.' 'PaymentMethodsVocabulary' ) skip_payment_if_order_contains_reservations = schema.Bool( title=_( u'label_skip_payment_if_order_contains_reservations', default=u'Skip Payment if order contains reservations' ), required=False ) widget('payment_text', DataGridFieldFactory) payment_text = schema.List( title=_( u"label_payment_text", default=u"Payment Texts" ), value_type=DictRow( title=_(u'payment_text', default='Payment Text'), schema=ILanguageAndPaymentAwareTextRow), required=False ) cash_on_delivery_costs = schema.Float( title=_( u'label_cash_on_delivery_costs', default=u'Cash on delivery costs in gross' ), required=False ) fixed_surcharge = schema.Float( title=_( u'fixed_surcharge', default=u'Fixed surcharge to be added to selected payment methods' ), required=False ) percent_surcharge = schema.Float( title=_( u'percent_surcharge', default=u'Percentage surcharge to be added to selected payment ' u'methods' ), required=False ) surchargeable_payment_methods = schema.List( title=_(u"label_surchargeable_payment_methods", default=u"Payment Methods to have surcharge added"), description=_(u"help_surchareable_payment_methods", default=u"Payment methods selected here will have the " u"fixed and percentage surcharge amounts set " u"above added to the order total and displayed " u"in the cart"), required=True, min_length=1, value_type=schema.Choice( vocabulary='bda.plone.shop.vocabularies.' 'SurchargeablePaymentMethodsVocabulary' ) ) surcharge_vat = schema.Choice( title=_(u"label_surcharge_vat", default=u'Surcharge VAT'), description=_( u"help_surcharge_vat", default=u"VAT used to calculate surcharge amount" ), vocabulary='bda.plone.shop.vocabularies.VatVocabulary' )
#MenuTitle: Stitcher
# -*- coding: utf-8 -*-
__doc__="""
Turn your paths into dotted lines, and specify a component as dot, i.e. stitch components onto paths in selected glyphs. Respects origin anchor in your source component.
"""

from GlyphsApp import Glyphs, GSComponent, MOVE
from Foundation import NSPoint, NSUserDefaults
import math, vanilla, traceback

def deleteAllComponents( thisLayer ):
    try:
        # print "-- Deleting %i existing components." % ( len(thisLayer.components) ) #DEBUG
        while len(thisLayer.components) > 0:
            # print "  Deleting component", thisLayer.components[0].componentName
            del thisLayer.components[0]

        return True

    except Exception as e:
        print traceback.format_exc()
        return False

def bezier( A, B, C, D, t ):
    x1, y1 = A.x, A.y
    x2, y2 = B.x, B.y
    x3, y3 = C.x, C.y
    x4, y4 = D.x, D.y

    x = x1*(1-t)**3 + x2*3*t*(1-t)**2 + x3*3*t**2*(1-t) + x4*t**3
    y = y1*(1-t)**3 + y2*3*t*(1-t)**2 + y3*3*t**2*(1-t) + y4*t**3

    return x, y

def distance( node1, node2 ):
    return math.hypot( node1.x - node2.x, node1.y - node2.y )

def getFineGrainPointsForPath( thisPath, distanceBetweenDots ):
    try:
        layerCoords = [ ]
        pathSegments = thisPath.segments

        # fix for new way open paths are stored (including MOVE and LINE segments)
        try:
            if thisPath.closed == False and thisPath.segments[0][0].type == MOVE:
                pathSegments = thisPath.segments[2:]
        except:
            # seems not to work/be necessary anymore...?
            pass

        for thisSegment in pathSegments:

            if len( thisSegment ) == 2:
                # straight line:
                beginPoint = thisSegment[0].pointValue()
                endPoint = thisSegment[1].pointValue()

                dotsPerSegment = int( ( distance( beginPoint, endPoint ) / distanceBetweenDots ) * 11 )

                for i in range( dotsPerSegment ):
                    x = float( endPoint.x * i ) / dotsPerSegment + float( beginPoint.x * ( dotsPerSegment-i ) ) / dotsPerSegment
                    y = float( endPoint.y * i ) / dotsPerSegment + float( beginPoint.y * ( dotsPerSegment-i ) ) / dotsPerSegment
                    layerCoords += [ NSPoint( x, y ) ]

            elif len( thisSegment ) == 4:
                # curved segment:
                bezierPointA = thisSegment[0].pointValue()
                bezierPointB = thisSegment[1].pointValue()
                bezierPointC = thisSegment[2].pointValue()
                bezierPointD = thisSegment[3].pointValue()

                bezierLength = distance( bezierPointA, bezierPointB ) + distance( bezierPointB, bezierPointC ) + distance( bezierPointC, bezierPointD ) # very rough approximation, up to 11% too long
                dotsPerSegment = int( ( bezierLength / distanceBetweenDots ) * 10 )

                for i in range( 1, dotsPerSegment ):
                    t = float( i ) / float( dotsPerSegment )
                    x, y = bezier( bezierPointA, bezierPointB, bezierPointC, bezierPointD, t )
                    layerCoords += [ NSPoint( x, y ) ]

                layerCoords += [ NSPoint( bezierPointD.x, bezierPointD.y ) ]

        return layerCoords
    except Exception as e:
        print traceback.format_exc()

def dotCoordsOnPath( thisPath, distanceBetweenDots ):
    try:
        dotPoints = [ thisPath.nodes[0] ]
        fineGrainPoints = getFineGrainPointsForPath( thisPath, distanceBetweenDots )

        myLastPoint = dotPoints[-1]

        for thisPoint in fineGrainPoints:
            if distance( myLastPoint, thisPoint ) >= distanceBetweenDots:
                dotPoints += [thisPoint]
                myLastPoint = thisPoint
                # print "-- Placed %s at %s." % ( componentName, str(thisPoint) ) # DEBUG
            else:
                pass

        return dotPoints
    except Exception as e:
        print traceback.format_exc()

def placeDots( thisLayer, useBackground, componentName, distanceBetweenDots ):
    try:
        # find out component offset:
        xOffset = 0.0
        yOffset = 0.0
        Font = thisLayer.parent.parent
        FontMasterID = thisLayer.associatedMasterId
        sourceComponent = Font.glyphs[ componentName ]

        if sourceComponent:
            try:
                sourceAnchor = sourceComponent.layers[thisLayer.associatedMasterId].anchors["origin"]
                xOffset, yOffset = -sourceAnchor.position.x, -sourceAnchor.position.y
            except:
                pass
                #print "-- Note: no origin anchor in '%s'." % ( componentName )

            # use background if specified:
            if useBackground:
                sourceLayer = thisLayer.background
            else:
                sourceLayer = thisLayer

            for thisPath in sourceLayer.paths:
                for thisPoint in dotCoordsOnPath( thisPath, distanceBetweenDots ):
                    newComp = GSComponent( componentName, NSPoint( thisPoint.x + xOffset, thisPoint.y + yOffset ) )
                    thisLayer.addComponent_( newComp )

            return True
        else:
            return False

    except Exception as e:
        print traceback.format_exc()
        return False

def minimumOfOne( value ):
    try:
        returnValue = float( value )
        if returnValue < 1.0:
            returnValue = 1.0
    except:
        returnValue = 1.0

    return returnValue

def process( thisLayer, deleteComponents, componentName, distanceBetweenDots, useBackground ):
    try:
        if deleteComponents:
            if not deleteAllComponents( thisLayer ):
                print "-- Error deleting previously placed components."

        if useBackground and len( thisLayer.paths ) > 0:
            if thisLayer.className() == "GSBackgroundLayer":
                thisLayer = thisLayer.foreground()
            thisLayer.background.clear()
            for thisPath in thisLayer.paths:
                thisLayer.background.paths.append( thisPath.copy() )
            thisLayer.paths = []

        if not placeDots( thisLayer, useBackground, componentName, distanceBetweenDots ):
            print "-- Could not place components at intervals of %.1f units." % distanceBetweenDots
    except Exception as e:
        print traceback.format_exc()
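
# A standalone sketch of the spacing logic in dotCoordsOnPath above: oversample the
# segment, then keep only points that are at least distanceBetweenDots away from the
# last kept point. Shown for a plain polyline with made-up coordinates, and kept
# commented out so it does not run inside Glyphs:
#
#   import math
#
#   def thin_points( points, min_dist ):
#       kept = [ points[0] ]
#       for p in points[1:]:
#           if math.hypot( p[0] - kept[-1][0], p[1] - kept[-1][1] ) >= min_dist:
#               kept.append( p )
#       return kept
#
#   dense = [ ( x * 0.5, 0.0 ) for x in range( 200 ) ]  # oversampled line of length 100
#   print thin_points( dense, 30.0 )                    # keeps roughly one point per 30 units
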
class ComponentOnLines( object ):
    def __init__( self ):
        windowHeight = 150

        self.w = vanilla.FloatingWindow( (350, windowHeight), "Stitcher", minSize=(300, windowHeight), maxSize=(500, windowHeight), autosaveName="com.mekkablue.ComponentsOnNodes.mainwindow" )

        self.w.text_1 = vanilla.TextBox( (15-1, 12+2, 15+95, 14), "Place component:", sizeStyle='small' )
        self.w.text_2 = vanilla.TextBox( (15-1, 12+25+2, 15+95, 14), "At intervals of:", sizeStyle='small' )

        self.w.componentName = vanilla.EditText( (15+100, 12-1, -15, 19), "_circle", sizeStyle='small', callback=self.SavePreferences )
        self.w.sliderMin = vanilla.EditText( ( 15+100, 12+25-1, 50, 19), "30", sizeStyle='small', callback=self.SavePreferences )
        self.w.sliderMax = vanilla.EditText( (-15-50, 12+25-1, -15, 19), "60", sizeStyle='small', callback=self.SavePreferences )
        self.w.intervalSlider = vanilla.Slider((15+100+50+10, 12+25, -15-50-10, 19), value=0, minValue=0.0, maxValue=1.0, sizeStyle='small', callback=self.ComponentOnLinesMain )
        #self.w.replaceComponents = vanilla.CheckBox((15+3, 12+25+25, -15, 19), "Replace existing components", value=True, sizeStyle='small', callback=self.SavePreferences )
        self.w.liveSlider = vanilla.CheckBox((15+3, 12+25+25, -15, 19), "Live slider", value=False, sizeStyle='small' )
        self.w.useBackground = vanilla.CheckBox((15+3, 12+25+25+20, -15, 19), "Keep paths in background", value=True, sizeStyle='small', callback=self.SavePreferences )

        self.w.runButton = vanilla.Button((-80-15, -20-15, -15, -15), "Stitch", sizeStyle='regular', callback=self.ComponentOnLinesMain )
        self.w.setDefaultButton( self.w.runButton )

        try:
            self.LoadPreferences()
        except:
            pass

        self.w.open()
        self.w.makeKey()

    def SavePreferences( self, sender ):
        try:
            Glyphs.defaults["com.mekkablue.ComponentOnLines.componentName"] = self.w.componentName.get()
            Glyphs.defaults["com.mekkablue.ComponentOnLines.sliderMin"] = self.w.sliderMin.get()
            Glyphs.defaults["com.mekkablue.ComponentOnLines.sliderMax"] = self.w.sliderMax.get()
            Glyphs.defaults["com.mekkablue.ComponentOnLines.intervalSlider"] = self.w.intervalSlider.get()
            Glyphs.defaults["com.mekkablue.ComponentOnLines.liveSlider"] = self.w.liveSlider.get()
            #Glyphs.defaults["com.mekkablue.ComponentOnLines.replaceComponents"] = self.w.replaceComponents.get()
            Glyphs.defaults["com.mekkablue.ComponentOnLines.useBackground"] = self.w.useBackground.get()
        except:
            print traceback.format_exc()
            return False

        return True

    def LoadPreferences( self ):
        try:
            NSUserDefaults.standardUserDefaults().registerDefaults_(
                {
                    "com.mekkablue.ComponentOnLines.componentName": "_circle",
                    "com.mekkablue.ComponentOnLines.sliderMin": "30",
                    "com.mekkablue.ComponentOnLines.sliderMax": "60"
                }
            )
            self.w.componentName.set( Glyphs.defaults["com.mekkablue.ComponentOnLines.componentName"] )
            self.w.sliderMin.set( Glyphs.defaults["com.mekkablue.ComponentOnLines.sliderMin"] )
            self.w.sliderMax.set( Glyphs.defaults["com.mekkablue.ComponentOnLines.sliderMax"] )
            self.w.intervalSlider.set( Glyphs.defaults["com.mekkablue.ComponentOnLines.intervalSlider"] )
            self.w.liveSlider.set( Glyphs.defaults["com.mekkablue.ComponentOnLines.liveSlider"] )
            #self.w.replaceComponents.set( Glyphs.defaults["com.mekkablue.ComponentOnLines.replaceComponents"] )
            self.w.useBackground.set( Glyphs.defaults["com.mekkablue.ComponentOnLines.useBackground"] )
        except:
            print traceback.format_exc()
            return False

        return True

    def ComponentOnLinesMain( self, sender ):
        try:
            if ( bool(self.w.liveSlider.get()) and sender == self.w.intervalSlider ) or sender != self.w.intervalSlider:
                Font = Glyphs.font
                FontMaster = Font.selectedFontMaster
                selectedLayers = Font.selectedLayers
                # deleteComponents = bool( self.w.replaceComponents.get() )
                deleteComponents = True
                componentName = self.w.componentName.get()
                sliderMin = minimumOfOne( self.w.sliderMin.get() )
                sliderMax = minimumOfOne( self.w.sliderMax.get() )

                sliderPos = float( self.w.intervalSlider.get() )
                distanceBetweenDots = sliderMin * ( 1.0 - sliderPos ) + sliderMax * sliderPos

                useBackground = bool( self.w.useBackground.get() )

                Font.disableUpdateInterface()

                for thisLayer in selectedLayers:
                    thisGlyph = thisLayer.parent
                    # print "Processing", thisGlyph.name
                    thisGlyph.beginUndo()
                    process( thisLayer, deleteComponents, componentName, distanceBetweenDots, useBackground )
                    thisGlyph.endUndo()

                Font.enableUpdateInterface()

                if not self.SavePreferences( self ):
                    print "Note: could not write preferences."

                # self.w.close()
        except:
            print traceback.format_exc()

ComponentOnLines()
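
# The bezier() helper above evaluates a cubic Bezier curve in Bernstein form:
# P(t) = (1-t)^3 A + 3t(1-t)^2 B + 3t^2(1-t) C + t^3 D. A minimal self-contained check
# of that formula, independent of Glyphs (the control points are made up; collinear
# control points must keep the curve on their line):

def cubic_bezier(a, b, c, d, t):
    # a..d are (x, y) tuples; returns the point at parameter t in [0, 1]
    mt = 1.0 - t
    return tuple(
        a[i]*mt**3 + 3.0*b[i]*t*mt**2 + 3.0*c[i]*t**2*mt + d[i]*t**3
        for i in (0, 1)
    )

if __name__ == '__main__':
    a, b, c, d = (0, 0), (10, 10), (20, 20), (30, 30)
    for t in (0.0, 0.25, 0.5, 1.0):
        x, y = cubic_bezier(a, b, c, d, t)
        assert abs(x - y) < 1e-9  # collinear control points stay on y = x
    print('endpoint at t=1:', cubic_bezier(a, b, c, d, 1.0))  # (30.0, 30.0)
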
#!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2018 The Machinecoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Machinecoin test framework primitive and message structures CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: data structures that should map to corresponding structures in machinecoin/primitives msg_block, msg_tx, msg_headers, etc.: data structures that represent network messages ser_*, deser_*: functions that handle serialization/deserialization.""" from codecs import encode import copy import hashlib from io import BytesIO import random import socket import struct import time from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, bytes_to_hex_str MIN_VERSION_SUPPORTED = 60001 MY_VERSION = 70014 # past bip-31 for ping/pong MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) MAX_INV_SZ = 50000 MAX_LOCATOR_SZ = 101 MAX_BLOCK_BASE_SIZE = 1000000 COIN = 100000000 # 1 mac in satoshis BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out NODE_NETWORK = (1 << 0) # NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) NODE_WITNESS = (1 << 3) NODE_NETWORK_LIMITED = (1 << 10) MSG_TX = 1 MSG_BLOCK = 2 MSG_WITNESS_FLAG = 1 << 30 MSG_TYPE_MASK = 0xffffffff >> 2 # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def ripemd160(s): return hashlib.new('ripemd160', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(l): r = b"" if l < 253: r = struct.pack("B", l) elif l < 0x10000: r = struct.pack("<BH", 253, l) elif l < 0x100000000: r = struct.pack("<BI", 254, l) else: r = struct.pack("<BQ", 255, l) return r def deser_compact_size(f): nit = struct.unpack("<B", f.read(1))[0] if nit == 253: nit = struct.unpack("<H", f.read(2))[0] elif nit == 254: nit = struct.unpack("<I", f.read(4))[0] elif nit == 255: nit = struct.unpack("<Q", f.read(8))[0] return nit def deser_string(f): nit = deser_compact_size(f) return f.read(nit) def ser_string(s): return ser_compact_size(len(s)) + s def deser_uint256(f): r = 0 for i in range(8): t = struct.unpack("<I", f.read(4))[0] r += t << (i * 32) return r def ser_uint256(u): rs = b"" for i in range(8): rs += struct.pack("<I", u & 0xFFFFFFFF) u >>= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("<IIIIIIII", s[:32]) for i in range(8): r += t[i] << (i * 32) return r def uint256_from_compact(c): nbytes = (c >> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v def deser_vector(f, c): nit = deser_compact_size(f) r = [] for i in range(nit): t = c() t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector (we use this for serializing the vector of transactions # for a witness block). 
def ser_vector(l, ser_function_name=None): r = ser_compact_size(len(l)) for i in l: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(l): r = ser_compact_size(len(l)) for i in l: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(l): r = ser_compact_size(len(l)) for sv in l: r += ser_string(sv) return r # Deserialize from a hex string representation (eg from RPC) def FromHex(obj, hex_string): obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj # Convert a binary-serializable object to hex (eg for submission via RPC) def ToHex(obj): return bytes_to_hex_str(obj.serialize()) # Objects that map to machinecoind objects, which can be serialized/deserialized class CAddress(): def __init__(self): self.time = 0 self.nServices = 1 self.pchReserved = b"\x00" * 10 + b"\xff" * 2 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f, with_time=True): if with_time: self.time = struct.unpack("<i", f.read(4))[0] self.nServices = struct.unpack("<Q", f.read(8))[0] self.pchReserved = f.read(12) self.ip = socket.inet_ntoa(f.read(4)) self.port = struct.unpack(">H", f.read(2))[0] def serialize(self, with_time=True): r = b"" if with_time: r += struct.pack("<i", self.time) r += struct.pack("<Q", self.nServices) r += self.pchReserved r += socket.inet_aton(self.ip) r += struct.pack(">H", self.port) return r def __repr__(self): return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, self.ip, self.port) class CInv(): typemap = { 0: "Error", 1: "TX", 2: "Block", 1|MSG_WITNESS_FLAG: "WitnessTx", 2|MSG_WITNESS_FLAG : "WitnessBlock", 4: "CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack("<i", f.read(4))[0] self.hash = deser_uint256(f) def serialize(self): r = b"" r += struct.pack("<i", self.type) r += ser_uint256(self.hash) return r def __repr__(self): return "CInv(type=%s hash=%064x)" \ % (self.typemap[self.type], self.hash) class CBlockLocator(): def __init__(self): self.nVersion = MY_VERSION self.vHave = [] def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.vHave = deser_uint256_vector(f) def serialize(self): r = b"" r += struct.pack("<i", self.nVersion) r += ser_uint256_vector(self.vHave) return r def __repr__(self): return "CBlockLocator(nVersion=%i vHave=%s)" \ % (self.nVersion, repr(self.vHave)) class COutPoint(): def __init__(self, hash=0, n=0): self.hash = hash self.n = n def deserialize(self, f): self.hash = deser_uint256(f) self.n = struct.unpack("<I", f.read(4))[0] def serialize(self): r = b"" r += ser_uint256(self.hash) r += struct.pack("<I", self.n) return r def __repr__(self): return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n) class CTxIn(): def __init__(self, outpoint=None, scriptSig=b"", nSequence=0): if outpoint is None: self.prevout = COutPoint() else: self.prevout = outpoint self.scriptSig = scriptSig self.nSequence = nSequence def deserialize(self, f): self.prevout = COutPoint() self.prevout.deserialize(f) self.scriptSig = deser_string(f) self.nSequence = struct.unpack("<I", f.read(4))[0] def serialize(self): r = b"" r += self.prevout.serialize() r += ser_string(self.scriptSig) r += struct.pack("<I", self.nSequence) return r def __repr__(self): return 
"CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \ % (repr(self.prevout), bytes_to_hex_str(self.scriptSig), self.nSequence) class CTxOut(): def __init__(self, nValue=0, scriptPubKey=b""): self.nValue = nValue self.scriptPubKey = scriptPubKey def deserialize(self, f): self.nValue = struct.unpack("<q", f.read(8))[0] self.scriptPubKey = deser_string(f) def serialize(self): r = b"" r += struct.pack("<q", self.nValue) r += ser_string(self.scriptPubKey) return r def __repr__(self): return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \ % (self.nValue // COIN, self.nValue % COIN, bytes_to_hex_str(self.scriptPubKey)) class CScriptWitness(): def __init__(self): # stack is a vector of strings self.stack = [] def __repr__(self): return "CScriptWitness(%s)" % \ (",".join([bytes_to_hex_str(x) for x in self.stack])) def is_null(self): if self.stack: return False return True class CTxInWitness(): def __init__(self): self.scriptWitness = CScriptWitness() def deserialize(self, f): self.scriptWitness.stack = deser_string_vector(f) def serialize(self): return ser_string_vector(self.scriptWitness.stack) def __repr__(self): return repr(self.scriptWitness) def is_null(self): return self.scriptWitness.is_null() class CTxWitness(): def __init__(self): self.vtxinwit = [] def deserialize(self, f): for i in range(len(self.vtxinwit)): self.vtxinwit[i].deserialize(f) def serialize(self): r = b"" # This is different than the usual vector serialization -- # we omit the length of the vector, which is required to be # the same length as the transaction's vin vector. for x in self.vtxinwit: r += x.serialize() return r def __repr__(self): return "CTxWitness(%s)" % \ (';'.join([repr(x) for x in self.vtxinwit])) def is_null(self): for x in self.vtxinwit: if not x.is_null(): return False return True class CTransaction(): def __init__(self, tx=None): if tx is None: self.nVersion = 1 self.vin = [] self.vout = [] self.wit = CTxWitness() self.nLockTime = 0 self.sha256 = None self.hash = None else: self.nVersion = tx.nVersion self.vin = copy.deepcopy(tx.vin) self.vout = copy.deepcopy(tx.vout) self.nLockTime = tx.nLockTime self.sha256 = tx.sha256 self.hash = tx.hash self.wit = copy.deepcopy(tx.wit) def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.vin = deser_vector(f, CTxIn) flags = 0 if len(self.vin) == 0: flags = struct.unpack("<B", f.read(1))[0] # Not sure why flags can't be zero, but this # matches the implementation in machinecoind if (flags != 0): self.vin = deser_vector(f, CTxIn) self.vout = deser_vector(f, CTxOut) else: self.vout = deser_vector(f, CTxOut) if flags != 0: self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))] self.wit.deserialize(f) self.nLockTime = struct.unpack("<I", f.read(4))[0] self.sha256 = None self.hash = None def serialize_without_witness(self): r = b"" r += struct.pack("<i", self.nVersion) r += ser_vector(self.vin) r += ser_vector(self.vout) r += struct.pack("<I", self.nLockTime) return r # Only serialize with witness when explicitly called for def serialize_with_witness(self): flags = 0 if not self.wit.is_null(): flags |= 1 r = b"" r += struct.pack("<i", self.nVersion) if flags: dummy = [] r += ser_vector(dummy) r += struct.pack("<B", flags) r += ser_vector(self.vin) r += ser_vector(self.vout) if flags & 1: if (len(self.wit.vtxinwit) != len(self.vin)): # vtxinwit must have the same length as vin self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)] for i in range(len(self.wit.vtxinwit), len(self.vin)): self.wit.vtxinwit.append(CTxInWitness()) r += 
self.wit.serialize() r += struct.pack("<I", self.nLockTime) return r # Regular serialization is with witness -- must explicitly # call serialize_without_witness to exclude witness data. def serialize(self): return self.serialize_with_witness() # Recalculate the txid (transaction hash without witness) def rehash(self): self.sha256 = None self.calc_sha256() return self.hash # We will only cache the serialization without witness in # self.sha256 and self.hash -- those are expected to be the txid. def calc_sha256(self, with_witness=False): if with_witness: # Don't cache the result, just return it return uint256_from_str(hash256(self.serialize_with_witness())) if self.sha256 is None: self.sha256 = uint256_from_str(hash256(self.serialize_without_witness())) self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii') def is_valid(self): self.calc_sha256() for tout in self.vout: if tout.nValue < 0 or tout.nValue > 21000000 * COIN: return False return True def __repr__(self): return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \ % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) class CBlockHeader(): def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.hashPrevBlock = deser_uint256(f) self.hashMerkleRoot = deser_uint256(f) self.nTime = struct.unpack("<I", f.read(4))[0] self.nBits = struct.unpack("<I", f.read(4))[0] self.nNonce = struct.unpack("<I", f.read(4))[0] self.sha256 = None self.hash = None def serialize(self): r = b"" r += struct.pack("<i", self.nVersion) r += ser_uint256(self.hashPrevBlock) r += ser_uint256(self.hashMerkleRoot) r += struct.pack("<I", self.nTime) r += struct.pack("<I", self.nBits) r += struct.pack("<I", self.nNonce) return r def calc_sha256(self): if self.sha256 is None: r = b"" r += struct.pack("<i", self.nVersion) r += ser_uint256(self.hashPrevBlock) r += ser_uint256(self.hashMerkleRoot) r += struct.pack("<I", self.nTime) r += struct.pack("<I", self.nBits) r += struct.pack("<I", self.nNonce) self.sha256 = uint256_from_str(hash256(r)) self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii') def rehash(self): self.sha256 = None self.calc_sha256() return self.sha256 def __repr__(self): return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce) class CBlock(CBlockHeader): def __init__(self, header=None): super(CBlock, self).__init__(header) self.vtx = [] def deserialize(self, f): super(CBlock, self).deserialize(f) self.vtx = deser_vector(f, CTransaction) def serialize(self, with_witness=False): r = b"" r += super(CBlock, self).serialize() if with_witness: r += ser_vector(self.vtx, "serialize_with_witness") else: r += ser_vector(self.vtx, "serialize_without_witness") return r # Calculate the merkle root given a vector of transaction hashes @classmethod def get_merkle_root(cls, hashes): while 
len(hashes) > 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i+1, len(hashes)-1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def calc_witness_merkle_root(self): # For witness root purposes, the hash of the # coinbase, with witness, is defined to be 0...0 hashes = [ser_uint256(0)] for tx in self.vtx[1:]: # Calculate the hashes with witness data hashes.append(ser_uint256(tx.calc_sha256(True))) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) class PrefilledTransaction(): def __init__(self, index=0, tx = None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self, with_witness=True): r = b"" r += ser_compact_size(self.index) if with_witness: r += self.tx.serialize_with_witness() else: r += self.tx.serialize_without_witness() return r def serialize_without_witness(self): return self.serialize(with_witness=False) def serialize_with_witness(self): return self.serialize(with_witness=True) def __repr__(self): return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. class P2PHeaderAndShortIDs(): def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack("<Q", f.read(8))[0] self.shortids_length = deser_compact_size(f) for i in range(self.shortids_length): # shortids are defined to be 6 bytes in the spec, so append # two zero bytes and read it in as an 8-byte number self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0]) self.prefilled_txn = deser_vector(f, PrefilledTransaction) self.prefilled_txn_length = len(self.prefilled_txn) # When using version 2 compact blocks, we must serialize with_witness. 
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn, "serialize_without_witness")
        return r

    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s)" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))

# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    def serialize(self):
        return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)

# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
    expected_shortid = siphash256(k0, k1, tx_hash)
    expected_shortid &= 0x0000ffffffffffff
    return expected_shortid

# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
    def __init__(self, p2pheaders_and_shortids = None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False

        if p2pheaders_and_shortids != None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            last_index = -1
            for x in p2pheaders_and_shortids.prefilled_txn:
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index

    def to_p2p(self):
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret

    def get_siphash_keys(self):
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]

    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))

    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s)" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))

class BlockTransactionsRequest():

    def __init__(self, blockhash=0, indexes = None):
        self.blockhash = blockhash
        self.indexes = indexes if indexes != None else []

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        indexes_length = deser_compact_size(f)
        for i in range(indexes_length):
            self.indexes.append(deser_compact_size(f))

    def serialize(self):
        r = b""
        r += ser_uint256(self.blockhash)
        r += ser_compact_size(len(self.indexes))
        for x in self.indexes:
            r += ser_compact_size(x)
        return r

    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        self.indexes = []
        last_index = -1
        for x in absolute_indexes:
            self.indexes.append(x-last_index-1)
            last_index = x

    def to_absolute(self):
        absolute_indexes = []
        last_index = -1
        for x in self.indexes:
            absolute_indexes.append(x+last_index+1)
            last_index = absolute_indexes[-1]
        return absolute_indexes

    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))

class BlockTransactions():

    def __init__(self, blockhash=0, transactions = None):
        self.blockhash = blockhash
        self.transactions = transactions if transactions != None else []

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)

    def serialize(self, with_witness=True):
        r = b""
        r += ser_uint256(self.blockhash)
        if with_witness:
            r += ser_vector(self.transactions, "serialize_with_witness")
        else:
            r += ser_vector(self.transactions, "serialize_without_witness")
        return r

    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))

class CPartialMerkleTree():
    def __init__(self):
        self.nTransactions = 0
        self.vHash = []
        self.vBits = []
        self.fBad = False

    def deserialize(self, f):
        self.nTransactions = struct.unpack("<i", f.read(4))[0]
        self.vHash = deser_uint256_vector(f)
        vBytes = deser_string(f)
        self.vBits = []
        for i in range(len(vBytes) * 8):
            self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nTransactions)
        r += ser_uint256_vector(self.vHash)
        vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
        for i in range(len(self.vBits)):
            vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
        r += ser_string(bytes(vBytesArray))
        return r

    def __repr__(self):
        return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))

class CMerkleBlock():
    def __init__(self):
        self.header = CBlockHeader()
        self.txn = CPartialMerkleTree()

    def deserialize(self, f):
        self.header.deserialize(f)
        self.txn.deserialize(f)

    def serialize(self):
        r = b""
        r += self.header.serialize()
        r += self.txn.serialize()
        return r

    def __repr__(self):
        return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))

# Objects that correspond to messages on the wire
class msg_version():
    command = b"version"

    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = NODE_NETWORK | NODE_WITNESS
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        if self.nVersion == 10300:
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f, False)

        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f, False)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
        else:
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None

        if self.nVersion >= 209:
            self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        else:
            self.nStartingHeight = None

        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:
                # Short read: the optional relay byte was not sent.
                self.nRelay = 0
        else:
            self.nRelay = 0

    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize(False)
        r += self.addrFrom.serialize(False)
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r

    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime), repr(self.addrTo), repr(self.addrFrom), self.nNonce, self.strSubVer, self.nStartingHeight, self.nRelay)

class msg_verack():
    command = b"verack"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_verack()"

class msg_addr():
    command = b"addr"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))

class msg_inv():
    command = b"inv"

    def __init__(self, inv=None):
        if inv is None:
            self.inv = []
        else:
            self.inv = inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))

class msg_getdata():
    command = b"getdata"

    def __init__(self, inv=None):
        self.inv = inv if inv != None else []

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))

class msg_getblocks():
    command = b"getblocks"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)

class msg_tx():
    command = b"tx"

    # Default to a fresh CTransaction per message: a CTransaction() default
    # argument would be created once and shared (and mutated by deserialize())
    # across all msg_tx instances.
    def __init__(self, tx=None):
        self.tx = tx if tx is not None else CTransaction()

    def deserialize(self, f):
        self.tx.deserialize(f)

    def serialize(self):
        return self.tx.serialize_without_witness()

    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))

class msg_witness_tx(msg_tx):

    def serialize(self):
        return self.tx.serialize_with_witness()

class msg_block():
    command = b"block"

    def __init__(self, block=None):
        if block is None:
            self.block = CBlock()
        else:
            self.block = block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize(with_witness=False)

    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))

# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
    def __init__(self, command, data=None):
        self.command = command
        self.data = data

    def serialize(self):
        return self.data

    def __repr__(self):
        return "msg_generic()"

class msg_witness_block(msg_block):

    def serialize(self):
        r = self.block.serialize(with_witness=True)
        return r

class msg_getaddr():
    command = b"getaddr"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_getaddr()"

class msg_ping():
    command = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        r = b""
        r += struct.pack("<Q", self.nonce)
        return r

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce

class msg_pong():
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        r = b""
        r += struct.pack("<Q", self.nonce)
        return r

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce

class msg_mempool():
    command = b"mempool"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_mempool()"

class msg_sendheaders():
    command = b"sendheaders"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_sendheaders()"

# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
    command = b"getheaders"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r

    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)

# headers message has
# <count> <vector of block headers>
class msg_headers():
    command = b"headers"

    def __init__(self, headers=None):
        self.headers = headers if headers is not None else []

    def deserialize(self, f):
        # comment in machinecoind indicates these should be deserialized as blocks
        blocks = deser_vector(f, CBlock)
        for x in blocks:
            self.headers.append(CBlockHeader(x))

    def serialize(self):
        blocks = [CBlock(x) for x in self.headers]
        return ser_vector(blocks)

    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)

class msg_reject():
    command = b"reject"
    REJECT_MALFORMED = 1

    def __init__(self):
        self.message = b""
        self.code = 0
        self.reason = b""
        self.data = 0

    def deserialize(self, f):
        self.message = deser_string(f)
        self.code = struct.unpack("<B", f.read(1))[0]
        self.reason = deser_string(f)
        if (self.code != self.REJECT_MALFORMED and
                (self.message == b"block" or self.message == b"tx")):
            self.data = deser_uint256(f)

    def serialize(self):
        r = ser_string(self.message)
        r += struct.pack("<B", self.code)
        r += ser_string(self.reason)
        if (self.code != self.REJECT_MALFORMED and
                (self.message == b"block" or self.message == b"tx")):
            r += ser_uint256(self.data)
        return r

    def __repr__(self):
        return "msg_reject: %s %d %s [%064x]" \
            % (self.message, self.code, self.reason, self.data)

class msg_feefilter():
    command = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        self.feerate = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        r = b""
        r += struct.pack("<Q", self.feerate)
        return r

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate

class msg_sendcmpct():
    command = b"sendcmpct"

    def __init__(self):
        self.announce = False
        self.version = 1

    def deserialize(self, f):
        self.announce = struct.unpack("<?", f.read(1))[0]
        self.version = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        r = b""
        r += struct.pack("<?", self.announce)
        r += struct.pack("<Q", self.version)
        return r

    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%d)" % (self.announce, self.version)

class msg_cmpctblock():
    command = b"cmpctblock"

    def __init__(self, header_and_shortids = None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)

    def serialize(self):
        r = b""
        r += self.header_and_shortids.serialize()
        return r

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)

class msg_getblocktxn():
    command = b"getblocktxn"

    def __init__(self):
        self.block_txn_request = None

    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)

    def serialize(self):
        r = b""
        r += self.block_txn_request.serialize()
        return r

    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))

class msg_blocktxn():
    command = b"blocktxn"

    def __init__(self):
        self.block_transactions = BlockTransactions()

    def deserialize(self, f):
        self.block_transactions.deserialize(f)

    def serialize(self):
        r = b""
        r += self.block_transactions.serialize(with_witness=False)
        return r

    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))

class msg_witness_blocktxn(msg_blocktxn):
    def serialize(self):
        r = b""
        r += self.block_transactions.serialize(with_witness=True)
        return r
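
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original framework): BIP 152 sends
# getblocktxn indexes differentially encoded -- each wire value is the gap to
# the previous absolute index, minus one -- which from_absolute()/to_absolute()
# above implement. Sorted absolute indexes [1, 3, 4] become [1, 1, 0] on the
# wire and round-trip back unchanged.
def _demo_differential_encoding():
    req = BlockTransactionsRequest(blockhash=0)
    req.from_absolute([1, 3, 4])
    assert req.indexes == [1, 1, 0]
    assert req.to_absolute() == [1, 3, 4]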
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Usage: %prog [options] [<commitref>]*

If no <commitref>'s are supplied, it defaults to HEAD.

Calculates the generation number for one or more commits in a git repo.

Generation number of a commit C with parents P is defined as:
  generation_number(C, []) = 0
  generation_number(C, P) = max(map(generation_number, P)) + 1

This number can be used to order commits relative to each other, as long as for
any pair of the commits, one is an ancestor of the other.

Since calculating the generation number of a commit requires walking that
commit's entire history, this script caches all calculated data inside the git
repo that it operates on in the ref 'refs/number/commits'.
"""

import binascii
import collections
import logging
import optparse
import os
import struct
import sys
import tempfile

import git_common as git
import subprocess2

CHUNK_FMT = '!20sL'
CHUNK_SIZE = struct.calcsize(CHUNK_FMT)
DIRTY_TREES = collections.defaultdict(int)
REF = 'refs/number/commits'

# Number of bytes to use for the prefix on our internal number structure.
# 0 is slow to deserialize. 2 creates way too much bookkeeping overhead (would
# need to reimplement cache data structures to be a bit more sophisticated than
# dicts). 1 seems to be just right.
PREFIX_LEN = 1

# Set this to 'threads' to gather coverage data while testing.
POOL_KIND = 'procs'


def pathlify(hash_prefix):
  """Converts a binary object hash prefix into a posix path, one folder per
  byte.

  >>> pathlify('\xDE\xAD')
  'de/ad'
  """
  return '/'.join('%02x' % ord(b) for b in hash_prefix)


@git.memoize_one(threadsafe=False)
def get_number_tree(prefix_bytes):
  """Returns a dictionary of the git-number registry specified by
  |prefix_bytes|.

  This is in the form of {<full binary ref>: <gen num> ...}

  >>> get_number_tree('\x83\xb4')
  {'\x83\xb4\xe3\xe4W\xf9J*\x8f/c\x16\xecD\xd1\x04\x8b\xa9qz': 169, ...}
  """
  ref = '%s:%s' % (REF, pathlify(prefix_bytes))

  try:
    raw = buffer(git.run('cat-file', 'blob', ref, autostrip=False))
    return dict(struct.unpack_from(CHUNK_FMT, raw, i * CHUNK_SIZE)
                for i in xrange(len(raw) / CHUNK_SIZE))
  except subprocess2.CalledProcessError:
    return {}


@git.memoize_one(threadsafe=False)
def get_num(commit_hash):
  """Returns the generation number for a commit.

  Returns None if the generation number for this commit hasn't been calculated
  yet (see load_generation_numbers()).
  """
  return get_number_tree(commit_hash[:PREFIX_LEN]).get(commit_hash)


def clear_caches(on_disk=False):
  """Clears in-process caches for e.g. unit testing."""
  get_number_tree.clear()
  get_num.clear()
  if on_disk:
    git.run('update-ref', '-d', REF)


def intern_number_tree(tree):
  """Transforms a number tree (in the form returned by |get_number_tree|) into
  a git blob.

  Returns the git blob id as hex-encoded string.

  >>> d = {'\x83\xb4\xe3\xe4W\xf9J*\x8f/c\x16\xecD\xd1\x04\x8b\xa9qz': 169}
  >>> intern_number_tree(d)
  'c552317aa95ca8c3f6aae3357a4be299fbcb25ce'
  """
  with tempfile.TemporaryFile() as f:
    for k, v in sorted(tree.iteritems()):
      f.write(struct.pack(CHUNK_FMT, k, v))
    f.seek(0)
    return git.intern_f(f)


def leaf_map_fn((pre, tree)):
  """Converts a prefix and number tree into a git index line."""
  return '100644 blob %s\t%s\0' % (intern_number_tree(tree), pathlify(pre))


def finalize(targets):
  """Saves all cache data to the git repository.

  After calculating the generation number for |targets|, call finalize() to
  save all the work to the git repository.

  This in particular saves the trees referred to by DIRTY_TREES.
  """
  if not DIRTY_TREES:
    return

  msg = 'git-number Added %s numbers' % sum(DIRTY_TREES.itervalues())

  idx = os.path.join(git.run('rev-parse', '--git-dir'), 'number.idx')
  env = os.environ.copy()
  env['GIT_INDEX_FILE'] = idx

  progress_message = 'Finalizing: (%%(count)d/%d)' % len(DIRTY_TREES)
  with git.ProgressPrinter(progress_message) as inc:
    git.run('read-tree', REF, env=env)

    prefixes_trees = ((p, get_number_tree(p)) for p in sorted(DIRTY_TREES))
    updater = subprocess2.Popen(['git', 'update-index', '-z', '--index-info'],
                                stdin=subprocess2.PIPE, env=env)

    with git.ScopedPool(kind=POOL_KIND) as leaf_pool:
      for item in leaf_pool.imap(leaf_map_fn, prefixes_trees):
        updater.stdin.write(item)
        inc()

    updater.stdin.close()
    updater.wait()
    assert updater.returncode == 0

    tree_id = git.run('write-tree', env=env)
    commit_cmd = ['commit-tree', '-m', msg, '-p'] + git.hashes(REF)
    for t in targets:
      commit_cmd.extend(['-p', binascii.hexlify(t)])
    commit_cmd.append(tree_id)
    commit_hash = git.run(*commit_cmd)
    git.run('update-ref', REF, commit_hash)
  DIRTY_TREES.clear()


def preload_tree(prefix):
  """Returns the prefix and parsed tree object for the specified prefix."""
  return prefix, get_number_tree(prefix)


def all_prefixes(depth=PREFIX_LEN):
  for x in (chr(i) for i in xrange(255)):
    # This isn't covered because PREFIX_LEN currently == 1
    if depth > 1:  # pragma: no cover
      for r in all_prefixes(depth - 1):
        yield x + r
    else:
      yield x


def load_generation_numbers(targets):
  """Populates the caches of get_num and get_number_tree so they contain
  the results for |targets|.

  Loads cached numbers from disk, and calculates missing numbers if one or
  more of |targets| is newer than the cached calculations.

  Args:
    targets - An iterable of binary-encoded full git commit hashes.
  """
  # In case they pass us a generator, listify targets.
  targets = list(targets)

  if all(get_num(t) is not None for t in targets):
    return

  if git.tree(REF) is None:
    empty = git.mktree({})
    commit_hash = git.run('commit-tree', '-m', 'Initial commit from git-number',
                          empty)
    git.run('update-ref', REF, commit_hash)

  with git.ScopedPool(kind=POOL_KIND) as pool:
    preload_iter = pool.imap_unordered(preload_tree, all_prefixes())

    rev_list = []

    with git.ProgressPrinter('Loading commits: %(count)d') as inc:
      # Curiously, buffering the list into memory seems to be the fastest
      # approach in python (as opposed to iterating over the lines in the
      # stdout as they're produced). GIL strikes again :/
      cmd = [
        'rev-list', '--topo-order', '--parents', '--reverse', '^' + REF,
      ] + map(binascii.hexlify, targets)
      for line in git.run(*cmd).splitlines():
        tokens = map(binascii.unhexlify, line.split())
        rev_list.append((tokens[0], tokens[1:]))
        inc()

    get_number_tree.update(preload_iter)

  with git.ProgressPrinter('Counting: %%(count)d/%d' % len(rev_list)) as inc:
    for commit_hash, pars in rev_list:
      num = max(map(get_num, pars)) + 1 if pars else 0

      prefix = commit_hash[:PREFIX_LEN]
      get_number_tree(prefix)[commit_hash] = num
      DIRTY_TREES[prefix] += 1
      get_num.set(commit_hash, num)

      inc()


def main():  # pragma: no cover
  parser = optparse.OptionParser(usage=sys.modules[__name__].__doc__)
  parser.add_option('--no-cache', action='store_true',
                    help='Do not actually cache anything we calculate.')
  parser.add_option('--reset', action='store_true',
                    help='Reset the generation number cache and quit.')
  parser.add_option('-v', '--verbose', action='count', default=0,
                    help='Be verbose. Use more times for more verbosity.')
  opts, args = parser.parse_args()

  levels = [logging.ERROR, logging.INFO, logging.DEBUG]
  logging.basicConfig(level=levels[min(opts.verbose, len(levels) - 1)])

  try:
    if opts.reset:
      clear_caches(on_disk=True)
      return

    try:
      targets = git.parse_commitrefs(*(args or ['HEAD']))
    except git.BadCommitRefException as e:
      parser.error(e)

    load_generation_numbers(targets)
    if not opts.no_cache:
      finalize(targets)

    print '\n'.join(map(str, map(get_num, targets)))
    return 0
  except KeyboardInterrupt:
    return 1


if __name__ == '__main__':  # pragma: no cover
  sys.exit(main())
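
# ------------------------------------------------------------------------------
# Illustrative sketch (not part of this tool): the generation-number recurrence
# from the module docstring, computed over an in-memory {commit: parents} map
# instead of a real repository. For a diamond a <- {b, c} <- d this yields
# {'a': 0, 'b': 1, 'c': 1, 'd': 2}.
def _generation_numbers(parents_of):
  nums = {}
  def num(c):
    if c not in nums:
      # A root (no parents) gets 0; otherwise 1 + the highest parent number.
      nums[c] = max([num(p) for p in parents_of[c]] or [-1]) + 1
    return nums[c]
  for c in parents_of:
    num(c)
  return nums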
##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################

"""d3d9.h"""

from winapi import *
from d3d9types import *
from d3d9caps import *

D3DSHADER9 = Blob(Const(DWORD), "_shaderSize(pFunction)")

D3DSPD = Flags(DWORD, [
    "D3DSPD_IUNKNOWN",
])

D3DADAPTER = FakeEnum(UINT, [
    "D3DADAPTER_DEFAULT",
])

D3DENUM = FakeEnum(DWORD, [
    "D3DENUM_WHQL_LEVEL",
])

D3DSGR = Flags(DWORD, [
    "D3DSGR_NO_CALIBRATION",
    "D3DSGR_CALIBRATE",
])

D3DCURSOR = Flags(DWORD, [
    "D3DCURSOR_IMMEDIATE_UPDATE",
])

D3DPRESENT = Flags(DWORD, [
    "D3DPRESENT_DONOTWAIT",
    "D3DPRESENT_LINEAR_CONTENT",
    "D3DPRESENT_DONOTFLIP",
    "D3DPRESENT_FLIPRESTART",
    "D3DPRESENT_VIDEO_RESTRICT_TO_MONITOR",
])

HRESULT = MAKE_HRESULT(ok = "D3D_OK", errors = [
    "D3DERR_WRONGTEXTUREFORMAT",
    "D3DERR_UNSUPPORTEDCOLOROPERATION",
    "D3DERR_UNSUPPORTEDCOLORARG",
    "D3DERR_UNSUPPORTEDALPHAOPERATION",
    "D3DERR_UNSUPPORTEDALPHAARG",
    "D3DERR_TOOMANYOPERATIONS",
    "D3DERR_CONFLICTINGTEXTUREFILTER",
    "D3DERR_UNSUPPORTEDFACTORVALUE",
    "D3DERR_CONFLICTINGRENDERSTATE",
    "D3DERR_UNSUPPORTEDTEXTUREFILTER",
    "D3DERR_CONFLICTINGTEXTUREPALETTE",
    "D3DERR_DRIVERINTERNALERROR",
    "D3DERR_NOTFOUND",
    "D3DERR_MOREDATA",
    "D3DERR_DEVICELOST",
    "D3DERR_DEVICENOTRESET",
    "D3DERR_NOTAVAILABLE",
    "D3DERR_OUTOFVIDEOMEMORY",
    "D3DERR_INVALIDDEVICE",
    "D3DERR_INVALIDCALL",
    "D3DERR_DRIVERINVALIDCALL",
    "D3DERR_WASSTILLDRAWING",
    "D3DOK_NOAUTOGEN",
    "D3DERR_DEVICEREMOVED",
    "S_NOT_RESIDENT",
    "S_RESIDENT_IN_SHARED_MEMORY",
    "S_PRESENT_MODE_CHANGED",
    "S_PRESENT_OCCLUDED",
    "D3DERR_DEVICEHUNG",
])

# If we ever swizzle shared handles, it will have to be done manually
SHARED_HANDLE = RAW_HANDLE

# System memory textures
# https://msdn.microsoft.com/en-us/library/windows/desktop/bb219800.aspx#Textures
SHARED_HANDLE_SYSMEM = Polymorphic('Pool', [
        ('D3DPOOL_SYSTEMMEM', Blob(Void, '_getLockSize(Format, false, Width, Height)'))
    ], SHARED_HANDLE, contextLess=False)

IDirect3D9 = Interface("IDirect3D9", IUnknown)
IDirect3DDevice9 = Interface("IDirect3DDevice9", IUnknown)
IDirect3DStateBlock9 = Interface("IDirect3DStateBlock9", IUnknown)
IDirect3DSwapChain9 = Interface("IDirect3DSwapChain9", IUnknown)
IDirect3DResource9 = Interface("IDirect3DResource9", IUnknown)
IDirect3DVertexDeclaration9 = Interface("IDirect3DVertexDeclaration9", IUnknown)
IDirect3DVertexShader9 = Interface("IDirect3DVertexShader9", IUnknown)
IDirect3DPixelShader9 = Interface("IDirect3DPixelShader9", IUnknown)
IDirect3DBaseTexture9 = Interface("IDirect3DBaseTexture9", IDirect3DResource9)
IDirect3DTexture9 = Interface("IDirect3DTexture9", IDirect3DBaseTexture9)
IDirect3DVolumeTexture9 = Interface("IDirect3DVolumeTexture9", IDirect3DBaseTexture9)
IDirect3DCubeTexture9 = Interface("IDirect3DCubeTexture9", IDirect3DBaseTexture9)
IDirect3DVertexBuffer9 = Interface("IDirect3DVertexBuffer9", IDirect3DResource9)
IDirect3DIndexBuffer9 = Interface("IDirect3DIndexBuffer9", IDirect3DResource9)
IDirect3DSurface9 = Interface("IDirect3DSurface9", IDirect3DResource9)
IDirect3DVolume9 = Interface("IDirect3DVolume9", IUnknown)
IDirect3DQuery9 = Interface("IDirect3DQuery9", IUnknown)
IDirect3D9Ex = Interface("IDirect3D9Ex", IDirect3D9)
IDirect3DDevice9Ex = Interface("IDirect3DDevice9Ex", IDirect3DDevice9)
IDirect3DSwapChain9Ex = Interface("IDirect3DSwapChain9Ex", IDirect3DSwapChain9)

PDIRECT3D9 = ObjPointer(IDirect3D9)
PDIRECT3DDEVICE9 = ObjPointer(IDirect3DDevice9)
PDIRECT3DSTATEBLOCK9 = ObjPointer(IDirect3DStateBlock9)
PDIRECT3DSWAPCHAIN9 = ObjPointer(IDirect3DSwapChain9)
PDIRECT3DRESOURCE9 = ObjPointer(IDirect3DResource9)
PDIRECT3DVERTEXDECLARATION9 = ObjPointer(IDirect3DVertexDeclaration9)
PDIRECT3DVERTEXSHADER9 = ObjPointer(IDirect3DVertexShader9)
PDIRECT3DPIXELSHADER9 = ObjPointer(IDirect3DPixelShader9)
PDIRECT3DBASETEXTURE9 = ObjPointer(IDirect3DBaseTexture9)
PDIRECT3DTEXTURE9 = ObjPointer(IDirect3DTexture9)
PDIRECT3DVOLUMETEXTURE9 = ObjPointer(IDirect3DVolumeTexture9)
PDIRECT3DCUBETEXTURE9 = ObjPointer(IDirect3DCubeTexture9)
PDIRECT3DVERTEXBUFFER9 = ObjPointer(IDirect3DVertexBuffer9)
PDIRECT3DINDEXBUFFER9 = ObjPointer(IDirect3DIndexBuffer9)
PDIRECT3DSURFACE9 = ObjPointer(IDirect3DSurface9)
PDIRECT3DVOLUME9 = ObjPointer(IDirect3DVolume9)
PDIRECT3DQUERY9 = ObjPointer(IDirect3DQuery9)
PDIRECT3D9EX = ObjPointer(IDirect3D9Ex)
PDIRECT3DDEVICE9EX = ObjPointer(IDirect3DDevice9Ex)
PDIRECT3DSWAPCHAIN9EX = ObjPointer(IDirect3DSwapChain9Ex)

IDirect3D9.methods += [
    StdMethod(HRESULT, "RegisterSoftwareDevice", [(OpaquePointer(Void), "pInitializeFunction")], sideeffects=False),
    StdMethod(UINT, "GetAdapterCount", [], sideeffects=False),
    StdMethod(HRESULT, "GetAdapterIdentifier", [(D3DADAPTER, "Adapter"), (D3DENUM, "Flags"), Out(Pointer(D3DADAPTER_IDENTIFIER9), "pIdentifier")], sideeffects=False),
    StdMethod(UINT, "GetAdapterModeCount", [(D3DADAPTER, "Adapter"), (D3DFORMAT, "Format")], sideeffects=False),
    StdMethod(HRESULT, "EnumAdapterModes", [(D3DADAPTER, "Adapter"), (D3DFORMAT, "Format"), (UINT, "Mode"), Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
    StdMethod(HRESULT, "GetAdapterDisplayMode", [(D3DADAPTER, "Adapter"), Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
    StdMethod(HRESULT, "CheckDeviceType", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DevType"), (D3DFORMAT, "AdapterFormat"), (D3DFORMAT, "BackBufferFormat"), (BOOL, "bWindowed")], sideeffects=False),
    StdMethod(HRESULT, "CheckDeviceFormat", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "AdapterFormat"), (D3DUSAGE, "Usage"), (D3DRESOURCETYPE, "RType"), (D3DFORMAT, "CheckFormat")], sideeffects=False),
    StdMethod(HRESULT, "CheckDeviceMultiSampleType", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "SurfaceFormat"), (BOOL, "Windowed"), (D3DMULTISAMPLE_TYPE, "MultiSampleType"), Out(Pointer(DWORD), "pQualityLevels")], sideeffects=False),
    StdMethod(HRESULT, "CheckDepthStencilMatch", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "AdapterFormat"), (D3DFORMAT, "RenderTargetFormat"), (D3DFORMAT, "DepthStencilFormat")], sideeffects=False),
    StdMethod(HRESULT, "CheckDeviceFormatConversion", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (D3DFORMAT, "SourceFormat"), (D3DFORMAT, "TargetFormat")], sideeffects=False),
    StdMethod(HRESULT, "GetDeviceCaps", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), Out(Pointer(D3DCAPS9), "pCaps")], sideeffects=False),
    StdMethod(HMONITOR, "GetAdapterMonitor", [(D3DADAPTER, "Adapter")], sideeffects=False),
    StdMethod(HRESULT, "CreateDevice", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (HWND, "hFocusWindow"), (D3DCREATE, "BehaviorFlags"), InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), Out(Pointer(PDIRECT3DDEVICE9), "ppReturnedDeviceInterface")]),
]

IDirect3DDevice9.methods += [
    StdMethod(HRESULT, "TestCooperativeLevel", []),
    StdMethod(UINT, "GetAvailableTextureMem", [], sideeffects=False),
    StdMethod(HRESULT, "EvictManagedResources", []),
    StdMethod(HRESULT, "GetDirect3D", [Out(Pointer(PDIRECT3D9), "ppD3D9")]),
    StdMethod(HRESULT, "GetDeviceCaps", [Out(Pointer(D3DCAPS9), "pCaps")], sideeffects=False),
    StdMethod(HRESULT, "GetDisplayMode", [(UINT, "iSwapChain"), Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
    StdMethod(HRESULT, "GetCreationParameters", [Out(Pointer(D3DDEVICE_CREATION_PARAMETERS), "pParameters")], sideeffects=False),
    StdMethod(HRESULT, "SetCursorProperties", [(UINT, "XHotSpot"), (UINT, "YHotSpot"), (PDIRECT3DSURFACE9, "pCursorBitmap")]),
    StdMethod(Void, "SetCursorPosition", [(Int, "X"), (Int, "Y"), (D3DCURSOR, "Flags")]),
    StdMethod(BOOL, "ShowCursor", [(BOOL, "bShow")]),
    StdMethod(HRESULT, "CreateAdditionalSwapChain", [InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), Out(Pointer(PDIRECT3DSWAPCHAIN9), "pSwapChain")]),
    StdMethod(HRESULT, "GetSwapChain", [(UINT, "iSwapChain"), Out(Pointer(PDIRECT3DSWAPCHAIN9), "pSwapChain")]),
    StdMethod(UINT, "GetNumberOfSwapChains", [], sideeffects=False),
    StdMethod(HRESULT, "Reset", [InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters")]),
    StdMethod(HRESULT, "Present", [(ConstPointer(RECT), "pSourceRect"), (ConstPointer(RECT), "pDestRect"), (HWND, "hDestWindowOverride"), (ConstPointer(RGNDATA), "pDirtyRegion")]),
    StdMethod(HRESULT, "GetBackBuffer", [(UINT, "iSwapChain"), (UINT, "iBackBuffer"), (D3DBACKBUFFER_TYPE, "Type"), Out(Pointer(PDIRECT3DSURFACE9), "ppBackBuffer")]),
    StdMethod(HRESULT, "GetRasterStatus", [(UINT, "iSwapChain"), Out(Pointer(D3DRASTER_STATUS), "pRasterStatus")], sideeffects=False),
    StdMethod(HRESULT, "SetDialogBoxMode", [(BOOL, "bEnableDialogs")]),
    StdMethod(Void, "SetGammaRamp", [(UINT, "iSwapChain"), (D3DSGR, "Flags"), (ConstPointer(D3DGAMMARAMP), "pRamp")]),
    StdMethod(Void, "GetGammaRamp", [(UINT, "iSwapChain"), Out(Pointer(D3DGAMMARAMP), "pRamp")], sideeffects=False),
    StdMethod(HRESULT, "CreateTexture", [(UINT, "Width"), (UINT, "Height"), (UINT, "Levels"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DTEXTURE9), "ppTexture"), InOut(Pointer(SHARED_HANDLE_SYSMEM), "pSharedHandle")]),
    StdMethod(HRESULT, "CreateVolumeTexture", [(UINT, "Width"), (UINT, "Height"), (UINT, "Depth"), (UINT, "Levels"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DVOLUMETEXTURE9), "ppVolumeTexture"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
    StdMethod(HRESULT, "CreateCubeTexture", [(UINT, "EdgeLength"), (UINT, "Levels"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DCUBETEXTURE9), "ppCubeTexture"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
    StdMethod(HRESULT, "CreateVertexBuffer", [(UINT, "Length"), (D3DUSAGE, "Usage"), (D3DFVF, "FVF"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DVERTEXBUFFER9), "ppVertexBuffer"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
    StdMethod(HRESULT, "CreateIndexBuffer", [(UINT, "Length"), (D3DUSAGE, "Usage"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DINDEXBUFFER9), "ppIndexBuffer"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
    StdMethod(HRESULT, "CreateRenderTarget", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Lockable"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
    StdMethod(HRESULT, "CreateDepthStencilSurface", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Discard"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
    StdMethod(HRESULT, "UpdateSurface", [(PDIRECT3DSURFACE9, "pSourceSurface"), (ConstPointer(RECT), "pSourceRect"), (PDIRECT3DSURFACE9, "pDestinationSurface"), (ConstPointer(POINT), "pDestPoint")]),
    StdMethod(HRESULT, "UpdateTexture", [(PDIRECT3DBASETEXTURE9, "pSourceTexture"), (PDIRECT3DBASETEXTURE9, "pDestinationTexture")]),
    StdMethod(HRESULT, "GetRenderTargetData", [(PDIRECT3DSURFACE9, "pRenderTarget"), (PDIRECT3DSURFACE9, "pDestSurface")]),
    StdMethod(HRESULT, "GetFrontBufferData", [(UINT, "iSwapChain"), (PDIRECT3DSURFACE9, "pDestSurface")]),
    StdMethod(HRESULT, "StretchRect", [(PDIRECT3DSURFACE9, "pSourceSurface"), (ConstPointer(RECT), "pSourceRect"), (PDIRECT3DSURFACE9, "pDestSurface"), (ConstPointer(RECT), "pDestRect"), (D3DTEXTUREFILTERTYPE, "Filter")]),
    StdMethod(HRESULT, "ColorFill", [(PDIRECT3DSURFACE9, "pSurface"), (ConstPointer(RECT), "pRect"), (D3DCOLOR, "color")]),
    StdMethod(HRESULT, "CreateOffscreenPlainSurface", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle")]),
    StdMethod(HRESULT, "SetRenderTarget", [(DWORD, "RenderTargetIndex"), (PDIRECT3DSURFACE9, "pRenderTarget")]),
    StdMethod(HRESULT, "GetRenderTarget", [(DWORD, "RenderTargetIndex"), Out(Pointer(PDIRECT3DSURFACE9), "ppRenderTarget")]),
    StdMethod(HRESULT, "SetDepthStencilSurface", [(PDIRECT3DSURFACE9, "pNewZStencil")]),
    StdMethod(HRESULT, "GetDepthStencilSurface", [Out(Pointer(PDIRECT3DSURFACE9), "ppZStencilSurface")]),
    StdMethod(HRESULT, "BeginScene", []),
    StdMethod(HRESULT, "EndScene", []),
    StdMethod(HRESULT, "Clear", [(DWORD, "Count"), (Array(Const(D3DRECT), "Count"), "pRects"), (D3DCLEAR, "Flags"), (D3DCOLOR, "Color"), (Float, "Z"), (DWORD, "Stencil")]),
    StdMethod(HRESULT, "SetTransform", [(D3DTRANSFORMSTATETYPE, "State"), (ConstPointer(D3DMATRIX), "pMatrix")]),
    StdMethod(HRESULT, "GetTransform", [(D3DTRANSFORMSTATETYPE, "State"), Out(Pointer(D3DMATRIX), "pMatrix")], sideeffects=False),
    StdMethod(HRESULT, "MultiplyTransform", [(D3DTRANSFORMSTATETYPE, "State"), (ConstPointer(D3DMATRIX), "pMatrix")]),
    StdMethod(HRESULT, "SetViewport", [(ConstPointer(D3DVIEWPORT9), "pViewport")]),
    StdMethod(HRESULT, "GetViewport", [Out(Pointer(D3DVIEWPORT9), "pViewport")], sideeffects=False),
    StdMethod(HRESULT, "SetMaterial", [(ConstPointer(D3DMATERIAL9), "pMaterial")]),
    StdMethod(HRESULT, "GetMaterial", [Out(Pointer(D3DMATERIAL9), "pMaterial")], sideeffects=False),
    StdMethod(HRESULT, "SetLight", [(DWORD, "Index"), (ConstPointer(D3DLIGHT9), "pLight")]),
    StdMethod(HRESULT, "GetLight", [(DWORD, "Index"), Out(Pointer(D3DLIGHT9), "pLight")], sideeffects=False),
    StdMethod(HRESULT, "LightEnable", [(DWORD, "Index"), (BOOL, "Enable")]),
    StdMethod(HRESULT, "GetLightEnable", [(DWORD, "Index"), Out(Pointer(BOOL), "pEnable")], sideeffects=False),
    StdMethod(HRESULT, "SetClipPlane", [(DWORD, "Index"), (Array(Const(Float), 4), "pPlane")]),
    StdMethod(HRESULT, "GetClipPlane", [(DWORD, "Index"), Out(Array(Float, 4), "pPlane")], sideeffects=False),
    StdMethod(HRESULT, "SetRenderState", [(D3DRENDERSTATETYPE, "State"), (D3DRENDERSTATEVALUE, "Value")]),
    StdMethod(HRESULT, "GetRenderState", [(D3DRENDERSTATETYPE, "State"), Out(Pointer(D3DRENDERSTATEVALUE), "pValue")], sideeffects=False),
    StdMethod(HRESULT, "CreateStateBlock", [(D3DSTATEBLOCKTYPE, "Type"), Out(Pointer(PDIRECT3DSTATEBLOCK9), "ppSB")]),
    StdMethod(HRESULT, "BeginStateBlock", []),
    StdMethod(HRESULT, "EndStateBlock", [Out(Pointer(PDIRECT3DSTATEBLOCK9), "ppSB")]),
    StdMethod(HRESULT, "SetClipStatus", [(ConstPointer(D3DCLIPSTATUS9), "pClipStatus")]),
    StdMethod(HRESULT, "GetClipStatus", [Out(Pointer(D3DCLIPSTATUS9), "pClipStatus")], sideeffects=False),
    StdMethod(HRESULT, "GetTexture", [(DWORD, "Stage"), Out(Pointer(PDIRECT3DBASETEXTURE9), "ppTexture")]),
    StdMethod(HRESULT, "SetTexture", [(DWORD, "Stage"), (PDIRECT3DBASETEXTURE9, "pTexture")]),
    StdMethod(HRESULT, "GetTextureStageState", [(DWORD, "Stage"), (D3DTEXTURESTAGESTATETYPE, "Type"), Out(Pointer(D3DTEXTURESTAGESTATEVALUE), "pValue")], sideeffects=False),
    StdMethod(HRESULT, "SetTextureStageState", [(DWORD, "Stage"), (D3DTEXTURESTAGESTATETYPE, "Type"), (D3DTEXTURESTAGESTATEVALUE, "Value")]),
    StdMethod(HRESULT, "GetSamplerState", [(DWORD, "Sampler"), (D3DSAMPLERSTATETYPE, "Type"), Out(Pointer(D3DSAMPLERSTATEVALUE), "pValue")], sideeffects=False),
    StdMethod(HRESULT, "SetSamplerState", [(DWORD, "Sampler"), (D3DSAMPLERSTATETYPE, "Type"), (D3DSAMPLERSTATEVALUE, "Value")]),
    StdMethod(HRESULT, "ValidateDevice", [Out(Pointer(DWORD), "pNumPasses")]),
    StdMethod(HRESULT, "SetPaletteEntries", [(UINT, "PaletteNumber"), (Array(Const(PALETTEENTRY), 256), "pEntries")]),
    StdMethod(HRESULT, "GetPaletteEntries", [(UINT, "PaletteNumber"), Out(Array(PALETTEENTRY, 256), "pEntries")], sideeffects=False),
    StdMethod(HRESULT, "SetCurrentTexturePalette", [(UINT, "PaletteNumber")]),
    StdMethod(HRESULT, "GetCurrentTexturePalette", [Out(Pointer(UINT), "PaletteNumber")], sideeffects=False),
    StdMethod(HRESULT, "SetScissorRect", [(ConstPointer(RECT), "pRect")]),
    StdMethod(HRESULT, "GetScissorRect", [Out(Pointer(RECT), "pRect")]),
    StdMethod(HRESULT, "SetSoftwareVertexProcessing", [(BOOL, "bSoftware")]),
    StdMethod(BOOL, "GetSoftwareVertexProcessing", [], sideeffects=False),
    StdMethod(HRESULT, "SetNPatchMode", [(Float, "nSegments")]),
    StdMethod(Float, "GetNPatchMode", [], sideeffects=False),
    StdMethod(HRESULT, "DrawPrimitive", [(D3DPRIMITIVETYPE, "PrimitiveType"), (UINT, "StartVertex"), (UINT, "PrimitiveCount")]),
    StdMethod(HRESULT, "DrawIndexedPrimitive", [(D3DPRIMITIVETYPE, "PrimitiveType"), (INT, "BaseVertexIndex"), (UINT, "MinVertexIndex"), (UINT, "NumVertices"), (UINT, "startIndex"), (UINT, "primCount")]),
    StdMethod(HRESULT, "DrawPrimitiveUP", [(D3DPRIMITIVETYPE, "PrimitiveType"), (UINT, "PrimitiveCount"), (Blob(Const(Void), "_vertexDataSize(PrimitiveType, PrimitiveCount, VertexStreamZeroStride)"), "pVertexStreamZeroData"), (UINT, "VertexStreamZeroStride")]),
    StdMethod(HRESULT, "DrawIndexedPrimitiveUP", [(D3DPRIMITIVETYPE, "PrimitiveType"), (UINT, "MinVertexIndex"), (UINT, "NumVertices"), (UINT, "PrimitiveCount"), (Blob(Const(Void), "_indexDataSize(PrimitiveType, PrimitiveCount, IndexDataFormat)"), "pIndexData"), (D3DFORMAT, "IndexDataFormat"), (Blob(Const(Void), "NumVertices*VertexStreamZeroStride"), "pVertexStreamZeroData"), (UINT, "VertexStreamZeroStride")]),
    StdMethod(HRESULT, "ProcessVertices", [(UINT, "SrcStartIndex"), (UINT, "DestIndex"), (UINT, "VertexCount"), (PDIRECT3DVERTEXBUFFER9, "pDestBuffer"), (PDIRECT3DVERTEXDECLARATION9, "pVertexDecl"), (D3DPV, "Flags")]),
    StdMethod(HRESULT, "CreateVertexDeclaration", [(Array(Const(D3DVERTEXELEMENT9), "_declCount(pVertexElements)"), "pVertexElements"), Out(Pointer(PDIRECT3DVERTEXDECLARATION9), "ppDecl")]),
    StdMethod(HRESULT, "SetVertexDeclaration", [(PDIRECT3DVERTEXDECLARATION9, "pDecl")]),
    StdMethod(HRESULT, "GetVertexDeclaration", [Out(Pointer(PDIRECT3DVERTEXDECLARATION9), "ppDecl")]),
    StdMethod(HRESULT, "SetFVF", [(D3DFVF, "FVF")]),
    StdMethod(HRESULT, "GetFVF", [Out(Pointer(D3DFVF), "pFVF")], sideeffects=False),
    StdMethod(HRESULT, "CreateVertexShader", [(D3DSHADER9, "pFunction"), Out(Pointer(PDIRECT3DVERTEXSHADER9), "ppShader")]),
    StdMethod(HRESULT, "SetVertexShader", [(PDIRECT3DVERTEXSHADER9, "pShader")]),
    StdMethod(HRESULT, "GetVertexShader", [Out(Pointer(PDIRECT3DVERTEXSHADER9), "ppShader")]),
    StdMethod(HRESULT, "SetVertexShaderConstantF", [(UINT, "StartRegister"), (Array(Const(Float), "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")]),
    StdMethod(HRESULT, "GetVertexShaderConstantF", [(UINT, "StartRegister"), Out(Array(Float, "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")], sideeffects=False),
    StdMethod(HRESULT, "SetVertexShaderConstantI", [(UINT, "StartRegister"), (Array(Const(Int), "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")]),
    StdMethod(HRESULT, "GetVertexShaderConstantI", [(UINT, "StartRegister"), Out(Array(Int, "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")], sideeffects=False),
    StdMethod(HRESULT, "SetVertexShaderConstantB", [(UINT, "StartRegister"), (Array(Const(BOOL), "BoolCount"), "pConstantData"), (UINT, "BoolCount")]),
    StdMethod(HRESULT, "GetVertexShaderConstantB", [(UINT, "StartRegister"), Out(Array(BOOL, "BoolCount"), "pConstantData"), (UINT, "BoolCount")], sideeffects=False),
    StdMethod(HRESULT, "SetStreamSource", [(UINT, "StreamNumber"), (PDIRECT3DVERTEXBUFFER9, "pStreamData"), (UINT, "OffsetInBytes"), (UINT, "Stride")]),
    StdMethod(HRESULT, "GetStreamSource", [(UINT, "StreamNumber"), Out(Pointer(PDIRECT3DVERTEXBUFFER9), "ppStreamData"), Out(Pointer(UINT), "pOffsetInBytes"), Out(Pointer(UINT), "pStride")]),
    StdMethod(HRESULT, "SetStreamSourceFreq", [(UINT, "StreamNumber"), (UINT, "Setting")]),
    StdMethod(HRESULT, "GetStreamSourceFreq", [(UINT, "StreamNumber"), Out(Pointer(UINT), "pSetting")], sideeffects=False),
    StdMethod(HRESULT, "SetIndices", [(PDIRECT3DINDEXBUFFER9, "pIndexData")]),
    StdMethod(HRESULT, "GetIndices", [Out(Pointer(PDIRECT3DINDEXBUFFER9), "ppIndexData")]),
    StdMethod(HRESULT, "CreatePixelShader", [(D3DSHADER9, "pFunction"), Out(Pointer(PDIRECT3DPIXELSHADER9), "ppShader")]),
    StdMethod(HRESULT, "SetPixelShader", [(PDIRECT3DPIXELSHADER9, "pShader")]),
    StdMethod(HRESULT, "GetPixelShader", [Out(Pointer(PDIRECT3DPIXELSHADER9), "ppShader")]),
    StdMethod(HRESULT, "SetPixelShaderConstantF", [(UINT, "StartRegister"), (Array(Const(Float), "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")]),
    StdMethod(HRESULT, "GetPixelShaderConstantF", [(UINT, "StartRegister"), Out(Array(Float, "4*Vector4fCount"), "pConstantData"), (UINT, "Vector4fCount")], sideeffects=False),
    StdMethod(HRESULT, "SetPixelShaderConstantI", [(UINT, "StartRegister"), (Array(Const(Int), "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")]),
    StdMethod(HRESULT, "GetPixelShaderConstantI", [(UINT, "StartRegister"), Out(Array(Int, "4*Vector4iCount"), "pConstantData"), (UINT, "Vector4iCount")], sideeffects=False),
    StdMethod(HRESULT, "SetPixelShaderConstantB", [(UINT, "StartRegister"), (Array(Const(BOOL), "BoolCount"), "pConstantData"), (UINT, "BoolCount")]),
    StdMethod(HRESULT, "GetPixelShaderConstantB", [(UINT, "StartRegister"), Out(Array(BOOL, "BoolCount"), "pConstantData"), (UINT, "BoolCount")], sideeffects=False),
    StdMethod(HRESULT, "DrawRectPatch", [(UINT, "Handle"), (ConstPointer(Float), "pNumSegs"), (ConstPointer(D3DRECTPATCH_INFO), "pRectPatchInfo")]),
    StdMethod(HRESULT, "DrawTriPatch", [(UINT, "Handle"), (ConstPointer(Float), "pNumSegs"), (ConstPointer(D3DTRIPATCH_INFO), "pTriPatchInfo")]),
    StdMethod(HRESULT, "DeletePatch", [(UINT, "Handle")]),
    StdMethod(HRESULT, "CreateQuery", [(D3DQUERYTYPE, "Type"), Out(Pointer(PDIRECT3DQUERY9), "ppQuery")]),
]

IDirect3DStateBlock9.methods += [
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(HRESULT, "Capture", []),
    StdMethod(HRESULT, "Apply", []),
]

IDirect3DSwapChain9.methods += [
    StdMethod(HRESULT, "Present", [(ConstPointer(RECT), "pSourceRect"), (ConstPointer(RECT), "pDestRect"), (HWND, "hDestWindowOverride"), (ConstPointer(RGNDATA), "pDirtyRegion"), (D3DPRESENT, "dwFlags")]),
    StdMethod(HRESULT, "GetFrontBufferData", [(PDIRECT3DSURFACE9, "pDestSurface")]),
    StdMethod(HRESULT, "GetBackBuffer", [(UINT, "iBackBuffer"), (D3DBACKBUFFER_TYPE, "Type"), Out(Pointer(PDIRECT3DSURFACE9), "ppBackBuffer")]),
    StdMethod(HRESULT, "GetRasterStatus", [Out(Pointer(D3DRASTER_STATUS), "pRasterStatus")], sideeffects=False),
    StdMethod(HRESULT, "GetDisplayMode", [Out(Pointer(D3DDISPLAYMODE), "pMode")], sideeffects=False),
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(HRESULT, "GetPresentParameters", [Out(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters")], sideeffects=False),
]

IDirect3DResource9.methods += [
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "refguid"), (OpaqueBlob(Const(Void), "SizeOfData"), "pData"), (DWORD, "SizeOfData"), (D3DSPD, "Flags")], sideeffects=False),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "refguid"), Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), InOut(Pointer(DWORD), "pSizeOfData")], sideeffects=False),
    StdMethod(HRESULT, "FreePrivateData", [(REFGUID, "refguid")], sideeffects=False),
    StdMethod(D3D9_RESOURCE_PRIORITY, "SetPriority", [(D3D9_RESOURCE_PRIORITY, "PriorityNew")]),
    StdMethod(D3D9_RESOURCE_PRIORITY, "GetPriority", [], sideeffects=False),
    StdMethod(Void, "PreLoad", []),
    StdMethod(D3DRESOURCETYPE, "GetType", [], sideeffects=False),
]

IDirect3DVertexDeclaration9.methods += [
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(HRESULT, "GetDeclaration", [Out(Array(D3DVERTEXELEMENT9, "*pNumElements"), "pElement"), InOut(Pointer(UINT), "pNumElements")], sideeffects=False),
]

IDirect3DVertexShader9.methods += [
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(HRESULT, "GetFunction", [Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), Out(Pointer(UINT), "pSizeOfData")], sideeffects=False),
]

IDirect3DPixelShader9.methods += [
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(HRESULT, "GetFunction", [Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), Out(Pointer(UINT), "pSizeOfData")], sideeffects=False),
]

IDirect3DBaseTexture9.methods += [
    StdMethod(DWORD, "SetLOD", [(DWORD, "LODNew")]),
    StdMethod(DWORD, "GetLOD", [], sideeffects=False),
    StdMethod(DWORD, "GetLevelCount", [], sideeffects=False),
    StdMethod(HRESULT, "SetAutoGenFilterType", [(D3DTEXTUREFILTERTYPE, "FilterType")]),
    StdMethod(D3DTEXTUREFILTERTYPE, "GetAutoGenFilterType", [], sideeffects=False),
    StdMethod(Void, "GenerateMipSubLevels", []),
]

IDirect3DTexture9.methods += [
    StdMethod(HRESULT, "GetLevelDesc", [(UINT, "Level"), Out(Pointer(D3DSURFACE_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "GetSurfaceLevel", [(UINT, "Level"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurfaceLevel")]),
    StdMethod(HRESULT, "LockRect", [(UINT, "Level"), Out(Pointer(D3DLOCKED_RECT), "pLockedRect"), (ConstPointer(RECT), "pRect"), (D3DLOCK, "Flags")]),
    StdMethod(HRESULT, "UnlockRect", [(UINT, "Level")]),
    StdMethod(HRESULT, "AddDirtyRect", [(ConstPointer(RECT), "pDirtyRect")]),
]

IDirect3DVolumeTexture9.methods += [
    StdMethod(HRESULT, "GetLevelDesc", [(UINT, "Level"), Out(Pointer(D3DVOLUME_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "GetVolumeLevel", [(UINT, "Level"), Out(Pointer(PDIRECT3DVOLUME9), "ppVolumeLevel")]),
    StdMethod(HRESULT, "LockBox", [(UINT, "Level"), Out(Pointer(D3DLOCKED_BOX), "pLockedVolume"), (ConstPointer(D3DBOX), "pBox"), (D3DLOCK, "Flags")]),
    StdMethod(HRESULT, "UnlockBox", [(UINT, "Level")]),
    StdMethod(HRESULT, "AddDirtyBox", [(ConstPointer(D3DBOX), "pDirtyBox")]),
]

IDirect3DCubeTexture9.methods += [
    StdMethod(HRESULT, "GetLevelDesc", [(UINT, "Level"), Out(Pointer(D3DSURFACE_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "GetCubeMapSurface", [(D3DCUBEMAP_FACES, "FaceType"), (UINT, "Level"), Out(Pointer(PDIRECT3DSURFACE9), "ppCubeMapSurface")]),
    StdMethod(HRESULT, "LockRect", [(D3DCUBEMAP_FACES, "FaceType"), (UINT, "Level"), Out(Pointer(D3DLOCKED_RECT), "pLockedRect"), (ConstPointer(RECT), "pRect"), (D3DLOCK, "Flags")]),
    StdMethod(HRESULT, "UnlockRect", [(D3DCUBEMAP_FACES, "FaceType"), (UINT, "Level")]),
    StdMethod(HRESULT, "AddDirtyRect", [(D3DCUBEMAP_FACES, "FaceType"), (ConstPointer(RECT), "pDirtyRect")]),
]

IDirect3DVertexBuffer9.methods += [
    StdMethod(HRESULT, "Lock", [(UINT, "OffsetToLock"), (UINT, "SizeToLock"), Out(Pointer(LinearPointer(Void, "_MappedSize")), "ppbData"), (D3DLOCK, "Flags")]),
    StdMethod(HRESULT, "Unlock", []),
    StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DVERTEXBUFFER_DESC), "pDesc")], sideeffects=False),
]

IDirect3DIndexBuffer9.methods += [
    StdMethod(HRESULT, "Lock", [(UINT, "OffsetToLock"), (UINT, "SizeToLock"), Out(Pointer(LinearPointer(Void, "_MappedSize")), "ppbData"), (D3DLOCK, "Flags")]),
    StdMethod(HRESULT, "Unlock", []),
    StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DINDEXBUFFER_DESC), "pDesc")], sideeffects=False),
]

IDirect3DSurface9.methods += [
    StdMethod(HRESULT, "GetContainer", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppContainer")]),
    StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DSURFACE_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "LockRect", [Out(Pointer(D3DLOCKED_RECT), "pLockedRect"), (ConstPointer(RECT), "pRect"), (D3DLOCK, "Flags")]),
    StdMethod(HRESULT, "UnlockRect", []),
    StdMethod(HRESULT, "GetDC", [Out(Pointer(HDC), "phdc")], sideeffects=False),
    StdMethod(HRESULT, "ReleaseDC", [(HDC, "hdc")], sideeffects=False),
]

IDirect3DVolume9.methods += [
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "refguid"), (OpaqueBlob(Const(Void), "SizeOfData"), "pData"), (DWORD, "SizeOfData"), (D3DSPD, "Flags")], sideeffects=False),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "refguid"), Out(OpaqueBlob(Void, "*pSizeOfData"), "pData"), InOut(Pointer(DWORD), "pSizeOfData")], sideeffects=False),
    StdMethod(HRESULT, "FreePrivateData", [(REFGUID, "refguid")], sideeffects=False),
    StdMethod(HRESULT, "GetContainer", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppContainer")]),
    StdMethod(HRESULT, "GetDesc", [Out(Pointer(D3DVOLUME_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "LockBox", [Out(Pointer(D3DLOCKED_BOX), "pLockedVolume"), (ConstPointer(D3DBOX), "pBox"), (D3DLOCK, "Flags")]),
    StdMethod(HRESULT, "UnlockBox", []),
]

IDirect3DQuery9.methods += [
    StdMethod(HRESULT, "GetDevice", [Out(Pointer(PDIRECT3DDEVICE9), "ppDevice")]),
    StdMethod(D3DQUERYTYPE, "GetType", [], sideeffects=False),
    StdMethod(DWORD, "GetDataSize", [], sideeffects=False),
    StdMethod(HRESULT, "Issue", [(D3DISSUE, "dwIssueFlags")]),
    StdMethod(HRESULT, "GetData", [Out(D3DQUERYDATA, "pData"), (DWORD, "dwSize"), (D3DGETDATA, "dwGetDataFlags")], sideeffects=False),
]

IDirect3D9Ex.methods += [
    StdMethod(UINT, "GetAdapterModeCountEx", [(D3DADAPTER, "Adapter"), (ConstPointer(D3DDISPLAYMODEFILTER), "pFilter")], sideeffects=False),
    StdMethod(HRESULT, "EnumAdapterModesEx", [(D3DADAPTER, "Adapter"), (ConstPointer(D3DDISPLAYMODEFILTER), "pFilter"), (UINT, "Mode"), Out(Pointer(D3DDISPLAYMODEEX), "pMode")], sideeffects=False),
    StdMethod(HRESULT, "GetAdapterDisplayModeEx", [(D3DADAPTER, "Adapter"), Out(Pointer(D3DDISPLAYMODEEX), "pMode"), Out(Pointer(D3DDISPLAYROTATION), "pRotation")], sideeffects=False),
    StdMethod(HRESULT, "CreateDeviceEx", [(D3DADAPTER, "Adapter"), (D3DDEVTYPE, "DeviceType"), (HWND, "hFocusWindow"), (D3DCREATE, "BehaviorFlags"), InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), Out(Pointer(D3DDISPLAYMODEEX), "pFullscreenDisplayMode"), Out(Pointer(PDIRECT3DDEVICE9EX), "ppReturnedDeviceInterface")]),
    StdMethod(HRESULT, "GetAdapterLUID", [(D3DADAPTER, "Adapter"), Out(Pointer(LUID), "pLUID")], sideeffects=False),
]

IDirect3DDevice9Ex.methods += [
    StdMethod(HRESULT, "SetConvolutionMonoKernel", [(UINT, "width"), (UINT, "height"), (Array(Float, "width"), "rows"), (Array(Float, "height"), "columns")]),
    StdMethod(HRESULT, "ComposeRects", [(PDIRECT3DSURFACE9, "pSrc"), (PDIRECT3DSURFACE9, "pDst"), (PDIRECT3DVERTEXBUFFER9, "pSrcRectDescs"), (UINT, "NumRects"), (PDIRECT3DVERTEXBUFFER9, "pDstRectDescs"), (D3DCOMPOSERECTSOP, "Operation"), (Int, "Xoffset"), (Int, "Yoffset")]),
    StdMethod(HRESULT, "PresentEx", [(ConstPointer(RECT), "pSourceRect"), (ConstPointer(RECT), "pDestRect"), (HWND, "hDestWindowOverride"), (ConstPointer(RGNDATA), "pDirtyRegion"), (D3DPRESENT, "dwFlags")]),
    StdMethod(HRESULT, "GetGPUThreadPriority", [Out(Pointer(INT), "pPriority")], sideeffects=False),
    StdMethod(HRESULT, "SetGPUThreadPriority", [(INT, "Priority")]),
    StdMethod(HRESULT, "WaitForVBlank", [(UINT, "iSwapChain")]),
    StdMethod(HRESULT, "CheckResourceResidency", [(Array(PDIRECT3DRESOURCE9, "NumResources"), "pResourceArray"), (UINT32, "NumResources")]),
    StdMethod(HRESULT, "SetMaximumFrameLatency", [(UINT, "MaxLatency")]),
    StdMethod(HRESULT, "GetMaximumFrameLatency", [Out(Pointer(UINT), "pMaxLatency")], sideeffects=False),
    StdMethod(HRESULT, "CheckDeviceState", [(HWND, "hDestinationWindow")], sideeffects=False),
    StdMethod(HRESULT, "CreateRenderTargetEx", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Lockable"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle"), (D3DUSAGE, "Usage")]),
    StdMethod(HRESULT, "CreateOffscreenPlainSurfaceEx", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DPOOL, "Pool"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle"), (D3DUSAGE, "Usage")]),
    StdMethod(HRESULT, "CreateDepthStencilSurfaceEx", [(UINT, "Width"), (UINT, "Height"), (D3DFORMAT, "Format"), (D3DMULTISAMPLE_TYPE, "MultiSample"), (DWORD, "MultisampleQuality"), (BOOL, "Discard"), Out(Pointer(PDIRECT3DSURFACE9), "ppSurface"), InOut(Pointer(SHARED_HANDLE), "pSharedHandle"), (D3DUSAGE, "Usage")]),
    StdMethod(HRESULT, "ResetEx", [InOut(Pointer(D3DPRESENT_PARAMETERS), "pPresentationParameters"), Out(Pointer(D3DDISPLAYMODEEX), "pFullscreenDisplayMode")]),
    StdMethod(HRESULT, "GetDisplayModeEx", [(UINT, "iSwapChain"), Out(Pointer(D3DDISPLAYMODEEX), "pMode"), Out(Pointer(D3DDISPLAYROTATION), "pRotation")], sideeffects=False),
]

IDirect3DSwapChain9Ex.methods += [
    StdMethod(HRESULT, "GetLastPresentCount", [Out(Pointer(UINT), "pLastPresentCount")], sideeffects=False),
    StdMethod(HRESULT, "GetPresentStats", [Out(Pointer(D3DPRESENTSTATS), "pPresentationStatistics")], sideeffects=False),
    StdMethod(HRESULT, "GetDisplayModeEx", [Out(Pointer(D3DDISPLAYMODEEX), "pMode"), Out(Pointer(D3DDISPLAYROTATION), "pRotation")], sideeffects=False),
]

d3d9 = Module("d3d9")
d3d9.addFunctions([
    StdFunction(PDIRECT3D9, "Direct3DCreate9", [(UINT, "SDKVersion")], fail='NULL'),
    StdFunction(HRESULT, "Direct3DCreate9Ex", [(UINT, "SDKVersion"), Out(Pointer(PDIRECT3D9EX), "ppD3D")], fail='D3DERR_NOTAVAILABLE'),
])
d3d9.addInterfaces([
    IDirect3DSwapChain9Ex,
])

# D3DPERF_* functions can also be used by D3D10 applications, so keep them in a
# separate module to be merged as necessary
# See http://web.archive.org/web/20110510070258/http://msdn.microsoft.com/en-us/library/ee417071%28v=VS.85%29.aspx
d3dperf = Module("d3d9")
d3dperf.addFunctions([
    StdFunction(Int, "D3DPERF_BeginEvent", [(D3DCOLOR, "col"), (LPCWSTR, "wszName")], fail='-1', sideeffects=False),
    StdFunction(Int, "D3DPERF_EndEvent", [], fail='-1', sideeffects=False),
    StdFunction(Void, "D3DPERF_SetMarker", [(D3DCOLOR, "col"), (LPCWSTR, "wszName")], sideeffects=False),
    StdFunction(Void, "D3DPERF_SetRegion", [(D3DCOLOR, "col"), (LPCWSTR, "wszName")], sideeffects=False),
    StdFunction(BOOL, "D3DPERF_QueryRepeatFrame", [], fail='FALSE', sideeffects=False),
    StdFunction(Void, "D3DPERF_SetOptions", [(DWORD, "dwOptions")], sideeffects=False),
    StdFunction(DWORD, "D3DPERF_GetStatus", [], fail='0', sideeffects=False),
])
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Internal utilities for managing I-Spy test results in Google Cloud Storage.

See the ispy.client.chrome_utils module for the external API.
"""

import collections
import itertools
import json
import os
import sys

import image_tools

_INVALID_EXPECTATION_CHARS = ['/', '\\', ' ', '"', '\'']


def IsValidExpectationName(expectation_name):
  return not any(c in _INVALID_EXPECTATION_CHARS for c in expectation_name)


def GetExpectationPath(expectation, file_name=''):
  """Get the path to a test file in the given test run and expectation.

  Args:
    expectation: name of the expectation.
    file_name: name of the file.

  Returns:
    the path as a string relative to the bucket.
  """
  return 'expectations/%s/%s' % (expectation, file_name)


def GetFailurePath(test_run, expectation, file_name=''):
  """Get the path to a failure file in the given test run and expectation.

  Args:
    test_run: name of the test run.
    expectation: name of the expectation.
    file_name: name of the file.

  Returns:
    the path as a string relative to the bucket.
  """
  return GetTestRunPath(test_run, '%s/%s' % (expectation, file_name))


def GetTestRunPath(test_run, file_name=''):
  """Get the path to a file in the given test run.

  Args:
    test_run: name of the test run.
    file_name: name of the file.

  Returns:
    the path as a string relative to the bucket.
  """
  return 'failures/%s/%s' % (test_run, file_name)


class ISpyUtils(object):
  """Utility functions for working with an I-Spy Google Storage bucket."""

  def __init__(self, cloud_bucket):
    """Initialize with a cloud bucket instance to supply GS functionality.

    Args:
      cloud_bucket: An object implementing the cloud_bucket.BaseCloudBucket
        interface.
    """
    self.cloud_bucket = cloud_bucket

  def UploadImage(self, full_path, image):
    """Uploads an image to a location in GS.

    Args:
      full_path: the path to the file in GS including the file extension.
      image: an RGB PIL.Image to be uploaded.
    """
    self.cloud_bucket.UploadFile(
        full_path, image_tools.EncodePNG(image), 'image/png')

  def DownloadImage(self, full_path):
    """Downloads an image from a location in GS.

    Args:
      full_path: the path to the file in GS including the file extension.

    Returns:
      The downloaded RGB PIL.Image.

    Raises:
      cloud_bucket.NotFoundError: if the path to the image is not valid.
    """
    return image_tools.DecodePNG(self.cloud_bucket.DownloadFile(full_path))

  def UpdateImage(self, full_path, image):
    """Updates an existing image in GS, preserving permissions and metadata.

    Args:
      full_path: the path to the file in GS including the file extension.
      image: an RGB PIL.Image.
    """
    self.cloud_bucket.UpdateFile(full_path, image_tools.EncodePNG(image))

  def GenerateExpectation(self, expectation, images):
    """Creates and uploads an expectation to GS from a set of images and name.

    This method generates a mask from the uploaded images, then uploads the
    mask and the first of the images to GS as an expectation.

    Args:
      expectation: name for this expectation; any existing expectation with
        the name will be replaced.
      images: a list of RGB-encoded PIL.Images.

    Raises:
      ValueError: if the expectation name is invalid.
    """
    if not IsValidExpectationName(expectation):
      raise ValueError("Expectation name contains an illegal character: %s."
                       % str(_INVALID_EXPECTATION_CHARS))
    mask = image_tools.InflateMask(image_tools.CreateMask(images), 7)
    self.UploadImage(
        GetExpectationPath(expectation, 'expected.png'), images[0])
    self.UploadImage(GetExpectationPath(expectation, 'mask.png'), mask)

  def PerformComparison(self, test_run, expectation, actual):
    """Runs an image comparison, and uploads discrepancies to GS.

    Args:
      test_run: the name of the test_run.
      expectation: the name of the expectation to use for comparison.
      actual: an RGB-encoded PIL.Image that is the actual result.

    Raises:
      cloud_bucket.NotFoundError: if the given expectation is not found.
      ValueError: if the expectation name is invalid.
    """
    if not IsValidExpectationName(expectation):
      raise ValueError("Expectation name contains an illegal character: %s."
                       % str(_INVALID_EXPECTATION_CHARS))
    expectation_tuple = self.GetExpectation(expectation)
    if not image_tools.SameImage(
        actual, expectation_tuple.expected, mask=expectation_tuple.mask):
      self.UploadImage(
          GetFailurePath(test_run, expectation, 'actual.png'), actual)
      diff, diff_pxls = image_tools.VisualizeImageDifferences(
          expectation_tuple.expected, actual, mask=expectation_tuple.mask)
      self.UploadImage(GetFailurePath(test_run, expectation, 'diff.png'), diff)
      self.cloud_bucket.UploadFile(
          GetFailurePath(test_run, expectation, 'info.txt'),
          json.dumps({
              'different_pixels': diff_pxls,
              'fraction_different':
                  diff_pxls / float(actual.size[0] * actual.size[1])}),
          'application/json')

  def GetExpectation(self, expectation):
    """Returns the given expectation from GS.

    Args:
      expectation: the name of the expectation to get.

    Returns:
      A named tuple: 'Expectation', containing two images: expected and mask.

    Raises:
      cloud_bucket.NotFoundError: if the expectation is not found in GS.
    """
    Expectation = collections.namedtuple('Expectation', ['expected', 'mask'])
    return Expectation(
        self.DownloadImage(GetExpectationPath(expectation, 'expected.png')),
        self.DownloadImage(GetExpectationPath(expectation, 'mask.png')))

  def ExpectationExists(self, expectation):
    """Returns whether the given expectation exists in GS.

    Args:
      expectation: the name of the expectation to check.

    Returns:
      A boolean indicating whether the expectation exists.
    """
    expected_image_exists = self.cloud_bucket.FileExists(
        GetExpectationPath(expectation, 'expected.png'))
    mask_image_exists = self.cloud_bucket.FileExists(
        GetExpectationPath(expectation, 'mask.png'))
    return expected_image_exists and mask_image_exists

  def FailureExists(self, test_run, expectation):
    """Returns whether a failure for the expectation exists for the given run.

    Args:
      test_run: the name of the test_run.
      expectation: the name of the expectation that failed.

    Returns:
      A boolean indicating whether the failure exists.
    """
    actual_image_exists = self.cloud_bucket.FileExists(
        GetFailurePath(test_run, expectation, 'actual.png'))
    test_exists = self.ExpectationExists(expectation)
    info_exists = self.cloud_bucket.FileExists(
        GetFailurePath(test_run, expectation, 'info.txt'))
    return test_exists and actual_image_exists and info_exists

  def RemoveExpectation(self, expectation):
    """Removes an expectation and all failures associated with it.

    Args:
      expectation: the name of the expectation to remove.
    """
    test_paths = self.cloud_bucket.GetAllPaths(
        GetExpectationPath(expectation))
    for path in test_paths:
      self.cloud_bucket.RemoveFile(path)

  def GenerateExpectationPinkOut(self, expectation, images, pink_out, rgb):
    """Uploads an ispy-test to GS with the pink_out workaround.

    Args:
      expectation: the name of the expectation to be uploaded.
      images: a list of RGB-encoded PIL.Images.
      pink_out: a PIL.Image; pixels of the given color are masked out.
      rgb: a list with the RGB values of the color to mask out.

    Raises:
      ValueError: if the expectation name is invalid.
    """
    if not IsValidExpectationName(expectation):
      raise ValueError("Expectation name contains an illegal character: %s."
                       % str(_INVALID_EXPECTATION_CHARS))
    # Convert the pink_out image into a mask.
    black = (0, 0, 0, 255)
    white = (255, 255, 255, 255)
    pink_out.putdata(
        [black if px == (rgb[0], rgb[1], rgb[2], 255) else white
         for px in pink_out.getdata()])
    mask = image_tools.InflateMask(image_tools.CreateMask(images), 7)
    combined_mask = image_tools.AddMasks([mask, pink_out])
    self.UploadImage(GetExpectationPath(expectation, 'expected.png'),
                     images[0])
    self.UploadImage(GetExpectationPath(expectation, 'mask.png'),
                     combined_mask)

  def RemoveFailure(self, test_run, expectation):
    """Removes a failure from GS.

    Args:
      test_run: the name of the test_run.
      expectation: the expectation on which the failure to be removed
        occurred.
    """
    failure_paths = self.cloud_bucket.GetAllPaths(
        GetFailurePath(test_run, expectation))
    for path in failure_paths:
      self.cloud_bucket.RemoveFile(path)

  def GetFailure(self, test_run, expectation):
    """Returns a given test failure's expected, diff, and actual images.

    Args:
      test_run: the name of the test_run.
      expectation: the name of the expectation the result corresponds to.

    Returns:
      A named tuple: Failure, containing three images: expected, diff, and
        actual.

    Raises:
      cloud_bucket.NotFoundError: if the result is not found in GS.
    """
    expected = self.DownloadImage(
        GetExpectationPath(expectation, 'expected.png'))
    actual = self.DownloadImage(
        GetFailurePath(test_run, expectation, 'actual.png'))
    diff = self.DownloadImage(
        GetFailurePath(test_run, expectation, 'diff.png'))
    info = json.loads(self.cloud_bucket.DownloadFile(
        GetFailurePath(test_run, expectation, 'info.txt')))
    Failure = collections.namedtuple(
        'Failure', ['expected', 'diff', 'actual', 'info'])
    return Failure(expected, diff, actual, info)

  def GetAllPaths(self, prefix):
    """Gets urls to all files in GS whose path starts with a given prefix.

    Args:
      prefix: the prefix to filter files in GS by.

    Returns:
      a list containing urls to all objects that started with the prefix.
    """
    return self.cloud_bucket.GetAllPaths(prefix)
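# Illustrative sketch (not part of the Chromium source): a minimal in-memory
# stand-in for the cloud_bucket.BaseCloudBucket interface used above, handy
# for exercising ISpyUtils without a real Google Storage bucket.  The names
# InMemoryBucket and NotFoundError here are hypothetical placeholders.

class NotFoundError(Exception):
  pass


class InMemoryBucket(object):
  """Stores 'uploaded' files in a dict keyed by bucket-relative path."""

  def __init__(self):
    self.files = {}

  def UploadFile(self, path, contents, content_type):
    self.files[path] = contents

  def UpdateFile(self, path, contents):
    if path not in self.files:
      raise NotFoundError(path)
    self.files[path] = contents

  def DownloadFile(self, path):
    try:
      return self.files[path]
    except KeyError:
      raise NotFoundError(path)

  def FileExists(self, path):
    return path in self.files

  def GetAllPaths(self, prefix):
    return [p for p in self.files if p.startswith(prefix)]

  def RemoveFile(self, path):
    self.files.pop(path, None)


if __name__ == '__main__':
  # The path helpers are pure functions, so they can be checked directly.
  assert GetExpectationPath('foo', 'mask.png') == 'expectations/foo/mask.png'
  assert GetFailurePath('run1', 'foo', 'diff.png') == 'failures/run1/foo/diff.png'
  utils = ISpyUtils(InMemoryBucket())  # ready for GenerateExpectation etc.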
# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from glance.api import authorization from glance.api import policy from glance.api import property_protections from glance.common import property_utils from glance.common import store_utils import glance.db import glance.domain import glance.location import glance.notifier import glance.quota class Gateway(object): def __init__(self, db_api=None, store_api=None, notifier=None, policy_enforcer=None): self.db_api = db_api or glance.db.get_api() self.store_api = store_api or glance_store self.store_utils = store_utils self.notifier = notifier or glance.notifier.Notifier() self.policy = policy_enforcer or policy.Enforcer() def get_image_factory(self, context): image_factory = glance.domain.ImageFactory() store_image_factory = glance.location.ImageFactoryProxy( image_factory, context, self.store_api, self.store_utils) quota_image_factory = glance.quota.ImageFactoryProxy( store_image_factory, context, self.db_api, self.store_utils) policy_image_factory = policy.ImageFactoryProxy( quota_image_factory, context, self.policy) notifier_image_factory = glance.notifier.ImageFactoryProxy( policy_image_factory, context, self.notifier) if property_utils.is_property_protection_enabled(): property_rules = property_utils.PropertyRules(self.policy) pif = property_protections.ProtectedImageFactoryProxy( notifier_image_factory, context, property_rules) authorized_image_factory = authorization.ImageFactoryProxy( pif, context) else: authorized_image_factory = authorization.ImageFactoryProxy( notifier_image_factory, context) return authorized_image_factory def get_image_member_factory(self, context): image_factory = glance.domain.ImageMemberFactory() quota_image_factory = glance.quota.ImageMemberFactoryProxy( image_factory, context, self.db_api, self.store_utils) policy_member_factory = policy.ImageMemberFactoryProxy( quota_image_factory, context, self.policy) authorized_image_factory = authorization.ImageMemberFactoryProxy( policy_member_factory, context) return authorized_image_factory def get_repo(self, context): image_repo = glance.db.ImageRepo(context, self.db_api) store_image_repo = glance.location.ImageRepoProxy( image_repo, context, self.store_api, self.store_utils) quota_image_repo = glance.quota.ImageRepoProxy( store_image_repo, context, self.db_api, self.store_utils) policy_image_repo = policy.ImageRepoProxy( quota_image_repo, context, self.policy) notifier_image_repo = glance.notifier.ImageRepoProxy( policy_image_repo, context, self.notifier) if property_utils.is_property_protection_enabled(): property_rules = property_utils.PropertyRules(self.policy) pir = property_protections.ProtectedImageRepoProxy( notifier_image_repo, context, property_rules) authorized_image_repo = authorization.ImageRepoProxy( pir, context) else: authorized_image_repo = authorization.ImageRepoProxy( notifier_image_repo, context) return authorized_image_repo def get_member_repo(self, image, 
context): image_member_repo = glance.db.ImageMemberRepo( context, self.db_api, image) store_image_repo = glance.location.ImageMemberRepoProxy( image_member_repo, image, context, self.store_api) policy_member_repo = policy.ImageMemberRepoProxy( store_image_repo, image, context, self.policy) notifier_member_repo = glance.notifier.ImageMemberRepoProxy( policy_member_repo, image, context, self.notifier) authorized_member_repo = authorization.ImageMemberRepoProxy( notifier_member_repo, image, context) return authorized_member_repo def get_task_factory(self, context): task_factory = glance.domain.TaskFactory() policy_task_factory = policy.TaskFactoryProxy( task_factory, context, self.policy) notifier_task_factory = glance.notifier.TaskFactoryProxy( policy_task_factory, context, self.notifier) authorized_task_factory = authorization.TaskFactoryProxy( notifier_task_factory, context) return authorized_task_factory def get_task_repo(self, context): task_repo = glance.db.TaskRepo(context, self.db_api) policy_task_repo = policy.TaskRepoProxy( task_repo, context, self.policy) notifier_task_repo = glance.notifier.TaskRepoProxy( policy_task_repo, context, self.notifier) authorized_task_repo = authorization.TaskRepoProxy( notifier_task_repo, context) return authorized_task_repo def get_task_stub_repo(self, context): task_stub_repo = glance.db.TaskRepo(context, self.db_api) policy_task_stub_repo = policy.TaskStubRepoProxy( task_stub_repo, context, self.policy) notifier_task_stub_repo = glance.notifier.TaskStubRepoProxy( policy_task_stub_repo, context, self.notifier) authorized_task_stub_repo = authorization.TaskStubRepoProxy( notifier_task_stub_repo, context) return authorized_task_stub_repo def get_task_executor_factory(self, context): task_repo = self.get_task_repo(context) image_repo = self.get_repo(context) image_factory = self.get_image_factory(context) return glance.domain.TaskExecutorFactory(task_repo, image_repo, image_factory) def get_metadef_namespace_factory(self, context): ns_factory = glance.domain.MetadefNamespaceFactory() policy_ns_factory = policy.MetadefNamespaceFactoryProxy( ns_factory, context, self.policy) notifier_ns_factory = glance.notifier.MetadefNamespaceFactoryProxy( policy_ns_factory, context, self.notifier) authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy( notifier_ns_factory, context) return authorized_ns_factory def get_metadef_namespace_repo(self, context): ns_repo = glance.db.MetadefNamespaceRepo(context, self.db_api) policy_ns_repo = policy.MetadefNamespaceRepoProxy( ns_repo, context, self.policy) notifier_ns_repo = glance.notifier.MetadefNamespaceRepoProxy( policy_ns_repo, context, self.notifier) authorized_ns_repo = authorization.MetadefNamespaceRepoProxy( notifier_ns_repo, context) return authorized_ns_repo def get_metadef_object_factory(self, context): object_factory = glance.domain.MetadefObjectFactory() policy_object_factory = policy.MetadefObjectFactoryProxy( object_factory, context, self.policy) notifier_object_factory = glance.notifier.MetadefObjectFactoryProxy( policy_object_factory, context, self.notifier) authorized_object_factory = authorization.MetadefObjectFactoryProxy( notifier_object_factory, context) return authorized_object_factory def get_metadef_object_repo(self, context): object_repo = glance.db.MetadefObjectRepo(context, self.db_api) policy_object_repo = policy.MetadefObjectRepoProxy( object_repo, context, self.policy) notifier_object_repo = glance.notifier.MetadefObjectRepoProxy( policy_object_repo, context, self.notifier) 
authorized_object_repo = authorization.MetadefObjectRepoProxy( notifier_object_repo, context) return authorized_object_repo def get_metadef_resource_type_factory(self, context): resource_type_factory = glance.domain.MetadefResourceTypeFactory() policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy( resource_type_factory, context, self.policy) notifier_resource_type_factory = ( glance.notifier.MetadefResourceTypeFactoryProxy( policy_resource_type_factory, context, self.notifier) ) authorized_resource_type_factory = ( authorization.MetadefResourceTypeFactoryProxy( notifier_resource_type_factory, context) ) return authorized_resource_type_factory def get_metadef_resource_type_repo(self, context): resource_type_repo = glance.db.MetadefResourceTypeRepo( context, self.db_api) policy_object_repo = policy.MetadefResourceTypeRepoProxy( resource_type_repo, context, self.policy) notifier_object_repo = glance.notifier.MetadefResourceTypeRepoProxy( policy_object_repo, context, self.notifier) authorized_object_repo = authorization.MetadefResourceTypeRepoProxy( notifier_object_repo, context) return authorized_object_repo def get_metadef_property_factory(self, context): prop_factory = glance.domain.MetadefPropertyFactory() policy_prop_factory = policy.MetadefPropertyFactoryProxy( prop_factory, context, self.policy) notifier_prop_factory = glance.notifier.MetadefPropertyFactoryProxy( policy_prop_factory, context, self.notifier) authorized_prop_factory = authorization.MetadefPropertyFactoryProxy( notifier_prop_factory, context) return authorized_prop_factory def get_metadef_property_repo(self, context): prop_repo = glance.db.MetadefPropertyRepo(context, self.db_api) policy_prop_repo = policy.MetadefPropertyRepoProxy( prop_repo, context, self.policy) notifier_prop_repo = glance.notifier.MetadefPropertyRepoProxy( policy_prop_repo, context, self.notifier) authorized_prop_repo = authorization.MetadefPropertyRepoProxy( notifier_prop_repo, context) return authorized_prop_repo def get_metadef_tag_factory(self, context): tag_factory = glance.domain.MetadefTagFactory() policy_tag_factory = policy.MetadefTagFactoryProxy( tag_factory, context, self.policy) notifier_tag_factory = glance.notifier.MetadefTagFactoryProxy( policy_tag_factory, context, self.notifier) authorized_tag_factory = authorization.MetadefTagFactoryProxy( notifier_tag_factory, context) return authorized_tag_factory def get_metadef_tag_repo(self, context): tag_repo = glance.db.MetadefTagRepo(context, self.db_api) policy_tag_repo = policy.MetadefTagRepoProxy( tag_repo, context, self.policy) notifier_tag_repo = glance.notifier.MetadefTagRepoProxy( policy_tag_repo, context, self.notifier) authorized_tag_repo = authorization.MetadefTagRepoProxy( notifier_tag_repo, context) return authorized_tag_repo
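# Illustrative sketch (not from the Glance tree): the Gateway above layers
# proxies in a fixed order -- the plain domain object or repo at the core,
# then store/quota/policy/notifier, with authorization wrapped outermost so
# it is consulted first on every call.  The toy classes below only
# demonstrate that wrapping pattern; they share no code with glance.domain.

class BaseRepo(object):
    def get(self, image_id):
        return 'image-%s' % image_id


class LoggingProxy(object):
    """Stands in for the quota/policy/notifier/authorization layers."""

    def __init__(self, base, label):
        self.base = base
        self.label = label

    def get(self, image_id):
        print('%s layer consulted' % self.label)
        return self.base.get(image_id)


def build_repo():
    repo = BaseRepo()
    for label in ('quota', 'policy', 'notifier', 'authorization'):
        repo = LoggingProxy(repo, label)
    return repo  # outermost proxy runs first, mirroring get_repo() above


if __name__ == '__main__':
    build_repo().get('42')  # prints authorization, notifier, policy, quota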
# Copyright 2011-2013 James McCauley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Some of POX's core API and functionality is here, largely in the POXCore class (an instance of which is available as pox.core.core). This includes things like component rendezvous, logging, system status (up and down events), etc. """ from __future__ import print_function # Set up initial log state import logging import inspect import time import os _path = inspect.stack()[0][1] _ext_path = _path[0:_path.rindex(os.sep)] _ext_path = os.path.dirname(_ext_path) + os.sep _path = os.path.dirname(_path) + os.sep SQUELCH_TIME = 5 _squelch = '' _squelchTime = 0 _squelchCount = 0 def getLogger (name=None, moreFrames=0): """ In general, you don't need to call this directly, and will use core.getLogger() instead. """ if name is None: s = inspect.stack()[1+moreFrames] name = s[1] if name.endswith('.py'): name = name[0:-3] elif name.endswith('.pyc'): name = name[0:-4] if name.startswith(_path): name = name[len(_path):] elif name.startswith(_ext_path): name = name[len(_ext_path):] name = name.replace('/', '.').replace('\\', '.') #FIXME: use os.path or whatever # Remove double names ("topology.topology" -> "topology") if name.find('.') != -1: n = name.split('.') if len(n) >= 2: if n[-1] == n[-2]: del n[-1] name = '.'.join(n) if name.startswith("ext."): name = name.split("ext.",1)[1] if name.endswith(".__init__"): name = name.rsplit(".__init__",1)[0] l = logging.getLogger(name) g=globals() if not hasattr(l, "print"): def printmsg (*args, **kw): #squelch = kw.get('squelch', True) msg = ' '.join((str(s) for s in args)) s = inspect.stack()[1] o = '[' if 'self' in s[0].f_locals: o += s[0].f_locals['self'].__class__.__name__ + '.' o += s[3] + ':' + str(s[2]) + '] ' o += msg if o == _squelch: if time.time() >= _squelchTime: l.debug("[Previous message repeated %i more times]" % (g['_squelchCount']+1,)) g['_squelchCount'] = 0 g['_squelchTime'] = time.time() + SQUELCH_TIME else: g['_squelchCount'] += 1 else: g['_squelch'] = o if g['_squelchCount'] > 0: l.debug("[Previous message repeated %i more times]" % (g['_squelchCount'],)) g['_squelchCount'] = 0 g['_squelchTime'] = time.time() + SQUELCH_TIME l.debug(o) setattr(l, "print", printmsg) setattr(l, "msg", printmsg) return l # Working around something (don't remember what) log = (lambda : getLogger())() from pox.lib.revent import * # Now use revent's exception hook to put exceptions in event handlers into # the log... def _revent_exception_hook (source, event, args, kw, exc_info): try: c = source t = event if hasattr(c, "__class__"): c = c.__class__.__name__ if isinstance(t, Event): t = t.__class__.__name__ elif issubclass(t, Event): t = t.__name__ except: pass log.exception("Exception while handling %s!%s...\n" % (c,t)) import pox.lib.revent.revent pox.lib.revent.revent.handleEventException = _revent_exception_hook class GoingUpEvent (Event): """ Fired when system is going up. """ pass class GoingDownEvent (Event): """ Fired when system is going down. 
""" pass class UpEvent (Event): """ Fired when system is up. """ pass class DownEvent (Event): """ Fired when system is down. """ pass class ComponentRegistered (Event): """ This is raised by core whenever a new component is registered. By watching this, a component can monitor whether other components it depends on are available. """ def __init__ (self, name, component): Event.__init__(self) self.name = name self.component = component import pox.lib.recoco as recoco class POXCore (EventMixin): """ A nexus of of the POX API. pox.core.core is a reference to an instance of this class. This class serves a number of functions. An important one is that it can serve as a rendezvous point for components. A component can register objects on core, and they can then be accessed on the core object (e.g., if you register foo, then there will then be a pox.core.core.foo). In many cases, this means you won't need to import a module. Another purpose to the central registration is that it decouples functionality from a specific module. If myL2Switch and yourL2Switch both register as "switch" and both provide the same API, then it doesn't matter. Doing this with imports is a pain. Additionally, a number of commmon API functions are vailable here. """ _eventMixin_events = set([ UpEvent, DownEvent, GoingUpEvent, GoingDownEvent, ComponentRegistered ]) def __init__ (self): self.debug = False self.running = True self.starting_up = True self.components = {'core':self} import threading self.quit_condition = threading.Condition() self.version = (0,2,0) self.version_name = "carp" print(self.banner) self.scheduler = recoco.Scheduler(daemon=True) self._waiters = [] # List of waiting components @property def banner (self): return "{0} / Copyright 2011-2013 James McCauley, et al.".format( self.version_string) @property def version_string (self): return "POX %s (%s)" % ('.'.join(map(str,self.version)),self.version_name) def callDelayed (_self, _seconds, _func, *args, **kw): """ Calls the function at a later time. This is just a wrapper around a recoco timer. """ t = recoco.Timer(_seconds, _func, args=args, kw=kw, scheduler = _self.scheduler) return t def callLater (_self, _func, *args, **kw): # first arg is `_self` rather than `self` in case the user wants # to specify self as a keyword argument """ Call the given function with the given arguments within the context of the co-operative threading environment. It actually calls it sooner rather than later. ;) Much of POX is written without locks because it's all thread-safe with respect to itself, as it's written using the recoco co-operative threading library. If you have a real thread outside of the co-operative thread context, you need to be careful about calling things within it. This function provides a rather simple way that works for most situations: you give it a callable (like a method) and some arguments, and it will call that callable with those arguments from within the co-operative threader, taking care of synchronization for you. """ _self.scheduler.callLater(_func, *args, **kw) def raiseLater (_self, _obj, *args, **kw): # first arg is `_self` rather than `self` in case the user wants # to specify self as a keyword argument """ This is similar to callLater(), but provides an easy way to raise a revent event from outide the co-operative context. Rather than foo.raiseEvent(BarEvent, baz, spam), you just do core.raiseLater(foo, BarEvent, baz, spam). 
""" _self.scheduler.callLater(_obj.raiseEvent, *args, **kw) def getLogger (self, *args, **kw): """ Returns a logger. Pass it the name you want if you'd like to specify one (e.g., core.getLogger("foo")). If you don't specify a name, it will make one up based on the module name it is called from. """ return getLogger(moreFrames=1,*args, **kw) def quit (self): """ Shut down POX. """ import threading if (self.starting_up or threading.current_thread() is self.scheduler._thread): t = threading.Thread(target=self._quit) t.daemon = True t.start() else: self._quit() def _quit (self): # Should probably do locking here if not self.running: return if self.starting_up: # Try again later self.quit() return self.running = False log.info("Going down...") import gc gc.collect() #DJ++ START 20140115 from pox.sine.libmysql import dbop dbop.closedb() #DJ++ END 20140115 self.raiseEvent(GoingDownEvent()) self.callLater(self.scheduler.quit) for i in range(50): if self.scheduler._hasQuit: break gc.collect() time.sleep(.1) if not self.scheduler._allDone: log.warning("Scheduler didn't quit in time") self.raiseEvent(DownEvent()) log.info("Down.") #logging.shutdown() self.quit_condition.acquire() self.quit_condition.notifyAll() core.quit_condition.release() def _get_python_version (self): try: import platform return "{impl} ({vers}/{build})".format( impl=platform.python_implementation(), vers=platform.python_version(), build=platform.python_build()[1].replace(" "," ")) except: return "Unknown Python" def _get_platform_info (self): try: import platform return platform.platform().split("\n")[0] except: return "Unknown Platform" def goUp (self): log.debug(self.version_string + " going up...") log.debug("Running on " + self._get_python_version()) log.debug("Platform is " + self._get_platform_info()) try: import platform vers = '.'.join(platform.python_version().split(".")[:2]) except: vers = 'an unknown version' if vers != "2.7": l = logging.getLogger("version") if not l.isEnabledFor(logging.WARNING): l.setLevel(logging.WARNING) l.warn("POX requires Python 2.7. You're running %s.", vers) l.warn("If you run into problems, try using Python 2.7 or PyPy.") self.starting_up = False self.raiseEvent(GoingUpEvent()) self.raiseEvent(UpEvent()) self._waiter_notify() if self.running: log.info(self.version_string + " is up.") def _waiter_notify (self): if len(self._waiters): waiting_for = set() for entry in self._waiters: _, name, components, _, _ = entry components = [c for c in components if not self.hasComponent(c)] waiting_for.update(components) log.debug("%s still waiting for: %s" % (name, " ".join(components))) names = set([n for _,n,_,_,_ in self._waiters]) #log.info("%i things still waiting on %i components" # % (names, waiting_for)) log.warn("Still waiting on %i component(s)" % (len(waiting_for),)) def hasComponent (self, name): """ Returns True if a component with the given name has been registered. """ return name in self.components def registerNew (self, __componentClass, *args, **kw): """ Give it a class (and optional __init__ arguments), and it will create an instance and register it using the class name. If the instance has a _core_name property, it will use that instead. It returns the new instance. core.registerNew(FooClass, arg) is roughly equivalent to core.register("FooClass", FooClass(arg)). 
""" name = __componentClass.__name__ obj = __componentClass(*args, **kw) if hasattr(obj, '_core_name'): # Default overridden name = obj._core_name self.register(name, obj) return obj def register (self, name, component=None): """ Makes the object "component" available as pox.core.core.name. If only one argument is specified, the given argument is registered using its class name as the name. """ #TODO: weak references? if component is None: component = name name = component.__class__.__name__ if hasattr(component, '_core_name'): # Default overridden name = component._core_name if name in self.components: log.warn("Warning: Registered '%s' multipled times" % (name,)) self.components[name] = component self.raiseEventNoErrors(ComponentRegistered, name, component) self._try_waiters() def call_when_ready (self, callback, components=[], name=None, args=(), kw={}): """ Calls a callback when components are ready. """ if callback is None: callback = lambda:None callback.func_name = "<None>" if isinstance(components, basestring): components = [components] elif isinstance(components, set): components = list(components) else: try: _ = components[0] components = list(components) except: components = [components] if name is None: #TODO: Use inspect here instead name = getattr(callback, 'func_name') if name is None: name = str(callback) else: name += "()" if hasattr(callback, 'im_class'): name = getattr(callback.im_class,'__name__', '') + '.' + name if hasattr(callback, '__module__'): # Is this a good idea? If not here, we should do it in the # exception printing in try_waiter(). name += " in " + callback.__module__ entry = (callback, name, components, args, kw) self._waiters.append(entry) self._try_waiter(entry) def _try_waiter (self, entry): """ Tries a waiting callback. Calls the callback, removes from _waiters, and returns True if all are satisfied. """ if entry not in self._waiters: # Already handled return callback, name, components, args_, kw_ = entry for c in components: if not self.hasComponent(c): return False self._waiters.remove(entry) try: if callback is not None: callback(*args_,**kw_) except: import traceback msg = "Exception while trying to notify " + name import inspect try: msg += " at " + inspect.getfile(callback) msg += ":" + str(inspect.getsourcelines(callback)[1]) except: pass log.exception(msg) return True def _try_waiters (self): """ Tries to satisfy all component-waiting callbacks """ changed = True while changed: changed = False for entry in list(self._waiters): if self._try_waiter(entry): changed = True def listen_to_dependencies (self, sink, components=None, attrs=True, short_attrs=False, listen_args={}): """ Look through *sink* for handlers named like _handle_component_event. Use that to build a list of components, and append any components explicitly specified by *components*. listen_args is a dict of "component_name"={"arg_name":"arg_value",...}, allowing you to specify additional arguments to addListeners(). When all the referenced components are registered, do the following: 1) Set up all the event listeners 2) Call "_all_dependencies_met" on *sink* if it exists 3) If attrs=True, set attributes on *sink* for each component (e.g, sink._openflow_ would be set to core.openflow) For example, if topology is a dependency, a handler for topology's SwitchJoin event must be defined as so: def _handle_topology_SwitchJoin (self, ...): *NOTE*: The semantics of this function changed somewhat in the Summer 2012 milestone, though its intention remains the same. 
""" if components is None: components = set() elif isinstance(components, basestring): components = set([components]) else: components = set(components) for c in dir(sink): if not c.startswith("_handle_"): continue if c.count("_") < 3: continue c = '_'.join(c.split("_")[2:-1]) components.add(c) if None in listen_args: # This means add it to all... args = listen_args.pop(None) for k,v in args.iteritems(): for c in components: if c not in listen_args: listen_args[c] = {} if k not in listen_args[c]: listen_args[c][k] = v if set(listen_args).difference(components): log.error("Specified listen_args for missing component(s): %s" % (" ".join(set(listen_args).difference(components)),)) def done (sink, components, attrs, short_attrs): if attrs or short_attrs: for c in components: if short_attrs: attrname = c else: attrname = '_%s_' % (c,) setattr(sink, attrname, getattr(self, c)) for c in components: if hasattr(getattr(self, c), "_eventMixin_events"): kwargs = {"prefix":c} kwargs.update(listen_args.get(c, {})) getattr(self, c).addListeners(sink, **kwargs) getattr(sink, "_all_dependencies_met", lambda : None)() self.call_when_ready(done, components, name=sink.__class__.__name__, args=(sink,components,attrs,short_attrs)) if not self.starting_up: self._waiter_notify() def __getattr__ (self, name): if name not in self.components: raise AttributeError("'%s' not registered" % (name,)) return self.components[name] core = None def initialize (): global core core = POXCore() return core # The below is a big hack to make tests and doc tools work. # We should do something better. def _maybe_initialize (): import sys if 'unittest' in sys.modules or 'nose' in sys.modules: initialize() return import __main__ mod = getattr(__main__, '__file__', '') if 'pydoc' in mod: initialize() return _maybe_initialize()
import numpy from chainer import cuda from chainer import function from chainer.functions.math import matmul as _matmul from chainer import utils from chainer.utils import type_check from chainer import variable def _convert_value_to_string(value): if isinstance(value, variable.Variable): value = value.data if numpy.isscalar(value): if value < 0: return '({})'.format(value) else: return str(value) elif isinstance(value, (numpy.ndarray, cuda.ndarray)): return 'constant array' else: raise ValueError( 'value must be scalar, ndarray, or Variable') def _check_constant_type(value): if numpy.isscalar(value): return elif isinstance(value, (numpy.ndarray, cuda.ndarray)): return else: raise ValueError( 'value must be scalar, ndarray, or Variable') def _preprocess_const(x, value): xp = cuda.get_array_module(x) if not numpy.isscalar(value) and cuda.get_array_module(value) != xp: # TODO(unno): We can transfer arrays automatically raise TypeError('Cannot mix cupy.ndarray and numpy.ndarray') b = xp.broadcast(x, value) if b.shape != x.shape: raise ValueError('Failed to broadcast arrays') return utils.force_type(x.dtype, value) class Neg(function.Function): @property def label(self): return '__neg__' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) def forward(self, x): self.retain_inputs(()) return utils.force_array(-x[0]), def backward(self, x, gy): return utils.force_array(-gy[0]), def neg(self): # -x """Element-wise negation. Returns: ~chainer.Variable: Output variable. """ return Neg()(self) class Absolute(function.Function): @property def label(self): return '|_|' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) type_check.expect(in_types[0].dtype.kind == 'f') def forward(self, x): return utils.force_array(abs(x[0])), def backward_cpu(self, x, gy): return utils.force_array(numpy.sign(x[0]) * gy[0]), def backward_gpu(self, x, gy): gx0 = cuda.elementwise( 'T x0, T gy', 'T gx0', 'gx0 = ((x0 > 0) - (x0 < 0)) * gy', 'abs_bwd')(x[0], gy[0]) return gx0, def absolute(self): """Element-wise absolute. Returns: ~chainer.Variable: Output variable. """ return Absolute()(self) class Add(function.Function): @property def label(self): return '_ + _' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) type_check.expect( in_types[0].dtype == in_types[1].dtype, in_types[0].shape == in_types[1].shape ) def forward(self, x): self.retain_inputs(()) y = utils.force_array(x[0] + x[1]) return y, def backward(self, x, gy): return gy[0], gy[0] class AddConstant(function.Function): def __init__(self, value): self.value = value @property def label(self): return '_ + %s' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) def forward(self, x): self.retain_inputs(()) value = _preprocess_const(x[0], self.value) return utils.force_array(x[0] + value), def backward(self, x, gy): return gy[0], def add(self, rhs): # lhs + rhs """Element-wise addition. Returns: ~chainer.Variable: Output variable. 
""" if isinstance(rhs, variable.Variable): return Add()(self, rhs) _check_constant_type(rhs) return AddConstant(rhs)(self) class Sub(function.Function): @property def label(self): return '_ - _' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) type_check.expect( in_types[0].dtype == in_types[1].dtype, in_types[0].shape == in_types[1].shape ) def forward(self, x): self.retain_inputs(()) return utils.force_array(x[0] - x[1]), def backward(self, x, gy): return gy[0], utils.force_array(-gy[0]) def sub(self, rhs): # lhs - rhs """Element-wise subtraction. Returns: ~chainer.Variable: Output variable. """ if isinstance(rhs, variable.Variable): return Sub()(self, rhs) _check_constant_type(rhs) return AddConstant(-rhs)(self) class SubFromConstant(function.Function): def __init__(self, value): self.value = value @property def label(self): return '%s - _' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) def forward(self, x): self.retain_inputs(()) value = _preprocess_const(x[0], self.value) return utils.force_array(value - x[0]), def backward(self, x, gy): return utils.force_array(-gy[0]), def rsub(self, rhs): # rhs - lhs """Element-wise subtraction. Returns: ~chainer.Variable: Output variable. """ if isinstance(rhs, variable.Variable): return Sub()(rhs, self) _check_constant_type(rhs) return SubFromConstant(rhs)(self) class Mul(function.Function): @property def label(self): return '_ * _' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) type_check.expect( in_types[0].dtype.kind == 'f', in_types[0].dtype == in_types[1].dtype, in_types[0].shape == in_types[1].shape ) def forward(self, x): return utils.force_array(x[0] * x[1]), def backward(self, x, gy): return utils.force_array(gy[0] * x[1]), utils.force_array(gy[0] * x[0]) class MulConstant(function.Function): def __init__(self, value): self.value = value @property def label(self): return '_ * %s' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) def forward(self, x): value = _preprocess_const(x[0], self.value) return utils.force_array(value * x[0]), def backward(self, x, gy): # TODO(beam2d): Make it not use the input value = _preprocess_const(x[0], self.value) return utils.force_array(value * gy[0]), def mul(self, rhs): # lhs * rhs """Element-wise multiplication. Returns: ~chainer.Variable: Output variable. """ if isinstance(rhs, variable.Variable): return Mul()(self, rhs) _check_constant_type(rhs) return MulConstant(rhs)(self) class Div(function.Function): @property def label(self): return '_ / _' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) type_check.expect( in_types[0].dtype.kind == 'f', in_types[0].dtype == in_types[1].dtype, in_types[0].shape == in_types[1].shape ) def forward(self, x): return utils.force_array(x[0] / x[1]), def backward_cpu(self, x, gy): gx0 = utils.force_array(gy[0] / x[1]) return gx0, utils.force_array(-gx0 * x[0] / x[1]) def backward_gpu(self, x, gy): return cuda.elementwise( 'T x0, T x1, T gy', 'T gx0, T gx1', ''' gx0 = gy / x1; gx1 = -gx0 * x0 / x1; ''', 'div_bwd')(x[0], x[1], gy[0]) def div(self, rhs): # lhs / rhs """Element-wise division Returns: ~chainer.Variable: Output variable. """ if isinstance(rhs, variable.Variable): return Div()(self, rhs) _check_constant_type(rhs) return MulConstant(1. 
/ rhs)(self) class DivFromConstant(function.Function): def __init__(self, value): self.value = value @property def label(self): return '_ / %s' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) type_check.expect(in_types[0].dtype.kind == 'f') def forward(self, x): value = _preprocess_const(x[0], self.value) return utils.force_array(value / x[0]), def backward_cpu(self, x, gy): value = _preprocess_const(x[0], self.value) return utils.force_array(-value * gy[0] / (x[0] ** 2)), def backward_gpu(self, x, gy): # TODO(beam2d): Make it not use the input value = _preprocess_const(x[0], self.value) gx = cuda.elementwise('T x, T gy, T value', 'T gx', 'gx = -value * gy / (x * x)', 'div_from_const_bwd')(x[0], gy[0], value) return gx, def rdiv(self, rhs): # rhs / lhs """Element-wise division. Returns: ~chainer.Variable: Output variable. """ if isinstance(rhs, variable.Variable): return Div()(rhs, self) _check_constant_type(rhs) return DivFromConstant(rhs)(self) class PowVarVar(function.Function): @property def label(self): return '_ ** _' def check_type_forward(self, in_types): type_check.expect(in_types.size() == 2) type_check.expect( in_types[0].dtype.kind == 'f', in_types[0].dtype == in_types[1].dtype, in_types[0].shape == in_types[1].shape ) def forward_cpu(self, x): self.y = utils.force_array(x[0] ** x[1]) return self.y, def forward_gpu(self, x): return x[0] ** x[1], def backward_cpu(self, x, gy): one = x[1].dtype.type(1) gx0 = utils.force_array(x[1] * (x[0] ** (x[1] - one)) * gy[0]) gx1 = utils.force_array(numpy.log(x[0]) * self.y * gy[0]) return gx0, gx1 def backward_gpu(self, x, gy): return cuda.elementwise( 'T x0, T x1, T gy', 'T gx0, T gx1', ''' gx0 = x1 * pow(x0, x1 - 1) * gy; gx1 = log(x0) * pow(x0, x1) * gy; ''', 'pow_var_var_bwd')(x[0], x[1], gy[0]) class PowVarConst(function.Function): def __init__(self, value): self.value = value @property def label(self): return '_ ** %s' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) type_check.expect(in_types[0].dtype.kind == 'f') def forward(self, x): value = _preprocess_const(x[0], self.value) return utils.force_array(x[0] ** value, x[0].dtype), def backward_cpu(self, x, gy): val_1 = _preprocess_const(x[0], self.value - 1) gx = utils.force_type(x[0].dtype, self.value) * (x[0] ** val_1) * gy[0] return utils.force_array(gx), def backward_gpu(self, x, gy): value = _preprocess_const(x[0], self.value) gx = cuda.elementwise( 'T x, T gy, T value', 'T gx', 'gx = value * pow(x, value - 1) * gy', 'pow_var_const_bwd')(x[0], gy[0], value) return gx, def pow(self, rhs): # lhs ** rhs """Element-wise power function. Returns: ~chainer.Variable: Output variable. 
""" if isinstance(rhs, variable.Variable): return PowVarVar()(self, rhs) _check_constant_type(rhs) return PowVarConst(rhs)(self) class PowConstVar(function.Function): def __init__(self, value): self.value = value @property def label(self): return '%s ** _' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) type_check.expect(in_types[0].dtype.kind == 'f') def forward(self, x): value = _preprocess_const(x[0], self.value) self.y = utils.force_array(value ** x[0]) return self.y, def backward_cpu(self, x, gy): # TODO(beam2d): Make it not use the input value = _preprocess_const(x[0], self.value) return utils.force_array( numpy.log(value, dtype=x[0].dtype) * self.y * gy[0]), def backward_gpu(self, x, gy): value = _preprocess_const(x[0], self.value) gx = cuda.elementwise( 'T x, T gy, T value', 'T gx', 'gx = log(value) * pow(value, x) * gy', 'pow_const_var_bwd')(x[0], gy[0], value) return gx, def rpow(self, rhs): # rhs ** lhs """Element-wise power function. Returns: ~chainer.Variable: Output variable. """ if isinstance(rhs, variable.Variable): return PowVarVar()(rhs, self) _check_constant_type(rhs) return PowConstVar(rhs)(self) class MatMulVarVar(_matmul.MatMul): @property def label(self): return '_ @ _' class MatMulVarConst(function.Function): def __init__(self, value): self.value = value @property def label(self): return '_ @ %s' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) a_type = in_types[0] b_type = self.value type_check.expect(a_type.dtype.kind == 'f') _matmul._check_ndim(a_type) a_idx = _matmul._get_check_index(False, False) b_idx = _matmul._get_check_index(False, True) type_check.expect( a_type.shape[a_idx] == b_type.shape[b_idx] ) def forward(self, x): self.retain_inputs(()) self._x_shape = x[0].shape return _matmul._matmul(x[0], self.value), def backward(self, x, gy): gx0 = _matmul._matmul( gy[0], self.value, transb=True, transout=False ).reshape(self._x_shape) return gx0, class MatMulConstVar(function.Function): def __init__(self, value): self.value = value @property def label(self): return '%s @ _' % _convert_value_to_string(self.value) def check_type_forward(self, in_types): type_check.expect(in_types.size() == 1) a_type = self.value b_type = in_types[0] type_check.expect(b_type.dtype.kind == 'f') _matmul._check_ndim(b_type) a_idx = _matmul._get_check_index(False, False) b_idx = _matmul._get_check_index(False, True) type_check.expect( a_type.shape[a_idx] == b_type.shape[b_idx] ) def forward(self, x): self.retain_inputs(()) self._x_shape = x[0].shape return _matmul._matmul(self.value, x[0]), def backward(self, x, gy): gx1 = _matmul._matmul( self.value, gy[0], transa=True, transout=False ).reshape(self._x_shape) return gx1, def matmul(self, rhs): # lhs @ rhs """Matrix multiplication. Returns: ~chainer.Variable: Output variable. """ if isinstance(rhs, variable.Variable): return MatMulVarVar()(self, rhs) _check_constant_type(rhs) return MatMulVarConst(rhs)(self) def rmatmul(self, rhs): # rhs @ lhs """Matrix multiplication. Returns: ~chainer.Variable: Output variable. 
""" if isinstance(rhs, variable.Variable): return MatMulVarVar()(rhs, self) _check_constant_type(rhs) return MatMulConstVar(rhs)(self) def install_variable_arithmetics(): variable.Variable.__neg__ = neg variable.Variable.__abs__ = absolute variable.Variable.__add__ = add variable.Variable.__radd__ = add variable.Variable.__sub__ = sub variable.Variable.__rsub__ = rsub variable.Variable.__mul__ = mul variable.Variable.__rmul__ = mul variable.Variable.__div__ = div variable.Variable.__truediv__ = div variable.Variable.__rdiv__ = rdiv variable.Variable.__rtruediv__ = rdiv variable.Variable.__pow__ = pow variable.Variable.__rpow__ = rpow variable.Variable.__matmul__ = matmul variable.Variable.__rmatmul__ = rmatmul
#!/usr/bin/env python2 # Copyright (c) 2014-2015 The Bitcoin Core developers # Copyright (c) 2015-2016 The Bitcoin Unlimited developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test BIP68 implementation # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.script import * from test_framework.mininode import * from test_framework.blocktools import * COIN = 100000000 SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31) SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height) SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift SEQUENCE_LOCKTIME_MASK = 0x0000ffff # RPC error for non-BIP68 final transactions NOT_FINAL_ERROR = "64: non-BIP68-final" class BIP68Test(BitcoinTestFramework): def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"])) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"])) self.is_network_split = False self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] connect_nodes(self.nodes[0], 1) def run_test(self): # Generate some coins self.nodes[0].generate(110) print "Running test disable flag" self.test_disable_flag() print "Running test sequence-lock-confirmed-inputs" self.test_sequence_lock_confirmed_inputs() print "Running test sequence-lock-unconfirmed-inputs" self.test_sequence_lock_unconfirmed_inputs() print "Running test BIP68 not consensus before versionbits activation" self.test_bip68_not_consensus() print "Verifying nVersion=2 transactions aren't standard" self.test_version2_relay(before_activation=True) print "Activating BIP68 (and 112/113)" self.activateCSV() print "Verifying nVersion=2 transactions are now standard" self.test_version2_relay(before_activation=False) print "Passed\n" # Test that BIP68 is not in effect if tx version is 1, or if # the first sequence bit is set. def test_disable_flag(self): # Create some unconfirmed inputs new_addr = self.nodes[0].getnewaddress() self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC utxos = self.nodes[0].listunspent(0, 0) assert(len(utxos) > 0) utxo = utxos[0] tx1 = CTransaction() value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN) # Check that the disable flag disables relative locktime. # If sequence locks were used, this would require 1 block for the # input to mature. sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) # This transaction will enable sequence-locks, so this transaction should # fail tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))] tx2.rehash() try: self.nodes[0].sendrawtransaction(ToHex(tx2)) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(False) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. 
tx2.nVersion = 1 self.nodes[0].sendrawtransaction(ToHex(tx2)) # Calculate the median time past of a prior block ("confirmations" before # the current tip). def get_median_time_past(self, confirmations): block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations) return self.nodes[0].getblockheader(block_hash)["mediantime"] # Test that sequence locks are respected for transactions spending confirmed inputs. def test_sequence_lock_confirmed_inputs(self): # Create lots of confirmed utxos, and use them to generate lots of random # transactions. max_outputs = 50 addresses = [] while len(addresses) < max_outputs: addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: import random random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} for i in xrange(num_outputs): outputs[addresses[i]] = random.randint(1, 20)*0.01 self.nodes[0].sendmany("", outputs) self.nodes[0].generate(1) utxos = self.nodes[0].listunspent() # Try creating a lot of random transactions. # Each time, choose a random number of inputs, and randomly set # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. for i in xrange(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) # Track whether any sequence locks used should fail should_pass = True # Track whether this transaction was built with sequence locks using_sequence_locks = False tx = CTransaction() tx.nVersion = 2 value = 0 for j in xrange(num_inputs): sequence_value = 0xfffffffe # this disables sequence locks # 50% chance we enable sequence locks if random.randint(0,1): using_sequence_locks = True # 10% of the time, make the input sequence value pass input_will_pass = (random.randint(1,10) == 1) sequence_value = utxos[j]["confirmations"] if not input_will_pass: sequence_value += 1 should_pass = False # Figure out what the median-time-past was for the confirmed input # Note that if an input has N confirmations, we're going back N blocks # from the tip so that we're looking up MTP of the block # PRIOR to the one the input appears in, as per the BIP68 spec. 
orig_time = self.get_median_time_past(utxos[j]["confirmations"]) cur_time = self.get_median_time_past(0) # MTP of the tip # can only timelock this input if it's not too old -- otherwise use height can_time_lock = True if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time lock if random.randint(0,1) and can_time_lock: # Find first time-lock value that fails, or latest one that succeeds time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) elif (not input_will_pass and time_delta <= cur_time - orig_time): sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"]*COIN # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50 tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a']))) rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"] try: self.nodes[0].sendrawtransaction(rawtx) except JSONRPCException as exp: assert(not should_pass and using_sequence_locks) assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(should_pass or not using_sequence_locks) # Recalculate utxos if we successfully sent the transaction utxos = self.nodes[0].listunspent() # Test that sequence locks on unconfirmed inputs must have nSequence # height or time of 0 to be accepted. # Then test that BIP68-invalid transactions are removed from the mempool # after a reorg. def test_sequence_lock_unconfirmed_inputs(self): # Store height so we can easily reset the chain at the end of the test cur_height = self.nodes[0].getblockcount() # Create a mempool tx. txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Anyone-can-spend mempool tx. # Sequence lock of 0 should pass. tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) # Create a spend of the 0th output of orig_tx with a sequence lock # of 1, and test what happens when submitting. 
# orig_tx.vout[0] must be an anyone-can-spend output def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): sequence_value = 1 if not use_height_lock: sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx = CTransaction() tx.nVersion = 2 tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))] tx.rehash() try: node.sendrawtransaction(ToHex(tx)) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) assert(orig_tx.hash in node.getrawmempool()) else: # orig_tx must not be in mempool assert(orig_tx.hash not in node.getrawmempool()) return tx test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction(tx2.hash, -1e15, int(-self.relayfee*COIN)) cur_time = int(time.time()) for i in xrange(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 assert(tx2.hash in self.nodes[0].getrawmempool()) test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True) test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction(tx2.hash, 1e15, int(self.relayfee*COIN)) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time+600) self.nodes[0].generate(1) assert(tx2.hash not in self.nodes[0].getrawmempool()) # Now that tx2 is not in the mempool, a sequence locked spend should # succeed tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False) assert(tx3.hash in self.nodes[0].getrawmempool()) self.nodes[0].generate(1) assert(tx3.hash not in self.nodes[0].getrawmempool()) # One more test, this time using height locks tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True) assert(tx4.hash in self.nodes[0].getrawmempool()) # Now try combining confirmed and unconfirmed inputs tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True) assert(tx5.hash not in self.nodes[0].getrawmempool()) utxos = self.nodes[0].listunspent() tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN) raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"] try: self.nodes[0].sendrawtransaction(raw_tx5) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(False) # Test mempool-BIP68 consistency after reorg # # State of the transactions in the last blocks: # ... -> [ tx2 ] -> [ tx3 ] # tip-1 tip # And currently tx4 is in the mempool. # # If we invalidate the tip, tx3 should get added to the mempool, causing # tx4 to be removed (fails sequence-lock). self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) assert(tx4.hash not in self.nodes[0].getrawmempool()) assert(tx3.hash in self.nodes[0].getrawmempool()) # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. 
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16) height = self.nodes[0].getblockcount() for i in xrange(2): block = create_block(tip, create_coinbase(height), cur_time) block.nVersion = 3 block.rehash() block.solve() tip = block.sha256 height += 1 self.nodes[0].submitblock(ToHex(block)) cur_time += 1 mempool = self.nodes[0].getrawmempool() assert(tx3.hash not in mempool) assert(tx2.hash in mempool) # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1)) self.nodes[0].generate(10) # Make sure that BIP68 isn't being used to validate blocks, prior to # versionbits activation. If more blocks are mined prior to this test # being run, then it's possible the test has activated the soft fork, and # this test should be moved to run earlier, or deleted. def test_bip68_not_consensus(self): assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active') txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] # sign tx2 tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(ToHex(tx2)) # Now make an invalid spend of tx2 according to BIP68 sequence_value = 100 # 100 block relative locktime tx3 = CTransaction() tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] tx3.rehash() try: self.nodes[0].sendrawtransaction(ToHex(tx3)) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(False) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1)) block.nVersion = 3 block.vtx.extend([tx1, tx2, tx3]) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].submitblock(ToHex(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) def activateCSV(self): # activation should happen at block height 432 (3 periods) min_activation_height = 432 height = self.nodes[0].getblockcount() assert(height < 432) self.nodes[0].generate(432-height) assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active') sync_blocks(self.nodes) # Use self.nodes[1] to test standardness relay policy def test_version2_relay(self, before_activation): inputs = [ ] outputs = { self.nodes[1].getnewaddress() : 1.0 } rawtx = self.nodes[1].createrawtransaction(inputs, outputs) rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] tx = FromHex(CTransaction(), rawtxfund) tx.nVersion = 2 tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"] try: tx_id = self.nodes[1].sendrawtransaction(tx_signed) assert(before_activation == False) except: assert(before_activation) if __name__ == '__main__': BIP68Test().main()
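# Illustrative helper (not part of the test above): BIP68 packs a relative
# lock into nSequence -- bit 31 disables the lock, bit 22 selects time-based
# locks, and the low 16 bits hold the value (in blocks, or in 512-second
# units because of the 9-bit granularity shift).  encode_sequence() is a
# hypothetical name; it just restates the bit layout the test relies on.

def encode_sequence(value, by_time=False, disabled=False):
    if disabled:
        return SEQUENCE_LOCKTIME_DISABLE_FLAG | value
    sequence = value & SEQUENCE_LOCKTIME_MASK
    if by_time:
        sequence |= SEQUENCE_LOCKTIME_TYPE_FLAG
    return sequence

# A 100-block relative lock, as used in test_bip68_not_consensus():
assert encode_sequence(100) == 100
# A time lock of 16 units (~16*512 seconds): low bits plus the type flag.
assert encode_sequence(16, by_time=True) == (1 << 22) | 16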
import json
import re
import itertools

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urllib_request,
    clean_html,
    get_element_by_attribute,
    ExtractorError,
    std_headers,
)


class VimeoIE(InfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)(?:[?].*)?$'
    _NETRC_MACHINE = 'vimeo'
    IE_NAME = u'vimeo'
    _TESTS = [
        {
            u'url': u'http://vimeo.com/56015672',
            u'file': u'56015672.mp4',
            u'md5': u'8879b6cc097e987f02484baf890129e5',
            u'info_dict': {
                u"upload_date": u"20121220",
                u"description": u"This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                u"uploader_id": u"user7108434",
                u"uploader": u"Filippo Valsorda",
                u"title": u"youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
            },
        },
        {
            u'url': u'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            u'file': u'68093876.mp4',
            u'md5': u'3b5ca6aa22b60dfeeadf50b72e44ed82',
            u'note': u'Vimeo Pro video (#1197)',
            u'info_dict': {
                u'uploader_id': u'openstreetmapus',
                u'uploader': u'OpenStreetMap US',
                u'title': u'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
            },
        },
        {
            u'url': u'http://player.vimeo.com/video/54469442',
            u'file': u'54469442.mp4',
            u'md5': u'619b811a4417aa4abe78dc653becf511',
            u'note': u'Videos that embed the url in the player page',
            u'info_dict': {
                u'title': u'Kathy Sierra: Building the minimum Badass User, Business of Software',
                u'uploader': u'The BLN & Business of Software',
            },
        },
    ]

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return
        self.report_login()
        login_url = 'https://vimeo.com/log_in'
        webpage = self._download_webpage(login_url, None, False)
        token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
        data = compat_urllib_parse.urlencode({'email': username,
                                              'password': password,
                                              'action': 'login',
                                              'service': 'vimeo',
                                              'token': token,
                                              })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(login_request, None, False, u'Wrong login info')

    def _verify_video_password(self, url, video_id, webpage):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError(u'This video is protected by a password, use the --video-password option')
        token = re.search(r'xsrft: \'(.*?)\'', webpage).group(1)
        data = compat_urllib_parse.urlencode({'password': password,
                                              'token': token})
        # I didn't manage to use the password with https
        if url.startswith('https'):
            pass_url = url.replace('https', 'http')
        else:
            pass_url = url
        password_request = compat_urllib_request.Request(pass_url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(password_request, video_id,
                               u'Verifying the password',
                               u'Wrong password')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url, new_video=True):
        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group('id')
        if not mobj.group('proto'):
            url = 'https://' + url
        elif mobj.group('pro'):
            url = 'http://player.vimeo.com/video/' + video_id
        elif mobj.group('direct_link'):
            url = 'https://vimeo.com/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, std_headers)
        webpage = self._download_webpage(request, video_id)

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
        self.report_extraction(video_id)

        # Extract the config JSON
        try:
            config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'],
                webpage, u'info section', flags=re.DOTALL)
            config = json.loads(config)
        except Exception:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option')

            if re.search('If so please provide the correct password.', webpage):
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError(u'Unable to extract info section')

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            _, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in config["video"]["thumbs"].items())[-1]

        # Extract video description
        video_description = None
        try:
            video_description = get_element_by_attribute("itemprop", "description", webpage)
            if video_description:
                video_description = clean_html(video_description)
        except AssertionError as err:
            # On some pages like (http://player.vimeo.com/video/54469442) the
            # html tags are not closed, python 2.6 cannot handle it
            if err.args[0] == 'we should not get here!':
                pass
            else:
                raise

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        # TODO bind to format param
        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get("files")
        for codec_name, codec_extension in codecs:
            if codec_name in config_files:
                if 'hd' in config_files[codec_name]:
                    files['hd'].append((codec_name, codec_extension, 'hd'))
                elif 'sd' in config_files[codec_name]:
                    files['sd'].append((codec_name, codec_extension, 'sd'))
                else:
                    files['other'].append((codec_name, codec_extension, config_files[codec_name][0]))

        for quality in ('hd', 'sd', 'other'):
            if len(files[quality]) > 0:
                video_quality = files[quality][0][2]
                video_codec = files[quality][0][0]
                video_extension = files[quality][0][1]
                self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
                break
        else:
            raise ExtractorError(u'No known codec found')

        video_url = None
        if isinstance(config_files[video_codec], dict):
            video_url = config_files[video_codec][video_quality].get("url")

        if video_url is None:
            video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                % (video_id, sig, timestamp, video_quality, video_codec.upper())

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'ext': video_extension,
            'thumbnail': video_thumbnail,
            'description': video_description,
        }]


class VimeoChannelIE(InfoExtractor):
    IE_NAME = u'vimeo:channel'
    _VALID_URL = r'(?:https?://)?vimeo\.com/channels/(?P<id>[^/]+)'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        video_ids = []

        for pagenum in itertools.count(1):
            webpage = self._download_webpage('http://vimeo.com/channels/%s/videos/page:%d' % (channel_id, pagenum),
                                             channel_id, u'Downloading page %s' % pagenum)
            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break

        entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        channel_title = self._html_search_regex(r'<a href="/channels/%s">(.*?)</a>' % channel_id,
                                                webpage, u'channel title')
        return {'_type': 'playlist',
                'id': channel_id,
                'title': channel_title,
                'entries': entries,
                }
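
# --- Illustrative sketch (not part of youtube-dl): what the named groups of
# VimeoIE._VALID_URL capture for a plain video URL. Guarded so it only runs
# when this file is executed directly.
if __name__ == '__main__':
    _m = re.match(VimeoIE._VALID_URL, 'http://vimeo.com/56015672')
    assert _m is not None
    assert _m.group('id') == '56015672'       # the numeric video id
    assert _m.group('proto') == 'http://'     # scheme, used to upgrade to https
    assert _m.group('pro') is None            # only set for vimeopro.com URLs
    assert _m.group('direct_link') is None    # only set for play_redirect_hls URLs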
""" Copyright (c) 2011-2012 Fredrik Ehnbom This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. """ from common import Worker, complete_path, expand_path, get_setting, get_path_setting,\ get_language, LockedVariable, run_in_main_thread, error_message,\ display_user_selection, get_cpu_count, status_message from clang import cindex import time import shlex import subprocess from ctypes import cdll, Structure, POINTER, c_char_p, c_void_p, c_uint, c_bool from parsehelp.parsehelp import * import re import os import Queue import threading scriptpath = os.path.dirname(os.path.abspath(__file__)) def get_cache_library(): import platform name = platform.system() if name == 'Darwin': return cdll.LoadLibrary('libcache.dylib') elif name == 'Windows': if cindex.isWin64: return cdll.LoadLibrary("libcache_x64.dll") return cdll.LoadLibrary('libcache.dll') else: try: # Try loading with absolute path first return cdll.LoadLibrary('%s/libcache.so' % scriptpath) except: try: # See if there's one in the system path return cdll.LoadLibrary("libcache.so") except: import traceback traceback.print_exc() error_message("""\ It looks like libcache.so couldn't be loaded. On Linux you have to \ compile it yourself. See http://github.com/quarnster/SublimeClang for more details. """) class CacheEntry(Structure): _fields_ = [("cursor", cindex.Cursor), ("insert", c_char_p), ("display", c_char_p), ("access", c_uint), ("static", c_bool), ("baseclass", c_bool)] class _Cache(Structure): def __del__(self): _deleteCache(self) class CacheCompletionResults(Structure): @property def length(self): return self.__len__() def __len__(self): return completionResults_length(self) def __getitem__(self, key): if key >= self.length: raise IndexError return completionResults_getEntry(self, key)[0] def __del__(self): completionResults_dispose(self) cachelib = get_cache_library() try: import json _getVersion = cachelib.getVersion _getVersion.restype = c_char_p f = open("%s/package.json" % scriptpath) data = json.load(f) f.close() json = data["packages"][0]["platforms"]["*"][0]["version"] lib = _getVersion() print "Have SublimeClang package: %s" % json print "Have SublimeClang libcache: %s" % lib assert lib == json except: import traceback traceback.print_exc() error_message("Your SublimeClang libcache is out of date. 
Try restarting ST2 and if that fails, uninstall SublimeClang, restart ST2 and install it again.") _createCache = cachelib.createCache _createCache.restype = POINTER(_Cache) _createCache.argtypes = [cindex.Cursor] _deleteCache = cachelib.deleteCache _deleteCache.argtypes = [POINTER(_Cache)] cache_completeNamespace = cachelib.cache_completeNamespace cache_completeNamespace.argtypes = [POINTER(_Cache), POINTER(c_char_p), c_uint] cache_completeNamespace.restype = POINTER(CacheCompletionResults) cache_complete_startswith = cachelib.cache_complete_startswith cache_complete_startswith.argtypes = [POINTER(_Cache), c_char_p] cache_complete_startswith.restype = POINTER(CacheCompletionResults) completionResults_length = cachelib.completionResults_length completionResults_length.argtypes = [POINTER(CacheCompletionResults)] completionResults_length.restype = c_uint completionResults_getEntry = cachelib.completionResults_getEntry completionResults_getEntry.argtypes = [POINTER(CacheCompletionResults)] completionResults_getEntry.restype = POINTER(CacheEntry) completionResults_dispose = cachelib.completionResults_dispose completionResults_dispose.argtypes = [POINTER(CacheCompletionResults)] cache_findType = cachelib.cache_findType cache_findType.argtypes = [POINTER(_Cache), POINTER(c_char_p), c_uint, c_char_p] cache_findType.restype = cindex.Cursor cache_completeCursor = cachelib.cache_completeCursor cache_completeCursor.argtypes = [POINTER(_Cache), cindex.Cursor] cache_completeCursor.restype = POINTER(CacheCompletionResults) cache_clangComplete = cachelib.cache_clangComplete cache_clangComplete.argtypes = [POINTER(_Cache), c_char_p, c_uint, c_uint, POINTER(cindex._CXUnsavedFile), c_uint, c_bool] cache_clangComplete.restype = POINTER(CacheCompletionResults) def remove_duplicates(data): if data == None: return None seen = {} ret = [] for d in data: if d in seen: continue seen[d] = 1 ret.append(d) return ret class Cache: def __init__(self, tu, filename): self.cache = _createCache(tu.cursor)[0] assert self.cache != None self.tu = tu self.filename = filename def __del__(self): self.tu = None self.cache = None def get_native_namespace(self, namespace): nsarg = (c_char_p*len(namespace))() for i in range(len(namespace)): nsarg[i] = namespace[i] return nsarg def complete_namespace(self, namespace): ret = None if len(namespace): nsarg = self.get_native_namespace(namespace) comp = cache_completeNamespace(self.cache, nsarg, len(nsarg)) if comp: ret = [(x.display, x.insert) for x in comp[0]] return ret def get_namespace_from_cursor(self, cursor): namespace = [] while cursor != None and cursor.kind == cindex.CursorKind.NAMESPACE: namespace.insert(0, cursor.displayname) cursor = cursor.get_lexical_parent() return namespace def find_type(self, data, typename): extra = None idx = typename.rfind("::") if idx != -1: extra = typename[:idx] typename = typename[idx+2:] if "<" in typename: typename = typename[:typename.find("<")] namespaces = extract_used_namespaces(data) namespaces.insert(0, None) namespaces.insert(1, extract_namespace(data)) cursor = None for ns in namespaces: nsarg = None nslen = 0 if extra: if ns: ns = ns + "::" + extra else: ns = extra if ns: nsarg = self.get_native_namespace(ns.split("::")) nslen = len(nsarg) cursor = cache_findType(self.cache, nsarg, nslen, typename) if cursor != None and not cursor.kind.is_invalid(): if cursor.kind.is_reference(): cursor = cursor.get_referenced() break if (cursor != None and not cursor.kind.is_invalid()) or idx == -1: return cursor # Maybe it's a subtype? 
parent = self.find_type(data, extra) if parent != None and not parent.kind.is_invalid(): for child in parent.get_children(): if child.kind.is_declaration() and child.spelling == typename: return child return None def solve_template_from_cursor(self, temp, member, template): found = False children = [] for child in member.get_children(): if not found: ref = child.get_reference() if ref != None and ref == temp: found = True continue if child.kind == cindex.CursorKind.TEMPLATE_REF: # Don't support nested templates for now children = [] break elif child.kind == cindex.CursorKind.TYPE_REF: children.append((child.get_resolved_cursor(), None)) return temp, children def solve_member(self, data, typecursor, member, template): temp = None pointer = 0 if member != None and not member.kind.is_invalid(): temp = member.get_returned_cursor() pointer = member.get_returned_pointer_level() if temp != None and not temp.kind.is_invalid(): if temp.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER: off = 0 for child in typecursor.get_children(): if child.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER: if child == temp: break off += 1 if template[1] and off < len(template[1]): template = template[1][off] if isinstance(template[0], cindex.Cursor): temp = template[0] else: temp = self.find_type(data, template[0]) elif temp.kind == cindex.CursorKind.CLASS_TEMPLATE: template = self.solve_template_from_cursor(temp, member, template) return temp, template, pointer def inherits(self, parent, child): if child == None or child.kind.is_invalid(): return False if parent == child: return True for c in child.get_children(): if c.kind == cindex.CursorKind.CXX_BASE_SPECIFIER: for c2 in c.get_children(): if c2.kind == cindex.CursorKind.TYPE_REF: c2 = c2.get_reference() return self.inherits(parent, c2) return False def filter(self, ret, constr=False): if ret == None: return None if constr: match = "\t(namespace|constructor|class|typedef|struct)$" else: match = "\t(?!constructor)[^\t]+$" regex = re.compile(match) ret2 = [] constrs = [] for display, insert in ret: if not regex.search(display): continue if constr and display.endswith("constructor"): constrs.append(display[:display.find("(")]) ret2.append((display, insert)) if constr: for name in constrs: regex = re.compile(r"%s\t(class|typedef|struct)$" % name) ret2 = filter(lambda a: not regex.search(a[0]), ret2) return ret2 def complete(self, data, prefix): line = extract_line_at_offset(data, len(data)-1) before = line if len(prefix) > 0: before = line[:-len(prefix)] ret = None if re.search(r"::$", before): constr = re.search(r"(\W|^)new\s+(\w+::)+$", before) != None ret = [] match = re.search(r"([^\(\s,]+::)+$", before) if match == None: ret = None cached_results = cache_complete_startswith(self.cache, prefix) if cached_results: ret = [] for x in cached_results[0]: if x.cursor.kind != cindex.CursorKind.MACRO_DEFINITION and \ x.cursor.kind != cindex.CursorKind.CXX_METHOD: ret.append((x.display, x.insert)) return ret before = match.group(1) namespace = before.split("::") namespace.pop() # the last item is going to be "prefix" ret = self.complete_namespace(namespace) if len(ret) == 0: typename = "::".join(namespace) c = self.find_type(data, typename) if c != None: if c.kind == cindex.CursorKind.ENUM_DECL: # It's not valid to complete enum:: c = None if c != None and not c.kind.is_invalid() and c.kind != cindex.CursorKind.NAMESPACE: # It's going to be a declaration of some kind, so # get the returned cursor c = c.get_returned_cursor() if c != None and c.kind == 
cindex.CursorKind.TYPEDEF_DECL: # Too complex typedef to be able to complete, fall back to slow completions c = None ret = None if c != None and not c.kind.is_invalid(): if c.kind == cindex.CursorKind.NAMESPACE: namespace = self.get_namespace_from_cursor(c) return self.complete_namespace(namespace) comp = cache_completeCursor(self.cache, c) if comp: inherits = False clazz = extract_class_from_function(data) if clazz == None: clazz = extract_class(data) if clazz != None: c2 = self.find_type(data, clazz) inherits = self.inherits(c, c2) selfcompletion = clazz == c.spelling for c in comp[0]: if (selfcompletion and not c.baseclass) or \ (inherits and not c.access == cindex.CXXAccessSpecifier.PRIVATE) or \ (c.access == cindex.CXXAccessSpecifier.PUBLIC and \ ( c.static or \ c.cursor.kind == cindex.CursorKind.TYPEDEF_DECL or \ c.cursor.kind == cindex.CursorKind.CLASS_DECL or \ c.cursor.kind == cindex.CursorKind.STRUCT_DECL or \ c.cursor.kind == cindex.CursorKind.ENUM_CONSTANT_DECL or \ c.cursor.kind == cindex.CursorKind.ENUM_DECL)): ret.append((c.display, c.insert)) ret = self.filter(ret, constr) return ret elif re.search(r"(\w+\]+\s+$|\[[\w\.\-\>]+\s+$|([^ \t]+)(\.|\->)$)", before): comp = data if len(prefix) > 0: comp = data[:-len(prefix)] typedef = get_type_definition(comp) if typedef == None: return None line, column, typename, var, tocomplete = typedef if typename == None: return None cursor = None template = solve_template(get_base_type(typename)) pointer = get_pointer_level(typename) if var == "this": pointer = 1 if var != None: if line > 0 and column > 0: cursor = cindex.Cursor.get(self.tu, self.filename, line, column) if cursor == None or cursor.kind.is_invalid() or cursor.spelling != var: cursor = self.find_type(data, template[0]) else: pointer = 0 # get the pointer level from the cursor instead if cursor != None and not cursor.kind.is_invalid() and \ cursor.spelling == typename and \ cursor.kind == cindex.CursorKind.VAR_DECL: # We're trying to use a variable as a type.. This isn't valid cursor = None ret = [] if cursor != None and not cursor.kind.is_invalid(): # It's going to be a declaration of some kind, so # get the returned cursor pointer += cursor.get_returned_pointer_level() cursor = cursor.get_returned_cursor() if cursor == None: ret = [] else: # Probably a member of the current class clazz = extract_class_from_function(data) if clazz == None: clazz = extract_class(data) if clazz != None: cursor = self.find_type(data, clazz) if cursor != None and not cursor.kind.is_invalid(): func = False if typename.endswith("()"): func = True typename = typename[:-2] member = cursor.get_member(typename, func) cursor, template, pointer = self.solve_member(data, cursor, member, template) if member != None and (cursor == None or cursor.kind.is_invalid()): ret = [] if cursor == None or cursor.kind.is_invalid(): # Is it by any chance a struct variable or an ObjC class? 
cursor = self.find_type(data, template[0]) if cursor == None or cursor.kind.is_invalid() or \ cursor.spelling != typename or \ (not tocomplete.startswith("::") and \ cursor.kind != cindex.CursorKind.VAR_DECL and \ cursor.kind != cindex.CursorKind.OBJC_INTERFACE_DECL) or \ (tocomplete.startswith("::") and \ not (cursor.kind == cindex.CursorKind.CLASS_DECL or \ cursor.kind == cindex.CursorKind.STRUCT_DECL or \ cursor.kind == cindex.CursorKind.OBJC_INTERFACE_DECL or \ cursor.kind == cindex.CursorKind.CLASS_TEMPLATE)): cursor = None if cursor != None and not cursor.kind.is_invalid(): # It's going to be a declaration of some kind, so # get the returned cursor pointer = cursor.get_returned_pointer_level() cursor = cursor.get_returned_cursor() if cursor == None: ret = [] if cursor == None or cursor.kind.is_invalid(): # Is it a non-member function? func = False if typename.endswith("()"): func = True typename = typename[:-2] cached_results = cache_complete_startswith(self.cache, typename) if cached_results: for x in cached_results[0]: if x.cursor.spelling == typename: if x.cursor.kind == cindex.CursorKind.VAR_DECL or \ x.cursor.kind == cindex.CursorKind.FUNCTION_DECL: cursor = x.cursor pointer = cursor.get_returned_pointer_level() cursor = cursor.get_returned_cursor() if cursor == None: ret = [] break if cursor != None and not cursor.kind.is_invalid(): r = cursor m2 = None count = 0 while len(tocomplete) and count < 10: if r == None or \ not (r.kind == cindex.CursorKind.CLASS_DECL or \ r.kind == cindex.CursorKind.STRUCT_DECL or \ r.kind == cindex.CursorKind.UNION_DECL or \ r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL or \ r.kind == cindex.CursorKind.CLASS_TEMPLATE): if r != None and not (r.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER or \ (r.kind == cindex.CursorKind.TYPEDEF_DECL and len(r.get_children()))): ret = [] r = None break count += 1 match = re.search(r"^([^\.\-\(:\[\]]+)?(\[\]|\(|\.|->|::)(.*)", tocomplete) if match == None: # probably Objective C code match = re.search(r"^(\S+)?(\s+)(.*)", tocomplete) if match == None: break if r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL: pointer = 0 tocomplete = match.group(3) count = 1 function = False if match.group(2) == "(": function = True tocomplete = tocomplete[1:] left = re.match(r"(\.|\->|::)?(.*)", tocomplete) tocomplete = left.group(2) if left.group(1) != None: tocomplete = left.group(1) + tocomplete nextm2 = match.group(2) if match.group(1) == None and pointer == 0 and r.kind != cindex.CursorKind.OBJC_INTERFACE_DECL: if match.group(2) == "->": comp = r.get_member("operator->", True) r, template, pointer = self.solve_member(data, r, comp, template) if pointer > 0: pointer -= 1 if comp == None or comp.kind.is_invalid(): ret = [] elif match.group(2) == "[]": # TODO: different index types? 
comp = r.get_member("operator[]", True) r, template, pointer = self.solve_member(data, r, comp, template) if comp == None or comp.kind.is_invalid(): ret = [] elif match.group(1) == None and pointer > 0: if (nextm2 == "->" or nextm2 == "[]"): pointer -= 1 elif nextm2 == ".": # Trying to dot-complete a pointer, this is invalid # so there can be no completions ret = [] r = None break if match.group(1): member = match.group(1) if "[" in member: member = get_base_type(member) if "]" in member: member = member[:member.find("]")] if m2 == " ": function = True member = r.get_member(member, function) r, template, pointer = self.solve_member(data, r, member, template) if r == None and member != None: # This can't be completed as a cursor object isn't returned # from this member ret = [] if match.group(2) != "(": tocomplete = match.group(2) + tocomplete m2 = nextm2 if r != None and not r.kind.is_invalid() and (pointer == 0 or r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL): clazz = extract_class_from_function(data) if clazz == None: clazz = extract_class(data) selfcompletion = clazz == r.spelling comp = cache_completeCursor(self.cache, r) replaces = [] if template[1] != None: tempnames = [] for child in r.get_children(): if child.kind == cindex.CursorKind.TEMPLATE_TYPE_PARAMETER: tempnames.append(child.spelling) count = min(len(template[1]), len(tempnames)) for i in range(count): s = template[1][i][0] if isinstance(s, cindex.Cursor): s = s.spelling replaces.append((r"(^|,|\(|\d:|\s+)(%s)($|,|\s+|\))" % tempnames[i], r"\1%s\3" % s)) if comp: ret = [] if r.kind == cindex.CursorKind.OBJC_INTERFACE_DECL: isStatic = var == None if m2 == ".": for c in comp[0]: add = True if c.cursor.kind == cindex.CursorKind.OBJC_IVAR_DECL: continue for child in c.cursor.get_children(): if child.kind == cindex.CursorKind.PARM_DECL: add = False break if add: ret.append((c.display, c.insert)) elif m2 == "->": for c in comp[0]: if c.cursor.kind != cindex.CursorKind.OBJC_IVAR_DECL: continue ret.append((c.display, c.insert)) else: for c in comp[0]: if c.static == isStatic and c.cursor.kind != cindex.CursorKind.OBJC_IVAR_DECL: ret.append((c.display, c.insert)) else: for c in comp[0]: if not c.static and c.cursor.kind != cindex.CursorKind.ENUM_CONSTANT_DECL and \ c.cursor.kind != cindex.CursorKind.ENUM_DECL and \ c.cursor.kind != cindex.CursorKind.TYPEDEF_DECL and \ c.cursor.kind != cindex.CursorKind.CLASS_DECL and \ c.cursor.kind != cindex.CursorKind.STRUCT_DECL and \ c.cursor.kind != cindex.CursorKind.CLASS_TEMPLATE and \ (c.access == cindex.CXXAccessSpecifier.PUBLIC or \ (selfcompletion and not (c.baseclass and c.access == cindex.CXXAccessSpecifier.PRIVATE))): disp = c.display ins = c.insert for r in replaces: disp = re.sub(r[0], r[1], disp) ins = re.sub(r[0], r[1], ins) add = (disp, ins) ret.append(add) ret = self.filter(ret) return remove_duplicates(ret) else: constr = re.search(r"(^|\W)new\s+$", before) != None cached_results = cache_complete_startswith(self.cache, prefix) if cached_results: ret = [(x.display, x.insert) for x in cached_results[0]] variables = extract_variables(data) if not constr else [] var = [("%s\t%s" % (v[1], re.sub(r"(^|\b)\s*static\s+", "", v[0])), v[1]) for v in variables] if len(var) and ret == None: ret = [] for v in var: if v[1].startswith(prefix): ret.append(v) clazz = extract_class_from_function(data) if clazz == None: clazz = extract_class(data) if clazz != None: c = self.find_type(data, clazz) if c != None and not c.kind.is_invalid(): comp = cache_completeCursor(self.cache, c) if comp: for c 
in comp[0]: if not c.static and \ not (c.baseclass and c.access == cindex.CXXAccessSpecifier.PRIVATE): add = (c.display, c.insert) ret.append(add) namespaces = extract_used_namespaces(data) ns = extract_namespace(data) if ns: namespaces.append(ns) for ns in namespaces: ns = ns.split("::") add = self.complete_namespace(ns) if add: ret.extend(add) ret = self.filter(ret, constr) return remove_duplicates(ret) def clangcomplete(self, filename, row, col, unsaved_files, membercomp): ret = None unsaved = None if len(unsaved_files): unsaved = (cindex._CXUnsavedFile * len(unsaved_files))() for i, (name, value) in enumerate(unsaved_files): if not isinstance(value, str): value = value.encode("ascii", "ignore") unsaved[i].name = name unsaved[i].contents = value unsaved[i].length = len(value) comp = cache_clangComplete(self.cache, filename, row, col, unsaved, len(unsaved_files), membercomp) if comp: ret = [(c.display, c.insert) for c in comp[0]] return ret def format_cursor(cursor): return "%s:%d:%d" % (cursor.location.file.name, cursor.location.line, cursor.location.column) def get_cursor_spelling(cursor): cursor_spelling = None if cursor != None: cursor_spelling = cursor.spelling or cursor.displayname cursor_spelling = re.sub(r"^(enum\s+|(class|struct)\s+(\w+::)*)", "", cursor_spelling) return cursor_spelling searchcache = {} class ExtensiveSearch: def quickpanel_extensive_search(self, idx): if idx == 0: for cpu in range(get_cpu_count()): t = threading.Thread(target=self.worker) t.start() self.queue.put((0, "*/+")) elif len(self.options) > 2: self.found_callback(self.options[idx][1]) def __init__(self, cursor, spelling, found_callback, folders, opts, opts_script, name="", impl=True, search_re=None, file_re=None): self.name = name if impl: self.re = re.compile(r"\w+[\*&\s]+(?:\w+::)?(%s\s*\([^;\{]*\))(?=\s*\{)" % re.escape(spelling)) self.impre = re.compile(r"(\.cpp|\.c|\.cc|\.m|\.mm)$") else: self.re = re.compile(r"\w+[\*&\s]+(?:\w+::)?(%s\s*\([^;\{]*\))(?=\s*;)" % re.escape(spelling)) self.impre = re.compile(r"(\.h|\.hpp)$") if search_re != None: self.re = search_re if file_re != None: self.impre = file_re self.spelling = spelling self.folders = folders self.opts = opts self.opts_script = opts_script self.impl = impl self.target = "" self.cursor = None if cursor: self.cursor = format_cursor(cursor) self.queue = Queue.PriorityQueue() self.candidates = Queue.Queue() self.lock = threading.RLock() self.timer = None self.status_count = 0 self.found_callback = found_callback self.options = [["Yes", "Do extensive search"], ["No", "Don't do extensive search"]] k = self.key() if k in searchcache: self.options = [["Redo search", "Redo extensive search"], ["Don't redo", "Don't redo extensive search"]] targets = searchcache[k] if isinstance(targets, str): # An exact match is known, we're done here found_callback(targets) return elif targets != None: self.options.extend(targets) display_user_selection(self.options, self.quickpanel_extensive_search) def key(self): return str((self.cursor, self.spelling, self.impre.pattern, self.re.pattern, self.impl, str(self.folders))) def done(self): cache = None if len(self.target) > 0: cache = self.target elif not self.candidates.empty(): cache = [] while not self.candidates.empty(): name, function, line, column = self.candidates.get() pos = "%s:%d:%d" % (name, line, column) cache.append([function, pos]) self.candidates.task_done() searchcache[self.key()] = cache self.found_callback(cache) def do_message(self): try: self.lock.acquire() run_in_main_thread(lambda: 
status_message(self.status)) self.status_count = 0 self.timer = None finally: self.lock.release() def set_status(self, message): try: self.lock.acquire() self.status = message if self.timer: self.timer.cancel() self.timer = None self.status_count += 1 if self.status_count == 30: self.do_message() else: self.timer = threading.Timer(0.1, self.do_message) finally: self.lock.release() def worker(self): try: while len(self.target) == 0: prio, name = self.queue.get(timeout=60) if name == "*/+": run_in_main_thread(lambda: status_message("Searching for %s..." % ("implementation" if self.impl else "definition"))) name = os.path.basename(self.name) for folder in self.folders: for dirpath, dirnames, filenames in os.walk(folder): for filename in filenames: if self.impre.search(filename) != None: score = 1000 for i in range(min(len(filename), len(name))): if filename[i] == name[i]: score -= 1 else: break self.queue.put((score, os.path.join(dirpath, filename))) for i in range(get_cpu_count()-1): self.queue.put((1001, "*/+++")) self.queue.put((1010, "*/++")) self.queue.task_done() continue elif name == "*/++": run_in_main_thread(self.done) break elif name == "*/+++": self.queue.task_done() break remove = tuCache.get_status(name) == TranslationUnitCache.STATUS_NOT_IN_CACHE fine_search = not remove self.set_status("Searching %s" % name) # try a regex search first f = file(name, "r") data = f.read() f.close() fine_cands = [] for match in self.re.finditer(data): fine_search = True loc = match.start() for i in range(len(match.groups())+1): m = match.group(i) if self.spelling in m: loc = match.start(i) line, column = get_line_and_column_from_offset(data, loc) fine_cands.append((name, line, column)) self.candidates.put((name, match.group(0), line, column)) if fine_search and self.cursor and self.impl: tu2 = tuCache.get_translation_unit(name, self.opts, self.opts_script) if tu2 != None: tu2.lock() try: for cand in fine_cands: cursor2 = cindex.Cursor.get( tu2.var, cand[0], cand[1], cand[2]) if cursor2 != None: d = cursor2.get_canonical_cursor() if d != None and cursor2 != d: if format_cursor(d) == self.cursor: self.target = format_cursor(cursor2) run_in_main_thread(self.done) break finally: tu2.unlock() if remove: tuCache.remove(name) self.queue.task_done() except Queue.Empty as e: pass except: import traceback traceback.print_exc() class LockedTranslationUnit(LockedVariable): def __init__(self, var, fn): LockedVariable.__init__(self, var) self.cache = Cache(var, fn) self.fn = fn def quickpanel_format(self, cursor): return ["%s::%s" % (cursor.get_semantic_parent().spelling, cursor.displayname), format_cursor(cursor)] def get_impdef_prep(self, data, offset): row, col = get_line_and_column_from_offset(data, offset) cursor = cindex.Cursor.get(self.var, self.fn, row, col) cursor_spelling = get_cursor_spelling(cursor) word_under_cursor = extract_word_at_offset(data, offset) if word_under_cursor == "" and cursor != None: # Allow a parenthesis, brackets and some other non-name characters right after the name match = re.search(r"(\w+)[\(\[\&\+\-\*\/]*$", extract_line_until_offset(data, offset)) if match: word_under_cursor = match.group(1) return cursor, cursor_spelling, word_under_cursor def get_implementation(self, data, offset, found_callback, folders): target = None try: self.lock() self.var.reparse([(self.fn, data)]) cursor, cursor_spelling, word_under_cursor = self.get_impdef_prep(data, offset) if len(word_under_cursor) == 0: found_callback(None) return if cursor == None or cursor.kind.is_invalid() or 
cursor_spelling != word_under_cursor: if cursor == None or cursor.kind.is_invalid(): cursor = None ExtensiveSearch(cursor, word_under_cursor, found_callback, folders, self.opts, self.opts_script) return d = cursor.get_definition() if d != None and cursor != d: target = format_cursor(d) elif d != None and cursor == d and \ (cursor.kind == cindex.CursorKind.VAR_DECL or \ cursor.kind == cindex.CursorKind.PARM_DECL or \ cursor.kind == cindex.CursorKind.FIELD_DECL): for child in cursor.get_children(): if child.kind == cindex.CursorKind.TYPE_REF: d = child.get_definition() if d != None: target = format_cursor(d) break elif cursor.kind == cindex.CursorKind.CLASS_DECL: for child in cursor.get_children(): if child.kind == cindex.CursorKind.CXX_BASE_SPECIFIER: d = child.get_definition() if d != None: target = format_cursor(d) elif d == None: if cursor.kind == cindex.CursorKind.DECL_REF_EXPR or \ cursor.kind == cindex.CursorKind.MEMBER_REF_EXPR or \ cursor.kind == cindex.CursorKind.CALL_EXPR: cursor = cursor.get_reference() if cursor.kind == cindex.CursorKind.CXX_METHOD or \ cursor.kind == cindex.CursorKind.FUNCTION_DECL or \ cursor.kind == cindex.CursorKind.CONSTRUCTOR or \ cursor.kind == cindex.CursorKind.DESTRUCTOR: f = cursor.location.file.name if f.endswith(".h"): endings = ["cpp", "c", "cc", "m", "mm"] for ending in endings: f = "%s.%s" % (f[:f.rfind(".")], ending) if f != self.fn and os.access(f, os.R_OK): tu2 = tuCache.get_translation_unit(f, self.opts, self.opts_script) if tu2 == None: continue tu2.lock() try: cursor2 = cindex.Cursor.get( tu2.var, cursor.location.file.name, cursor.location.line, cursor.location.column) if cursor2 != None: d = cursor2.get_definition() if d != None and cursor2 != d: target = format_cursor(d) break finally: tu2.unlock() if not target: ExtensiveSearch(cursor, word_under_cursor, found_callback, folders, self.opts, self.opts_script) return else: target = format_cursor(d) finally: self.unlock() found_callback(target) def get_definition(self, data, offset, found_callback, folders): target = None try: self.lock() self.var.reparse([(self.fn, data)]) cursor, cursor_spelling, word_under_cursor = self.get_impdef_prep(data, offset) if len(word_under_cursor) == 0: found_callback(None) return ref = cursor.get_reference() target = None if ref != None: target = format_cursor(ref) elif cursor.kind == cindex.CursorKind.INCLUSION_DIRECTIVE: f = cursor.get_included_file() if not f is None: target = f.name finally: self.unlock() found_callback(target) class TranslationUnitCache(Worker): STATUS_PARSING = 1 STATUS_REPARSING = 2 STATUS_READY = 3 STATUS_NOT_IN_CACHE = 4 def __init__(self): workerthreadcount = get_setting("worker_threadcount", -1) self.as_super = super(TranslationUnitCache, self) self.as_super.__init__(workerthreadcount) self.translationUnits = LockedVariable({}) self.parsingList = LockedVariable([]) self.busyList = LockedVariable([]) self.index_parse_options = 13 self.index = None self.debug_options = False self.__options_cache = LockedVariable({}) def get_status(self, filename): tu = self.translationUnits.lock() pl = self.parsingList.lock() a = filename in tu b = filename in pl self.translationUnits.unlock() self.parsingList.unlock() if a and b: return TranslationUnitCache.STATUS_REPARSING elif a: return TranslationUnitCache.STATUS_READY elif b: return TranslationUnitCache.STATUS_PARSING else: return TranslationUnitCache.STATUS_NOT_IN_CACHE def display_status(self): if get_setting("parse_status_messages", True): self.as_super.display_status() def add_busy(self, 
filename, task, data): bl = self.busyList.lock() test = filename in bl if test: self.busyList.unlock() # Another thread is already doing something with # this file, so try again later if self.tasks.empty(): try: time.sleep(1) except: pass self.tasks.put((task, data)) return True else: bl.append(filename) self.busyList.unlock() return False def remove_busy(self, filename): bl = self.busyList.lock() try: bl.remove(filename) finally: self.busyList.unlock() def task_parse(self, data): filename, opts, opts_script, on_done = data if self.add_busy(filename, self.task_parse, data): return try: self.set_status("Parsing %s" % filename) self.get_translation_unit(filename, opts, opts_script) self.set_status("Parsing %s done" % filename) finally: l = self.parsingList.lock() try: l.remove(filename) finally: self.parsingList.unlock() self.remove_busy(filename) if on_done != None: run_in_main_thread(on_done) def task_reparse(self, data): filename, opts, opts_script, unsaved_files, on_done = data if self.add_busy(filename, self.task_reparse, data): return try: self.set_status("Reparsing %s" % filename) tu = self.get_translation_unit(filename, opts, opts_script, unsaved_files) if tu != None: tu.lock() try: tu.var.reparse(unsaved_files) tu.cache = Cache(tu.var, filename) self.set_status("Reparsing %s done" % filename) finally: tu.unlock() finally: l = self.parsingList.lock() try: l.remove(filename) finally: self.parsingList.unlock() self.remove_busy(filename) if on_done != None: run_in_main_thread(on_done) def task_clear(self, data): tus = self.translationUnits.lock() try: tus.clear() searchcache.clear() finally: self.translationUnits.unlock() cache = self.__options_cache.lock() try: cache.clear() finally: self.__options_cache.unlock() def task_remove(self, data): if self.add_busy(data, self.task_remove, data): return try: tus = self.translationUnits.lock() try: if data in tus: del tus[data] finally: self.translationUnits.unlock() cache = self.__options_cache.lock() try: if data in cache: del cache[data] finally: self.__options_cache.unlock() finally: self.remove_busy(data) def reparse(self, view, filename, unsaved_files=[], on_done=None): ret = False pl = self.parsingList.lock() try: if filename not in pl: ret = True pl.append(filename) self.tasks.put(( self.task_reparse, (filename, self.get_opts(view), self.get_opts_script(view), unsaved_files, on_done))) finally: self.parsingList.unlock() return ret def add_ex(self, filename, opts, opts_script, on_done=None): tu = self.translationUnits.lock() pl = self.parsingList.lock() try: if filename not in tu and filename not in pl: pl.append(filename) self.tasks.put(( self.task_parse, (filename, opts, opts_script, on_done))) finally: self.translationUnits.unlock() self.parsingList.unlock() def add(self, view, filename, on_done=None): ret = False tu = self.translationUnits.lock() pl = self.parsingList.lock() try: if filename not in tu and filename not in pl: ret = True opts = self.get_opts(view) opts_script = self.get_opts_script(view) pl.append(filename) self.tasks.put(( self.task_parse, (filename, opts, opts_script, on_done))) finally: self.translationUnits.unlock() self.parsingList.unlock() return ret def get_opts_script(self, view): return expand_path(get_setting("options_script", "", view), view.window()) def check_opts(self, view): key = view.file_name() opts = get_setting("options", [], view) cache = self.__options_cache.lock() try: if opts != cache[key][0]: view.settings().clear_on_change("sublimeclang.opts") del cache[key] except KeyError: 
view.settings().clear_on_change("sublimeclang.opts") finally: self.__options_cache.unlock() def get_opts(self, view): key = view.file_name() cache = self.__options_cache.lock() try: if key in cache: return list(cache[key][1]) finally: self.__options_cache.unlock() opts = get_path_setting("options", [], view) if not get_setting("dont_prepend_clang_includes", False, view): opts.insert(0, "-I%s/clang/include" % scriptpath) if get_setting("add_language_option", True, view): language = get_language(view) if language == "objc": opts.append("-ObjC") elif language == "objc++": opts.append("-ObjC++") else: opts.append("-x") opts.append(language) additional_language_options = get_setting("additional_language_options", {}, view) if additional_language_options.has_key(language): opts.extend(additional_language_options[language] or []) self.debug_options = get_setting("debug_options", False) self.index_parse_options = get_setting("index_parse_options", 13, view) if view.window() != None: # At startup it's possible that the window is None and thus path expansion # might be wrong. cache = self.__options_cache.lock() try: cache[key] = (get_setting("options", [], view), opts) finally: self.__options_cache.unlock() view.settings().add_on_change("sublimeclang.opts", lambda: run_in_main_thread(lambda: self.check_opts(view))) return list(opts) def get_translation_unit(self, filename, opts=[], opts_script=None, unsaved_files=[]): if self.index == None: self.index = cindex.Index.create() tu = None tus = self.translationUnits.lock() if filename not in tus: self.translationUnits.unlock() pre_script_opts = list(opts) opts2 = [] for option in opts: opts2.extend(complete_path(option)) opts = opts2 if opts_script: # shlex.split barfs if fed with an unicode strings args = shlex.split(opts_script.encode()) + [filename] process = subprocess.Popen(args, stderr=subprocess.PIPE, stdout=subprocess.PIPE) output = process.communicate() if process.returncode: print "The options_script failed with code [%s]" % process.returncode print output[1] else: opts += shlex.split(output[0]) if self.debug_options: print "Will compile file %s with the following options:\n%s" % (filename, opts) opts.append(filename) tu = self.index.parse(None, opts, unsaved_files, self.index_parse_options) if tu != None: tu = LockedTranslationUnit(tu, filename) tu.opts = pre_script_opts tu.opts_script = opts_script tus = self.translationUnits.lock() tus[filename] = tu self.translationUnits.unlock() else: print "tu is None..." else: tu = tus[filename] recompile = tu.opts != opts or tu.opts_script != opts_script if recompile: del tus[filename] self.translationUnits.unlock() if recompile: self.set_status("Options change detected. Will recompile %s" % filename) self.add_ex(filename, opts, opts_script, None) return tu def remove(self, filename): self.tasks.put((self.task_remove, filename)) def clear(self): self.tasks.put((self.task_clear, None)) tuCache = TranslationUnitCache()
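
# --- Illustrative sketch (not part of SublimeClang): the prefix-similarity
# scoring used inline in ExtensiveSearch.worker() above. Lower scores are
# popped first from the PriorityQueue, so files whose names share a longer
# prefix with the current file are searched earlier. _prefix_score is a
# hypothetical helper written here only to show the idea.
def _prefix_score(filename, name, base=1000):
    score = base
    for a, b in zip(filename, name):
        if a != b:
            break
        score -= 1
    return score

# e.g. _prefix_score("foo.cpp", "foo.h") == 996  (common prefix "foo." = 4 chars)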
#!/usr/bin/python
# The purpose of this script is only to allow parsing SVN commit messages
# (it could be extended to GIT quite easily).
# Redmine supports this by default, so in case you wonder why I did this,
# there are two reasons to use this script:
# * Administrative: Your administrators don't have the time/will to set it up
# * Security: You do not want to give SCM access to your redmine server

import redmine
import time
import pysvn
import os
import sys
import re
import argparse
import unittest
import json

import logging
logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s | %(levelname)8s | %(filename).4s:%(lineno)4d | %(message)s",
                    datefmt='%m-%d %H:%M:%S',
                    )

# === CRAZY REGEXES STUFF ===
# These regexes are maybe a little bit long but they are the ones handling all the magic.
# If anyone is willing to simplify them (with them still passing the unit tests), I'm very interested.
# Earlier attempts, kept for reference:
# (?im)((?:(?:re)?solved|fixed)\s*)?(?:issue|bug|feature|improvement|redmine|the):?\s*#?([0-9]+)
# (?im)((?:solved|fixed)\s*)?(?:issue|bug|feature|improvement|redmine|the):?\s*#?([0-9]+)
# (?i)(solved|solves|solving|fixed|fixes|fixing|closed|closing)?\s*(?:issue(?:s/)?|bug|feature|improvement|redmine|the):?\s*#?([0-9]+)\s*(solved|solving|fixed|fixing|closed|closing)
# (?i)(solved|solves|solving|fixed|fixes|fixing|closed|closing)?\s*(?:issue(?:s/)?|bug|feature|improvement|redmine|the):?\s*?([0-9]+)\s*(solved|solving|fixed|fixing|closed|closing)?
# (?i)(solved|solves|solving|fixed|fixes|fixing|closed|closing)?\s*(?:issue(?:s/)?|bug|feature|improvement|redmine|the):?\s*?\#?([0-9]+)\s*(solved|solving|fixed|fixing|closed|closing)?
# (?i)(solves|solving|solved|fix|fixes|fixing|fixed|closed|closing)?\s*(?:(?:(?:issue(?:s/)?|bug|feature|improvement|redmine|the)\s*)|\#)([0-9]+)\s*(solves|solving|solved|fix|fixes|fixing|fixed|closed|closing)?

pattern_text_status = "(open|opening|opened|solves|solving|solved|fix|fixes|fixing|fixed|closing|closed)"
pattern_text_bugs = r"(?i)" + pattern_text_status + r"?\s*(?:(?:(?:issue(?:s\/)?|bug|feature|improvement|redmine|the)\s*)|\#)(?:\#?([0-9]+))\s*" + pattern_text_status + "?"
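
# --- Illustrative sanity checks (not part of the script itself): how the
# pattern above decomposes a commit message. findall() yields one
# (status-before, issue-number, status-after) tuple per issue reference.
if __debug__:
    _demo = re.compile(pattern_text_bugs)
    assert _demo.findall("fixing issue 123") == [("fixing", "123", "")]
    assert [m[1] for m in _demo.findall("number 20 and issue 13 and #30")] == ["13", "30"]
    # Note: a trailing status word may be captured only as its shortest
    # alternation prefix ("fix" out of "fixed"), which still maps to the
    # right id in status_attr_to_id below.
    assert _demo.findall("issue 123 fixed")[0][2] == "fix"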
pattern_bugs = re.compile(pattern_text_bugs)
pattern_done = re.compile(r"(?i)(?:(?P<per1>[0-9]+)%\sdone)|(?:(?:done|did)\s(?P<per2>[0-9]+)%)")
pattern_hours = re.compile(r"(?i)estimated\s(?:to\s)?([0-9]+)\s?h(?:ours)?")
pattern_priority = re.compile(r"(?i)(very low|low|normal|high|urgent|immediate) priority")
pattern_include_diff = re.compile(r"(?i)(?:include|with|want) (?:a\s)?diff")
pattern_skip = re.compile(r"\*.*")
pattern_diff_files = re.compile(r"^\+{3}\s(.*)\t\(revision [0-9]+\)$", re.MULTILINE)

# print "Pattern: "+pattern_text_bugs

status_attr_to_id = {
    'open': 2,
    'opening': 2,
    'opened': 2,
    'solves': 2,
    'solved': 3,
    'solving': 3,
    'fix': 3,
    'fixes': 3,
    'fixed': 3,
    'fixing': 3,
    'closed': 3,
    'closing': 3,
}


def get_priority_to_id():
    """Returns a dictionary to convert a textual priority name to priority_id"""
    return {
        "very low": 10,
        "low": 3,
        "normal": 4,
        "high": 5,
        "urgent": 6,
        "immediate": 7
    }
    # The following implementation unfortunately doesn't work and is left
    # here, unreachable, for reference:
    filename = "priority_to_id.json"
    if os.path.exists(filename):
        return json.loads(open(filename).read())
    priority_to_id = {}
    for p in rm.enumerations.filter(resource="time_entry_activities"):
        priority_to_id[p.name] = p.id
    open(filename + ".tmp", "w").write(json.dumps(priority_to_id))
    os.rename(filename + ".tmp", filename)
    return priority_to_id


def handle_log(message, author=None, rev=None, date=None):
    # This must be skipped
    if re.match(pattern_skip, message):
        return None
    matches = re.findall(pattern_bugs, message)
    if matches:
        issues_attr = {}
        for m in matches:
            issue_nb = m[1]
            if issue_nb not in issues_attr:
                if m[0]:
                    issues_attr[issue_nb] = m[0]
                elif m[2]:
                    issues_attr[issue_nb] = m[2]
                else:
                    issues_attr[issue_nb] = ""
        # print "Matches: " + str(len(issues_attr)) + " / " + str(issues_attr)
        changes_list = {}
        for issue_id, attr in issues_attr.iteritems():
            changes = {}
            # This is only for testing (very time consuming)
            # print " - Subject : " + issue.subject
            # print " - Author  : " + str(issue.author)
            if attr:
                changes["status_id"] = status_attr_to_id[attr.lower()]
            if len(issues_attr) == 1:
                # These rules apply only if we have exactly ONE issue
                done_match = re.findall(pattern_done, message)
                if done_match and len(done_match) == 1:
                    v = done_match[0][0]
                    if not v:
                        v = done_match[0][1]
                    changes["done_ratio"] = int(v)
                hours_match = re.findall(pattern_hours, message)
                if hours_match and len(hours_match) == 1:
                    changes["estimated_hours"] = int(hours_match[0])
                priority_match = re.findall(pattern_priority, message)
                if priority_match and len(priority_match) == 1:
                    changes["priority_id"] = get_priority_to_id()[priority_match[0]]
            changes["notes"] = "SVN r{rev}, {date}, {author}: <pre>{message}</pre>" \
                .format(rev=rev, date=date, author=author, message=message)
            if rev:
                diff = pysvn.Client().diff(
                    '/tmp',
                    svn_url,
                    revision1=pysvn.Revision(pysvn.opt_revision_kind.number, rev-1),
                    url_or_path2=svn_url,
                    revision2=pysvn.Revision(pysvn.opt_revision_kind.number, rev)
                )
                if re.findall(pattern_include_diff, message):
                    # TODO: Move this somewhere else.
                    # This is a bad design, we shouldn't mix the SCM messages parsing code
                    # with the SVN logic around it.
                    changes["notes"] += "diff: <pre><code class=\"diff\">"+diff+"</code></pre>"
                else:
                    changes["notes"] += "Modified files are:\n"
                    for f in re.findall(pattern_diff_files, diff):
                        changes["notes"] += "* "+f+"\n"
            changes_list[issue_id] = changes
        return changes_list


def main():
    # We get a redmine connection
    rm = redmine.Redmine(redmine_url, key=redmine_key)

    if os.path.exists(".rev_prev"):
        rev_prev = int(open(".rev_prev", 'r').read())
    else:
        logging.critical("Not having the .rev_prev file is very BAD !!!")
        sys.exit(1)

    # We list all SVN logs since last time
    logs = pysvn.Client().log(
        svn_url,
        revision_start=pysvn.Revision(pysvn.opt_revision_kind.number, rev_prev),
        revision_end=pysvn.Revision(pysvn.opt_revision_kind.head),
        limit=rev_limit
    )

    last_rev = None
    for log in logs:
        author = log["author"]
        revision = log.revision.number
        date = time.ctime(log.date)
        message = log.message
        logging.info("* {revision} - {date} - {author} : {message}".format(
            revision=revision, date=date, author=author,
            message=message.replace("\n", ".").replace("\r", ".")))
        if not last_rev or revision > last_rev:
            last_rev = revision
        changes_by_issue = handle_log(message, author, revision, date)
        if changes_by_issue:
            logging.debug("Changes: %s", json.dumps(changes_by_issue))
            for issue_id, changes in changes_by_issue.iteritems():
                logging.info("Considering update of issue %s ...", issue_id)
                try:
                    issue = rm.issue.get(issue_id)
                    if not issue:
                        logging.warning("Issue %s doesn't exist !!!", issue_id)
                        break
                    else:
                        msg = "SVN r{rev},".format(rev=revision)
                        for jl in issue.journals:
                            logging.debug("    Note: "+jl.notes.replace("\n", ".").replace("\r", "."))
                            if msg in jl.notes:
                                logging.warning("There's already a reference to our notes !")
                                changes = None
                                break
                    if not test_only and changes:
                        logging.info("Updating issue %s ...", issue_id)
                        try:
                            rm.issue.update(issue_id, **changes)
                        except redmine.ValidationError:
                            if "status_id" in changes.keys():
                                logging.info("Removing status change for issue %s", issue_id)
                                del changes["status_id"]
                                rm.issue.update(issue_id, **changes)
                            else:
                                logging.exception("Our issue wasn't validated %s", issue_id)
                except Exception:
                    logging.exception("Problem handling issue %s", issue_id)

    if last_rev:
        open(".rev_prev.tmp", 'w').write(str(last_rev))
        os.rename(".rev_prev.tmp", ".rev_prev")


class TestCommitMessages(unittest.TestCase):
    def test_issue_matching_1(self):
        changes = handle_log("issue 123")
        self.assertTrue(changes.has_key("123"))

    def test_issue_matching_2(self):
        changes = handle_log("bug #123")
        self.assertTrue(changes.has_key("123"))

    def test_issue_matching_3(self):
        changes = handle_log("the #12")
        self.assertTrue(changes.has_key("12"))

    def test_issue_matching_4(self):
        changes = handle_log("about #123")
        self.assertTrue(changes.has_key("123"))

    def test_issue_matching_5(self):
        changes = handle_log("number 20 and issue 13 and #30")
        self.assertEqual(len(changes), 2)
        self.assertTrue(changes.has_key("13"))
        self.assertTrue(changes.has_key("30"))

    def test_issue_fixed_1(self):
        changes = handle_log("fixing issue 123")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["status_id"], 3)

    def test_issue_fixed_2(self):
        changes = handle_log("issue 123 fixed")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["status_id"], 3)

    def test_issue_fixed_3(self):
        changes = handle_log("This commit fixes issue #123")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["status_id"], 3)

    def test_issue_fixed_multi(self):
        changes = handle_log("issue #123 and #256 need to be fixed soon")
        self.assertEquals(len(changes.keys()), 2)
        self.assertFalse(changes["123"].has_key("status_id"))
        self.assertFalse(changes["256"].has_key("status_id"))

    def test_issue_opening_1(self):
        changes = handle_log("opening #123")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["status_id"], 2)

    def test_issue_done_1(self):
        changes = handle_log("I've done 30% of issue #123")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["done_ratio"], 30)
        self.assertEquals(len(changes.keys()), 1)

    def test_issue_done_2(self):
        changes = handle_log("30% done on issue 123")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["done_ratio"], 30)
        self.assertEquals(len(changes.keys()), 1)

    def test_issue_done_3(self):
        changes = handle_log("did 30% of issue 123")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["done_ratio"], 30)
        self.assertEquals(len(changes.keys()), 1)

    def test_issue_estimated_1(self):
        changes = handle_log("issue 123 was estimated to 20h of work")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["estimated_hours"], 20)
        self.assertEquals(len(changes.keys()), 1)

    def test_issue_estimated_2(self):
        changes = handle_log("issue 123 requires an estimated 20 hours of work")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["estimated_hours"], 20)
        self.assertEquals(len(changes.keys()), 1)

    def test_issue_priority_1(self):
        changes = handle_log("switching #123 to high priority")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["priority_id"], 5)

    def test_issue_priority_2(self):
        changes = handle_log("setting immediate priority on bug 123")
        self.assertTrue(changes.has_key("123"))
        self.assertEquals(changes["123"]["priority_id"], 7)

    def test_issue_dont_consider_me(self):
        changes = handle_log("*Skip anything that follows")
        self.assertIsNone(changes)

    def test_diff_files_search(self):
        diff = """
Index: dir1/dir2/file1.ext
===================================================================
--- dir1/dir2/file1.ext\t(revision 4409)
+++ dir1/dir2/file1.ext\t(revision 4410)
@@ -54,13 +54,19 @@
- Removed
+ Added
Index: dir1/dir2/file2.ext
===================================================================
--- dir1/dir2/file2.ext\t(revision 4409)
+++ dir1/dir2/file2.ext\t(revision 4410)
@@ -54,13 +54,19 @@
- Removed
+ Added
"""
        matches = re.findall(pattern_diff_files, diff)
        self.assertSequenceEqual(["dir1/dir2/file1.ext", "dir1/dir2/file2.ext"], matches)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Parse SVN messages to perform some redmine actions')
    parser.add_argument('--unit-tests', action='store_true', help='Activate unit tests')
    parser.add_argument('--redmine-url', help='Redmine server URL', default=os.environ.get("REDMINE_URL"))
    parser.add_argument('--redmine-key', help='Redmine server access key', default=os.environ.get("REDMINE_KEY"))
    parser.add_argument('--svn-url', help='SVN URL', default=os.environ.get("SVN_URL"))
    parser.add_argument('--test-only', action='store_true', help='Do not update redmine')
    parser.add_argument('--rev-limit', '-l', default=10, type=int, help='Number of revisions to fetch')
    args = parser.parse_args()

    redmine_url = args.redmine_url
    redmine_key = args.redmine_key
    svn_url = args.svn_url
    test_only = args.test_only
    rev_limit = args.rev_limit

    if args.unit_tests:
        suite = unittest.TestLoader().loadTestsFromTestCase(TestCommitMessages)
        unittest.TextTestRunner(verbosity=2).run(suite)
    else:
        if not redmine_url:
            logging.critical("Missing the redmine URL")
        if not redmine_key:
            logging.critical("Missing the redmine key")
        if not svn_url:
            logging.critical("Missing the SVN URL")
        main()
"""This section introduces the Formula class.""" from variable_assignment import VariableAssignment class Formula(object): """ Formula class. Formula objects are defined over some Vocabulary :math:`\Sigma`. Formula objects are immutable. :ivar vocabulary: The underlying Vocabulary object :math:`\Sigma` the \ Formula is defined over. :ivar name: The name of the Formula object. :ivar terms: The terms of the Formula object. :ivar is_Formula: An identifier to use in place of ``type`` or \ ``isinstance``. """ def __init__(self, vocabulary, name, *terms): """ Construct a Formula object. :param vocabulary: The underlying Vocabulary object :math:`\Sigma` \ the Formula is defined over. :type vocabulary: Vocabulary :param name: The name (identifier) of the formula. :type name: ``str`` :param terms: Any amount of ``str`` constants and variables \ representing the terms of the formula. :type terms: ``str`` :raises TypeError: ``vocabulary`` parameter must be a Vocabulary \ object. :raises ValueError: ``name`` parameter must match some RelationSymbol \ object in the ``vocabulary`` parameter, at least one term must be \ provided and all terms provided must be in either the constants or \ variables of the ``vocabulary`` parameter :math:`\Sigma`. """ if not hasattr(vocabulary, "_is_Vocabulary"): raise TypeError("vocabulary parameter must be a Vocabulary object") if type(name) != str: raise TypeError(name + " must be a string") relation_symbol_names = [rs._name for rs in vocabulary._R] if name not in relation_symbol_names: raise ValueError( "Name must match some RelationSymbol in Vocabulary") if not terms: raise ValueError("at least 1 term must be provided") C, V = vocabulary._C, vocabulary._V for t in terms: if t in C: in_C = True else: in_C = False if t in V: in_V = True else: in_V = False # Vocabulary takes care of ensuring no overlap between C and V if not in_C and not in_V: raise ValueError( "all terms must be contained in vocabulary's C or V") from copy import deepcopy self._vocabulary = vocabulary self._name = deepcopy(name) self._terms = list(terms) self._is_Formula = True def __eq__(self, other): """ Determine if two Formula objects are equal via the ``==`` operator. """ if not hasattr(other, "_is_Formula"): raise TypeError( "can only compare Formula object with another Formula object") vocab_cond = self._vocabulary is other._vocabulary name_cond = self._name == other._name terms_cond = set(self._terms) == set(other._terms) if vocab_cond and name_cond and terms_cond: return True else: return False def __ne__(self, other): """ Determine if two Formula objects are not equal via the ``!=`` operator. """ return not self.__eq__(other) def __add__(self, other): """ Combine a Formula object and another Formula object or an AssumptionBase object into an AssumptionBase object via the ``+`` operator. :raises TypeError: Only a Formula object or AssumptionBase object can \ be combined with a Formula object. :raises ValueError: This Formula and ``other`` parameter must share \ the same underlying Vocabulary object :math:`\Sigma` and duplicate \ Formula objects are not permitted (determined by name of the Formula \ object only). 
""" from copy import deepcopy from assumption_base import AssumptionBase # Handle adding an AssumptionBase if hasattr(other, "_is_AssumptionBase"): # Edge cases if len(other) == 0: if other._vocabulary is not self._vocabulary: raise ValueError( "Cannot add an AssumptionBase with different " "Vocabulary than this Formula object") return AssumptionBase(self) for other_formula in other: if other_formula._vocabulary is not self._vocabulary: raise ValueError( "Cannot add an AssumptionBase with different " "Vocabulary than this Formula object") if other_formula._name == self._name: raise ValueError("Duplicate Formula objects not permitted") return AssumptionBase(*deepcopy(other._formulae + [self])) # Handle adding a Formula if hasattr(other, "_is_Formula"): if other._vocabulary is not self._vocabulary: raise ValueError( "Cannot add Formula's with different Vocabulary's") if other._name == self._name: raise ValueError("Duplicate Formula objects not permitted") return AssumptionBase(*deepcopy([self, other])) raise TypeError( "Only Formula and AssumptionBase objects can be added to an " "AssumptionBase") def __str__(self): """ Return a readable string representation of the NamedState object. """ return self._name + '(' + ', '.join( [str(t) for t in self._terms]) + ')' def __repr__(self): """Return a string representation of the NamedState object.""" return self.__str__() def _key(self): """ Private key function for hashing. :return: tuple consisting of (name, :math:`t_{1}, \ldots, t_{n}`) :rtype: ``tuple`` """ return (hash(self._vocabulary), self._name, tuple(sorted(self._terms))) def __hash__(self): """Hash implementation for set functionality of Formula objects.""" return hash(self._key()) def __deepcopy__(self, memo): """ Deepcopy a Formula object via the ``copy.deepcopy`` method. This does not break the reference to the underlying Vocabulary object :math:`\Sigma`. """ from copy import deepcopy return Formula(self._vocabulary, deepcopy(self._name), *deepcopy(self._terms)) def assign_truth_value(self, attribute_interpretation, named_state, X): """ Assign a truth value in :math:`\{\\textbf{true}, \\textbf{false}, \\textbf{unknown}\}` to the calling Formula object :math:`F` given an arbitrary NamedState object :math:`(\sigma;\\rho)` in the ``named_state`` parameter and VariableAssignment object :math:`\chi` in the ``X`` parameter w.r.t. an AttributeInterpretation object :math:`I`. This function makes use of the ParserSet object; the ParserSet object is a key part in the vivid object extension protocol. The assign_truth_value function works as follows: 1. Find the entry in the interpretation table of the AttributeInterpretation object :math:`I` in the ``attribute_interpretation`` parameter and extract the corresponding profile and Relation object (the 3rd element of the corresponding row of the table is the identifier for the Relation object; e.g. :math:`R_{subscript}`). 2. Substitute the terms of the Formula object :math:`F` into the profile (the 2nd element of each pair in the profile corresponds to the index of the term in the :math:`F` to use, shifted down by 1). 3. Using the ConstantAssignment object of the ``named_state`` parameter :math:`\\rho` and the VariableAssignment in the ``X`` parameter :math:`\chi`, substitute for each term now in the profile, the object corresponding to that term given by the mapping in :math:`\\rho` or :math:`\chi` (if the term is in neither :math:`\\rho` nor :math:`\chi`, "unknown" is returned as the truth value). 4. 
The profile now consists of the attribute-object pairs (:math:`\delta_{i}(s_{j})` for some set of the possible values of :math:`i` and :math:`j`) to use in the Relation object's definition when creating the evaluatable expression. Now, all worlds :math:`(w;\widehat{\\rho})` derivable from the NamedState are generated and the ValueSets of the attribute-object pairs in the profile (consisting of single elements) are extracted from the ascriptions of these worlds. 5. The single element ValueSets are zipped together with the arguments in the Relation object definition (the :math:`i`\ th attribute-object pair of the profile is zipped with the :math:`i`\ th argument of the definition) and these new argument-ValueSet pairs are used to substitute every occurance of each argument in the definition with the corresponding single element ValueSet creating a (hopefully) evaluatable expression (the RHS of the substituted definition) for each world :math:`(w;\widehat{\\rho})`. 6. Each parser in the ParserSet object will then try to evaluate the expression and save the truth value for each :math:`(w;\widehat{\\rho})`. If some expression is unevaluatable for all parsers in the ParserSet a ValueError is raised. 7. If the expression of every world :math:`(w;\widehat{\\rho})` evaluates to True, the truth value returned is **true**, if the expression of every world evaluates to False, the truth value returned is **false** and if the expressions of any two worlds evaluate to different values, the truth value returned is **unknown**. :return: A truth value in the set \ :math:`\{\\textbf{true}, \\textbf{false}, \\textbf{unknown}\}` :rtype: ``bool`` | ``str`` :raises TypeError: ``attribute_interpretation`` parameter must be an \ AttributeInterpretation object, ``named_state`` parameter must be a \ NamedState object and ``X`` parameter must be a VariableAssignment \ object. :raises ValueError: This Formula object, the AttributeInterpretation \ object in the ``attribute_interpretation`` parameter, the NamedState \ object in the ``named_state`` parameter and the VariableAssignment \ object in the ``X`` parameter must all share the same underlying \ Vocabulary object (that is :math:`F`, :math:`I`, \ :math:`(\sigma;\\rho)` and :math:`\chi` must all share the same \ :math:`\Sigma`), the Formula object must match an entry in the \ interpretation table of the AttributeInterpretation :math:`I` in the ``attribute_interpretation`` parameter, the number of \ attribute-object pairs in the profile corresponding to the Formula \ must match the arity of the corresponding Relation object found in \ the table (where the Relation object is found in the \ AttributeStructure object in the AttributeSystem member of the \ ``named_state`` parameter), :math:`1 \le j_{x} \le n` for each \ :math:`j_{x}` in the profile (where *n* is the arity of the \ RelationSymbol corresponding to the RelationSymbol object matching \ the Formula in the interpretation table, or equivalently, the number \ of terms in the Formula object) and a parser in the ParserSet object \ must be able to evaluate the expression obtained after substituting \ the objects of the AttributeSystem in the ``named_state`` parameter, \ corresponding to the terms of the Formula, into the Relation object's \ definition. 
""" def get_relation_arguments(definition): """Return the arguments provided in Relation definition.""" start_paren = definition.find('(') end_paren = definition.find(')') arg_string = definition[start_paren + 1:end_paren] return arg_string.split(',') if not hasattr(attribute_interpretation, "_is_AttributeInterpretation"): raise TypeError( "attribute_interpretation parameter must an " "AttributeInterpretation object") if not hasattr(named_state, "_is_NamedState"): raise TypeError( "named_state parameter must be a NamedState object") if not hasattr(X, "_is_VariableAssignment"): raise TypeError( "X parameter must be a VariableAssignment object") if self._vocabulary == attribute_interpretation._vocabulary == \ named_state._p._vocabulary == X._vocabulary: pass else: raise ValueError( "Vocabulry's of Formula, AttributeInterpretation, NamedState, " "and VariableAssignment must match") # name should always be in interpretation table for entry in attribute_interpretation: if entry[0]._name == self._name: R_I = entry break else: raise ValueError(self._name + " must be in intepretation table") p = named_state._p profile = list(R_I[3]) terms = self._terms relation = named_state._attribute_system._attribute_structure[ int(R_I[2][1:])] if len(profile) != len(relation._DR): raise ValueError( "number of profile pairs must be equal to the number " "of arguments the relation takes") # compiling profile into attribute object pairs that will be # substituted into the expression # check if each index is valid in respect to list of terms # i.e., j_x has corresponding (t^{p,X})_{j_x} for index in [pair[1] for pair in profile]: if index > len(terms): raise ValueError( "each index corresponds to an index in formula's terms " "list; indicies may not exceed the amount of terms") profile_copy = [pr for pr in profile] # for each pair in profile grab formula term corresponding to the # pair's index; shifted down 1 as indexing starts at 0 and not 1 then # rewrite that pair with the corresponding term instead of index profile = map(lambda pair: (pair[0], terms[pair[1] - 1]), profile) # Replace Vocabulary C and V's with their respective objects # according to p and X for i, pair in enumerate(profile): try: obj = p._mapping[pair[1]] except KeyError: try: obj = X._mapping[pair[1]] except KeyError: return "unknown" profile[i] = (pair[0], obj) relation_args = get_relation_arguments(relation._definition) worlds = named_state.get_worlds() # sort by longest arguments firsts so we can ensure unambiguous # replacement when swapping in the valuations associated with the # ao_pairs from each world into the relation definition relation_args, profile = ( list(t) for t in zip(*sorted(zip(relation_args, profile), key=lambda x: len(x[0]), reverse=True))) # we now check the formula against each possible world within the state # First, create a ParserSet object so we can attempt parsing of formula from parsers.parser_set import ParserSet parser_set = ParserSet() truth_values = [] for world in worlds: # break reference from Relation definition = str(relation._definition) # zip arguments in Relation and valuations together valuations = [ world._ascriptions[ao_pair] for ao_pair in profile] substitutions = zip(relation_args, valuations) for substitution in substitutions: pattern, valueset = substitution # we're swapping in a valuation valueset so just shed # the prefix 'V(' and suffix ')' value = str(valueset)[2:-1] definition = definition.replace(pattern, value) # trim the LHS of the definition to make evaluatable expression expression = 
definition[definition.find(" <=> ") + 5:] # Try each parser in ParserSet; raise ValueError if no parser # can successfully parse formula for parser in parser_set: try: result = parser(expression) truth_values.append(result) break except: pass else: raise ValueError("Unable to parse formula") if all(truth_values): return True elif not any(truth_values): return False else: return "unknown" @staticmethod def get_basis(constant_assignment, variable_assignment, attribute_interpretation, *formulae): """ Get the basis of the Formula objects :math:`F_{1}, \ldots, F_{k}` provided as optional positional arguments in the ``formulae`` parameter w.r.t. the ConstantAssignment object :math:`\\rho` provided in the ``constant_assignment`` parameter, VariableAssignment object :math:`\chi` provided in the ``variable_assignment`` parameter, and the AttributeInterpretation object :math:`I` provided in the ``attribute_interpretation`` parameter, i.e., compute :math:`\mathcal{B}(F_{1}, \\rho, \chi) \cup \cdots \cup \mathcal{B}(F_{k}, \\rho, \chi)`. :param constant_assignment: The ConstantAssignment object \ :math:`\\rho` to use to compile the profile corresponding to each \ Formula object :math:`{F_{i}, i = 1, \ldots, k}` into \ attribute-object pairs to consider for the basis. :type constant_assignment: ConstantAssignment :param variable_assignment: The VariableAssignment object \ :math:`\chi` to use to compile the profile corresponding to each \ Formula object :math:`{F_{i}, i = 1, \ldots, k}` into \ attribute-object pairs to consider for the basis or ``None``. :type variable_assignment: VariableAssignment | ``None`` :param attribute_interpretation: The AttributeInterpretation object \ :math:`I` to use to determine the profiles corresponding to the \ Formula objects :math:`F_{1}, \ldots, F_{k}` provided (the profile is \ extracted from the interpretation table when the RelationSymbol \ matching the Formula object's name is found). :type attribute_interpretation: AttributeInterpretation :param formulae: Any positive amount of Formula objects \ :math:`F_{1}, \ldots, F_{k}` to consider in the basis. :type formulae: Formula :return: A list of attribute-object pairs comprising the basis of the \ Formula objects :math:`F_{1}, \ldots, F_{k}` provided w.r.t. \ :math:`\\rho` and :math:`\chi`. :rtype: ``list`` :raises TypeError: ``constant_assignment`` parameter must be a \ ConstantAssignment object, and all optional positional arguments \ provided in the ``formulae`` parameter must be Formula objects. :raises ValueError: At least one Formula object must be provided and \ all Formula objects provided must match some entry in the \ interpretation table of the AttributeInterpretation object :math:`I`. 
""" if not formulae: raise ValueError("At least one Formula must be provided") if not hasattr(constant_assignment, "_is_ConstantAssignment"): raise TypeError( "constant_assignment parameter must be a ConstantAssignment " "object") basis = set([]) # Load a dummy VariableAssignment object if one is not provided if not variable_assignment: variable_assignment = VariableAssignment( constant_assignment._vocabulary, constant_assignment._attribute_system, {}, dummy=True) for formula in formulae: if not hasattr(formula, "_is_Formula"): raise TypeError( "All positional arguments provided in formulae must be " "Formula objects.") # name should always be in interpretation table for entry in attribute_interpretation: if entry[0]._name == formula._name: R_I = entry break else: raise ValueError( formula._name + " must be in intepretation table") profile = list(R_I[3]) terms = formula._terms profile = map(lambda pair: (pair[0], terms[pair[1] - 1]), profile) # Replace Vocabulary C and V's with their respective objects # according to p and X for i, pair in enumerate(profile): try: obj = constant_assignment._mapping[pair[1]] except KeyError: try: obj = variable_assignment._mapping[pair[1]] except KeyError: raise ValueError("term: " + pair[1] + " undefined") profile[i] = (pair[0], obj) # Add all ao-pairs in profile to basis if they're not in it already basis.update(profile) return list(basis) def main(): """Quick tests.""" from point import Point from relation import Relation from attribute import Attribute from attribute_structure import AttributeStructure from relation_symbol import RelationSymbol from vocabulary import Vocabulary from attribute_system import AttributeSystem from constant_assignment import ConstantAssignment from named_state import NamedState from variable_assignment import VariableAssignment from attribute_interpretation import AttributeInterpretation from assumption_base import AssumptionBase point = Attribute('point', [Point('x', 'x', 'x', 'x')]) r_is_on = Relation('R1(h1, h2, h3) <=> is_on(h1, h2, h3)', ['point', 'point', 'point'], 1) r_not_same_point = Relation('R2(h1, h2) <=> not_same_point(h1, h2)', ['point', 'point'], 2) r_clocks_unequal = Relation('R3(h1, h2) <=> clocks_unequal(h1, h2)', ['point', 'point'], 3) r_can_observe = Relation( 'R4(p, sp_loc, wls, wle) <=> can_observe(p, sp_loc, wls, wle)', ['point', 'point', 'point', 'point'], 4) r_meets = Relation( 'R5(p, wl1s, wl1e, wl2s, wl2e) <=> meets(p, wl1s, wl1e, wl2s, wl2e)', ['point', 'point', 'point', 'point', 'point'], 5) attribute_structure = AttributeStructure( point, r_is_on, r_not_same_point, r_clocks_unequal, r_can_observe, r_meets) rs_is_on = RelationSymbol('IS_ON', 3) rs_not_same_point = RelationSymbol('NOT_SAME_POINT', 2) rs_clocks_unequal = RelationSymbol('CLOCKS_UNEQUAL', 2) rs_can_observe = RelationSymbol('CAN_OBSERVE', 4) rs_meets = RelationSymbol('MEETS', 5) vocabulary = Vocabulary(['P1', 'P2', 'P3', 'P4', 'P5'], [rs_is_on, rs_not_same_point, rs_clocks_unequal, rs_can_observe, rs_meets], []) profiles = [ [rs_is_on, ('point', 1), ('point', 2), ('point', 3)], [rs_not_same_point, ('point', 1), ('point', 2)], [rs_clocks_unequal, ('point', 1), ('point', 2)], [rs_can_observe, ('point', 1), ('point', 2), ('point', 3), ('point', 4)], [rs_meets, ('point', 1), ('point', 2), ('point', 3), ('point', 4), ('point', 5)]] mapping = {rs_is_on: 1, rs_not_same_point: 2, rs_clocks_unequal: 3, rs_can_observe: 4, rs_meets: 5} attribute_interpretation = AttributeInterpretation(vocabulary, attribute_structure, mapping, profiles) objects 
= ['p1', 'p2', 'p3', 'p4', 'p5']
    attribute_system = AttributeSystem(attribute_structure, objects)

    p = ConstantAssignment(vocabulary, attribute_system,
                           {'P1': 'p1', 'P2': 'p2', 'P3': 'p3',
                            'P4': 'p4', 'P5': 'p5'})

    named_state = NamedState(attribute_system, p, {
        ('point', 'p1'): [Point(1.5, 1.5, 1.5, 1.5)],
        ('point', 'p2'): [Point(2.0, 2.0, 2.0, 2.0)],
        ('point', 'p3'): [Point(1.0, 1.0, 1.0, 1.0)],
        ('point', 'p4'): [Point(3.0, 3.0, 3.0, 3.0)],
        ('point', 'p5'): [Point(2.0, 2.0, 2.0, 2.0)]})

    f1 = Formula(vocabulary, 'IS_ON', 'P1', 'P3', 'P4')
    f2 = Formula(vocabulary, 'NOT_SAME_POINT', 'P1', 'P2')
    f3 = Formula(vocabulary, 'CLOCKS_UNEQUAL', 'P1', 'P2')
    f4 = Formula(vocabulary, 'CAN_OBSERVE', 'P1', 'P2', 'P3', 'P4')
    f5 = Formula(vocabulary, 'MEETS', 'P1', 'P2', 'P3', 'P4', 'P5')

    VA = VariableAssignment(vocabulary, attribute_system, {}, dummy=True)
    assumption_base = AssumptionBase(f1, f2, f3, f4)

    print(Formula.get_basis(
        named_state._p, VA, attribute_interpretation, f1, f2, f3, f4))

    # for f in assumption_base:
    #     print(f.assign_truth_value(attribute_interpretation, named_state, VA))

    named_state.set_ascription(('point', 'p4'), [Point(1.0, 1.0, 1.0, 1.0)])

    # print(f5.assign_truth_value(attribute_interpretation, named_state, VA))


if __name__ == "__main__":
    main()
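# Illustrative sketch, not part of the module above: Formula.__add__ is the
# one operation main() does not exercise, so this shows its AssumptionBase-
# building behaviour. f_a and f_b are assumed to be Formula objects sharing
# a Vocabulary and having distinct names (e.g. f1 and f2 from main()).
def _demo_formula_addition(f_a, f_b):
    ab = f_a + f_b            # Formula + Formula -> AssumptionBase of both
    try:
        f_a + ab              # f_a is already in ab, so __add__ raises
    except ValueError:
        pass                  # "Duplicate Formula objects not permitted"
    return ab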
import re from random import randint from django.db import connections class Handler: def __init__(self, queryset): self.open = False self.db = queryset.db self._model = queryset.model self._table_name = self._model._meta.db_table self._handler_name = self._construct_name(self._table_name) self._where, self._params = self._extract_where(queryset) def _construct_name(self, table_name): # Undocumented max of 64 chars (get error on HANDLER CLOSE only!) return "{}_{}".format(table_name[-31:], randint(1, 2e10)) # Context manager def __enter__(self): if self.open: raise ValueError("You cannot open the same handler twice!") self.cursor = connections[self.db].cursor() self.cursor.__enter__() self.cursor.execute( "HANDLER `{}` OPEN AS {}".format(self._table_name, self._handler_name) ) self.open = True return self def __exit__(self, exc_type, exc_value, traceback): if not self.open: raise ValueError("You cannot close an unopened handler!") self.cursor.execute("HANDLER `{}` CLOSE".format(self._handler_name)) self.cursor.__exit__(exc_type, exc_value, traceback) self.open = False # Public methods def read(self, index="PRIMARY", mode=None, where=None, limit=None, **kwargs): if not self.open: raise RuntimeError("This handler isn't open yet") index_op, index_value = self._parse_index_value(kwargs) if index_op is not None and mode is not None: raise ValueError( "You cannot use an index operator and mode " "together in a handler read" ) elif index_op is None and mode is None: # Default mode = "first" sql = ["HANDLER {} READ".format(self._handler_name)] params = () # Caller's responsibility to ensure the index name is correct sql.append("`{}`".format(index)) if index_op is not None: sql.append(index_op) if isinstance(index_value, tuple): sql.append("(") sql.append(",".join("%s" for x in index_value)) sql.append(")") params += index_value else: sql.append("(%s)") params += (index_value,) if index_op is None: try: sql.append(self._read_modes[mode]) except KeyError: raise ValueError( "'mode' must be one of: {}".format( ",".join(self._read_modes.keys()) ) ) if where is None: # Use default if self._where: sql.append(self._where) params += self._params else: # 'where' is another queryset to use the clause from if isinstance(where, tuple): # Allow parsing in a pre-extracted where clause + params - # as iter() does where, where_params = where else: where, where_params = self._extract_where(where) sql.append(where) params += where_params if limit is not None: sql.append("LIMIT %s") params += (limit,) return self._model.objects.using(self.db).raw(" ".join(sql), params) _read_modes = {"first": "FIRST", "last": "LAST", "next": "NEXT", "prev": "PREV"} def _parse_index_value(self, kwargs): """ Parse the HANDLER-supported subset of django's __ expression syntax """ if len(kwargs) == 0: return None, None elif len(kwargs) > 1: raise ValueError( "You can't pass more than one value expression, " "you passed {}".format(",".join(kwargs.keys())) ) name, value = list(kwargs.items())[0] if not name.startswith("value"): raise ValueError( "The keyword arg {} is not valid for this " "function".format(name) ) if name == "value": return ("=", value) if not name.startswith("value__"): raise ValueError( "The keyword arg {} is not valid for this " "function".format(name) ) operator = name[name.find("__") + 2 :] try: return (self._operator_values[operator], value) except KeyError: raise ValueError( "The operator {op} is not valid for index value matching. 
" "Valid operators are {valid}".format( op=operator, valid=",".join(self._operator_values.keys()) ) ) _operator_values = {"lt": "<", "lte": "<=", "exact": "=", "gte": ">=", "gt": ">"} def iter(self, index="PRIMARY", where=None, chunk_size=100, reverse=False): if reverse: mode = "last" else: mode = "first" if where is not None: # Pre-convert so each iteration doesn't have to repeatedly parse # the SQL where = self._extract_where(where) while True: count = 0 for obj in self.read(index=index, where=where, mode=mode, limit=chunk_size): count += 1 yield obj if count < chunk_size: return if reverse: mode = "prev" else: mode = "next" # Internal methods @classmethod def _extract_where(cls, queryset): """ Was this a queryset with filters/excludes/expressions set? If so, extract the WHERE clause from the ORM output so we can use it in the handler queries. """ if not cls._is_simple_query(queryset.query): raise ValueError( "This QuerySet's WHERE clause is too complex to " "be used in a HANDLER" ) sql, params = queryset.query.sql_with_params() where_pos = sql.find("WHERE ") if where_pos != -1: # Cut the query to extract just its WHERE clause where_clause = sql[where_pos:] # Replace absolute table.column references with relative ones # since that is all HANDLER can work with # This is a bit flakey - if you inserted extra SQL with extra() or # an expression or something it might break. where_clause, _ = cls.absolute_col_re.subn(r"\1", where_clause) return (where_clause, params) else: return ("", ()) # For modifying the queryset SQL. Attempts to match the TABLE.COLUMN # pattern that Django compiles. Clearly not perfect. absolute_col_re = re.compile("`[^`]+`.(`[^`]+`)") @classmethod def _is_simple_query(cls, query): """ Inspect the internals of the Query and say if we think its WHERE clause can be used in a HANDLER statement """ return ( not query.low_mark and not query.high_mark and not query.select and not query.group_by and not query.distinct and not query.order_by and len(query.alias_map) <= 1 )
import logging import re from postprocessing.chebi_resolution import find_chebi_term3 from text.entity import Entity from config import config element_base = { # number: name symbol ions "H": ["Hydrogen", 1], "He": ["Helium", 2], "Li": ["Lithium", 3], "Be": ["Beryllium", 4], "B": ["Boron", 5], "C": ["Carbon", 6], "N": ["Nitrogen", 7], "O": ["Oxygen", 8], "F": ["Fluorine", 9], "Ne": ["Neon", 10], "Na": ["Sodium", 11], "Mg": ["Magnesium", 12], "Al": ["Aluminum", 13], "Si": ["Silicon", 14], "P": ["Phosphorus", 15], "S": ["Sulfur", 16], "Cl": ["Chlorine", 17], "Ar": ["Argon", 18], "K": ["Potassium", 19], "Ca": ["Calcium", 20], "Sc": ["Scandium", 21], "Ti": ["Titanium", 22], "V": ["Vanadium", 23], "Cr": ["Chromium", 24], "Mn": ["Manganese", 25], "Fe": ["Iron", 26], "Co": ["Cobalt", 27], "Ni": ["Nickel", 28], "Cu": ["Copper", 29], "Zn": ["Zinc", 30], "Ga": ["Gallium", 31], "Ge": ["Germanium", 32], "As": ["Arsenic", 33], "Se": ["Selenium", 34], "Br": ["Bromine", 35], "Kr": ["Krypton", 36], "Rb": ["Rubidium", 37], "Sr": ["Strontium", 38], "Y": ["Yttrium", 39], "Zr": ["Zirconium", 40], "Nb": ["Niobium", 41], "Mo": ["Molybdenum", 42], "Tc": ["Technetium", 43], "Ru": ["Ruthenium", 44], "Rh": ["Rhodium", 45], "Pd": ["Palladium", 46], "Ag": ["Silver", 47], "Cd": ["Cadmium", 48], "In": ["Indium", 49], "Sn": ["Tin", 50], "Sb": ["Antimony", 51], "Te": ["Tellurium", 52], "I": ["Iodine", 53], "Xe": ["Xenon", 54], "Cs": ["Cesium", 55], "Ba": ["Barium", 56], "La": ["Lanthanum", 57], "Ce": ["Cerium", 58], "Pr": ["Praseodymium", 59], "Nd": ["Neodymium", 60], "Pm": ["Promethium", 61], "Sm": ["Samarium", 62], "Eu": ["Europium", 63], "Gd": ["Gadolinium", 64], "Tb": ["Terbium", 65], "Dy": ["Dysprosium", 66], "Ho": ["Holmium", 67], "Er": ["Erbium", 68], "Tm": ["Thulium", 69], "Yb": ["Ytterbium", 70], "Lu": ["Lutetium", 71], "Hf": ["Hafnium", 72], "Ta": ["Tantalum", 73], "W": ["Tungsten", 74], "Re": ["Rhenium", 75], "Os": ["Osmium", 76], "Ir": ["Iridium", 77], "Pt": ["Platinum", 78], "Au": ["Gold", 79], "Hg": ["Mercury", 80], "Tl": ["Thallium", 81], "Pb": ["Lead", 82], "Bi": ["Bismuth", 83], "Po": ["Polonium", 84], "At": ["Astatine", 85], "Rn": ["Radon", 86], "Fr": ["Francium", 87], "Ra": ["Radium", 88], "Ac": ["Actinium", 89], "Th": ["Thorium", 90], "Pa": ["Protactinium", 91], "U": ["Uranium", 92], "Np": ["Neptunium", 93], "Pu": ["Plutonium", 94], "Am": ["Americium", 95], "Cm": ["Curium", 96], "Bk": ["Berkelium", 97], "Cf": ["Californium", 98], "Es": ["Einsteinium", 99], "Fm": ["Fermium", 100], "Md": ["Mendelevium", 101], "No": ["Nobelium", 102], "Lr": ["Lawrencium", 103], "Rf": ["Rutherfordium", 104], "Db": ["Dubnium", 105], "Sg": ["Seaborgium", 106], "Bh": ["Bohrium", 107], "Hs": ["Hassium", 108], "Mt": ["Meitnerium", 109], "Ds": ["Darmstadtium", 110], "Rg": ["Roentgenium", 111], "Cn": ["Copernicium", 112], "Uuq": ["Ununquadium", 114], "Uuh": ["Ununhexium", 116], } amino_acids = { 'Ala': '', 'Arg': '', 'Ans': '', 'Asp': '', 'Cys': '', 'Glu': '', 'Gln': '', 'Gly': '', 'His': '', 'Ile': '', 'Leu': '', 'Lys': '', 'Met': '', 'Phe': '', 'Pro': '', 'Ser': '', 'Thr': '', 'Trp': '', 'Tyr': '', 'Val': '', 'Sec': '', 'Pyl': '', } chem_words = set() chem_stopwords = set() # words that may seem like they are not part of named chemical entities but they are for e in element_base: chem_words.add(e.lower()) chem_words.add(element_base[e][0].lower()) #with open("TermList.txt") as termlist: # for l in termlist: # chem_words.add(l.strip().lower()) # words that are never part of chemical entities with open(config.stoplist, 'r') as 
stopfile: for l in stopfile: w = l.strip().lower() if w not in chem_words and len(w) > 1: chem_stopwords.add(w) class ChemicalEntity(Entity): """Chemical entities""" def __init__(self, tokens, sid, *args, **kwargs): # Entity.__init__(self, kwargs) super(ChemicalEntity, self).__init__(tokens, *args, **kwargs) self.type = "chemical" self.subtype = kwargs.get("subtype") self.chebi_id = None self.chebi_score = 0 self.chebi_name = None self.sid = sid self.ssm_score = 0 self.ssm_best_ID = None def normalize(self): chebi_info = find_chebi_term3(self.text.encode("utf-8")) self.chebi_id = chebi_info[0] self.chebi_name = chebi_info[1] self.chebi_score = chebi_info[2] def get_dic(self): dic = super(ChemicalEntity, self).get_dic() dic["subtype"] = self.subtype dic["chebi_id"] = self.chebi_id dic["chebi_name"] = self.chebi_name dic["ssm_score"] = self.ssm_score dic["ssm_entity"] = self.ssm_best_ID return dic def validate(self, ths, rules, *args, **kwargs): """ Use rules to validate if the entity was correctly identified :param rules: :return: True if entity does not fall into any of the rules, False if it does """ if "stopwords" in rules: # todo: use regex words = self.text.split(" ") stop = False for s in chem_stopwords: if any([s == w.lower() for w in words]): logging.debug("ignored stopword %s" % self.text) stop = True if stop: return False if "paren" in rules: if (self.text[-1] == ")" and "(" not in self.text) or (self.text[-1] == "]" and "[" not in self.text) or \ (self.text[-1] == "}" and "{" not in self.text): logging.debug("parenthesis %s" % self.text) self.dend -= 1 self.end -= 1 self.text = self.text[:-1] if (self.text[0] == "(" and ")" not in self.text) or (self.text[0] == "[" and "]" not in self.text) or \ (self.text[0] == "{" and "}" not in self.text): logging.debug("parenthesis %s" % self.text) self.dstart += 1 self.start += 1 self.text = self.text[1:] if "hyphen" in rules and "-" in self.text and all([len(t) > 3 for t in self.text.split("-")]): logging.debug("ignored hyphen %s" % self.text) return False #if all filters are 0, do not even check if "ssm" in ths and ths["ssm"] != 0 and self.ssm_score < ths["ssm"] and self.text.lower() not in chem_words: #logging.debug("filtered %s => %s" % (self.text, str(self.ssm_score))) return False if "alpha" in rules: alpha = False for c in self.text.strip(): if c.isalpha(): alpha = True break if not alpha: logging.debug("ignored no alpha %s" % self.text) return False if "dash" in rules and (self.text.startswith("-") or self.text.endswith("-")): logging.debug("excluded for -: {}".format(self.text)) return False return True
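# Standalone sketch of the "paren" rule applied in validate() above: a
# candidate entity keeps a closing bracket only when its opener is present
# (and vice versa). Plain strings are used instead of Entity objects, so the
# offset bookkeeping (dstart/dend/start/end) is deliberately left out.
_BRACKET_PAIRS = [("(", ")"), ("[", "]"), ("{", "}")]

def _trim_unbalanced_brackets(text):
    if text and any(text[-1] == close_ and open_ not in text
                    for open_, close_ in _BRACKET_PAIRS):
        text = text[:-1]      # dangling closer at the end
    if text and any(text[0] == open_ and close_ not in text
                    for open_, close_ in _BRACKET_PAIRS):
        text = text[1:]       # dangling opener at the start
    return text

assert _trim_unbalanced_brackets("acetyl-CoA)") == "acetyl-CoA"
assert _trim_unbalanced_brackets("(2S)-citramalate") == "(2S)-citramalate"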
# coding: utf-8 # # Copyright 2017 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Decorators to provide authorization across the site.""" from constants import constants from core.controllers import base from core.domain import rights_manager from core.domain import role_services from core.platform import models import feconf current_user_services = models.Registry.import_current_user_services() def open_access(handler): """Decorator to give access to everyone.""" def test_can_access(self, **kwargs): return handler(self, **kwargs) test_can_access.__wrapped__ = True return test_can_access def can_play_exploration(handler): """Decorator to check whether user can play given exploration.""" def test_can_play(self, exploration_id, **kwargs): if exploration_id in feconf.DISABLED_EXPLORATION_IDS: raise self.PageNotFoundException exploration_rights = rights_manager.get_exploration_rights( exploration_id, strict=False) if rights_manager.check_can_access_activity( self.user_id, self.actions, constants.ACTIVITY_TYPE_EXPLORATION, exploration_rights): return handler(self, exploration_id, **kwargs) else: raise self.PageNotFoundException test_can_play.__wrapped__ = True return test_can_play def can_play_collection(handler): """Decorator to check whether user can play given collection.""" def test_can_play(self, collection_id, **kwargs): collection_rights = rights_manager.get_collection_rights( collection_id, strict=False) if rights_manager.check_can_access_activity( self.user_id, self.actions, constants.ACTIVITY_TYPE_COLLECTION, collection_rights): return handler(self, collection_id, **kwargs) else: raise self.PageNotFoundException test_can_play.__wrapped__ = True return test_can_play def can_download_exploration(handler): """Decorator to check whether user can download given exploration. If a user is authorized to play given exploration, they can download it. """ def test_can_download(self, exploration_id, **kwargs): if exploration_id in feconf.DISABLED_EXPLORATION_IDS: raise base.UserFacingExceptions.PageNotFoundException exploration_rights = rights_manager.get_exploration_rights( exploration_id, strict=False) if rights_manager.check_can_access_activity( self.user_id, self.actions, constants.ACTIVITY_TYPE_EXPLORATION, exploration_rights): return handler(self, exploration_id, **kwargs) else: raise self.PageNotFoundException test_can_download.__wrapped__ = True return test_can_download def can_view_exploration_stats(handler): """Decorator to check whether user can view exploration stats. If a user is authorized to play given exploration, they can view its stats. 
""" def test_can_view_stats(self, exploration_id, **kwargs): if exploration_id in feconf.DISABLED_EXPLORATION_IDS: raise base.UserFacingExceptions.PageNotFoundException exploration_rights = rights_manager.get_exploration_rights( exploration_id, strict=False) if rights_manager.check_can_access_activity( self.user_id, self.actions, constants.ACTIVITY_TYPE_EXPLORATION, exploration_rights): return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.PageNotFoundException test_can_view_stats.__wrapped__ = True return test_can_view_stats def can_edit_collection(handler): """Decorator to check whether the user can edit collection.""" def test_can_edit(self, collection_id, **kwargs): if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException collection_rights = rights_manager.get_collection_rights(collection_id) if collection_rights is None: raise base.UserFacingExceptions.PageNotFoundException if rights_manager.check_can_edit_activity( self.user_id, self.actions, constants.ACTIVITY_TYPE_COLLECTION, collection_rights): return handler(self, collection_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to edit this collection.') test_can_edit.__wrapped__ = True return test_can_edit def can_manage_email_dashboard(handler): """Decorator to check whether user can access email dashboard.""" def test_can_manage_emails(self, **kwargs): if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException if role_services.ACTION_MANAGE_EMAIL_DASHBOARD in self.actions: return handler(self, **kwargs) raise self.UnauthorizedUserException( 'You do not have credentials to access email dashboard.') test_can_manage_emails.__wrapped__ = True return test_can_manage_emails def can_access_moderator_page(handler): """Decorator to check whether user can access moderator page.""" def test_can_access_moderator_page(self, **kwargs): if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException if role_services.ACTION_ACCESS_MODERATOR_PAGE in self.actions: return handler(self, **kwargs) raise self.UnauthorizedUserException( 'You do not have credentials to access moderator page.') test_can_access_moderator_page.__wrapped__ = True return test_can_access_moderator_page def can_send_moderator_emails(handler): """Decorator to check whether user can send moderator emails.""" def test_can_send_moderator_emails(self, **kwargs): if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException if role_services.ACTION_SEND_MODERATOR_EMAILS in self.actions: return handler(self, **kwargs) raise self.UnauthorizedUserException( 'You do not have credentials to send moderator emails.') test_can_send_moderator_emails.__wrapped__ = True return test_can_send_moderator_emails def can_manage_own_profile(handler): """Decorator to check whether user can manage his profile.""" def test_can_manage_profile(self, **kwargs): if not self.user_id: raise self.NotLoggedInException if role_services.ACTION_MANAGE_PROFILE in self.actions: return handler(self, **kwargs) raise self.UnauthorizedUserException( 'You do not have credentials to manage profile or preferences.') test_can_manage_profile.__wrapped__ = True return test_can_manage_profile def can_access_admin_page(handler): """Decorator that checks if the current user is a super admin.""" def test_super_admin(self, **kwargs): """Checks if the user is logged in and is a super admin.""" if not self.user_id: raise self.NotLoggedInException if not 
current_user_services.is_current_user_super_admin(): raise self.UnauthorizedUserException( '%s is not a super admin of this application', self.user_id) return handler(self, **kwargs) test_super_admin.__wrapped__ = True return test_super_admin def can_upload_exploration(handler): """Decorator that checks if the current user can upload exploration.""" def test_can_upload(self, **kwargs): if not self.user_id: raise self.NotLoggedInException if not current_user_services.is_current_user_super_admin(): raise self.UnauthorizedUserException( 'You do not have credentials to upload exploration.') return handler(self, **kwargs) test_can_upload.__wrapped__ = True return test_can_upload def can_create_exploration(handler): """Decorator to check whether the user can create an exploration.""" def test_can_create(self, **kwargs): if self.user_id is None: raise self.NotLoggedInException if role_services.ACTION_CREATE_EXPLORATION in self.actions: return handler(self, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to create an exploration.') test_can_create.__wrapped__ = True return test_can_create def can_create_collection(handler): """Decorator to check whether the user can create a collection.""" def test_can_create(self, **kwargs): if self.user_id is None: raise self.NotLoggedInException if role_services.ACTION_CREATE_COLLECTION in self.actions: return handler(self, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to create a collection.') test_can_create.__wrapped__ = True return test_can_create def can_access_creator_dashboard(handler): """Decorator to check whether the user can access creator dashboard page. """ def test_can_access(self, **kwargs): if self.user_id is None: raise self.NotLoggedInException if role_services.ACTION_ACCESS_CREATOR_DASHBOARD in self.actions: return handler(self, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to access creator dashboard.') test_can_access.__wrapped__ = True return test_can_access def can_comment_on_feedback_thread(handler): """Decorator to check whether the user can view feedback for a given exploration. """ def test_can_access(self, exploration_id, **kwargs): if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException if exploration_id in feconf.DISABLED_EXPLORATION_IDS: raise base.UserFacingExceptions.PageNotFoundException exploration_rights = rights_manager.get_exploration_rights( exploration_id, strict=False) if rights_manager.check_can_access_activity( self.user_id, self.actions, constants.ACTIVITY_TYPE_EXPLORATION, exploration_rights): return handler(self, exploration_id, **kwargs) else: raise self.UnauthorizedUserException( 'You do not have credentials to view exploration feedback.') test_can_access.__wrapped__ = True return test_can_access def can_rate_exploration(handler): """Decorator to check whether the user can give rating to given exploration. 
""" def test_can_rate(self, exploration_id, **kwargs): if role_services.ACTION_RATE_EXPLORATION in self.actions: return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to give ratings to explorations.') test_can_rate.__wrapped__ = True return test_can_rate def can_flag_exploration(handler): """Decorator to check whether user can flag given exploration.""" def test_can_flag(self, exploration_id, **kwargs): if role_services.ACTION_FLAG_EXPLORATION in self.actions: return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to flag explorations.') test_can_flag.__wrapped__ = True return test_can_flag def can_subscribe_to_users(handler): """Decorator to check whether user can subscribe/unsubscribe a creator.""" def test_can_subscribe(self, **kwargs): if role_services.ACTION_SUBSCRIBE_TO_USERS in self.actions: return handler(self, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to manage subscriptions.') test_can_subscribe.__wrapped__ = True return test_can_subscribe def can_edit_exploration(handler): """Decorator to check whether the user can edit given exploration.""" def test_can_edit(self, exploration_id, **kwargs): if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException exploration_rights = rights_manager.get_exploration_rights( exploration_id) if exploration_rights is None: raise base.UserFacingExceptions.PageNotFoundException if rights_manager.check_can_edit_activity( self.user_id, self.actions, constants.ACTIVITY_TYPE_EXPLORATION, exploration_rights): return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to edit this exploration.') test_can_edit.__wrapped__ = True return test_can_edit def can_delete_exploration(handler): """Decorator to check whether user can delete exploration.""" def test_can_delete(self, exploration_id, **kwargs): if not self.user_id: raise base.UserFacingExceptions.NotLoggedInException exploration_rights = rights_manager.get_exploration_rights( exploration_id, strict=False) if rights_manager.check_can_delete_exploration( self.user_id, self.actions, exploration_rights): return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'User %s does not have permissions to delete exploration %s' % (self.user_id, exploration_id)) test_can_delete.__wrapped__ = True return test_can_delete def can_suggest_changes_to_exploration(handler): """Decorator to check whether a user can make suggestions to an exploration. 
""" def test_can_suggest(self, exploration_id, **kwargs): if role_services.ACTION_SUGGEST_CHANGES_TO_EXPLORATION in self.actions: return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to give suggestions to this ' 'exploration.') test_can_suggest.__wrapped__ = True return test_can_suggest def can_publish_exploration(handler): """Decorator to check whether user can publish exploration.""" def test_can_publish(self, exploration_id, *args, **kwargs): exploration_rights = rights_manager.get_exploration_rights( exploration_id, strict=False) if exploration_rights is None: raise base.UserFacingExceptions.PageNotFoundException if rights_manager.check_can_publish_exploration( self.user_id, self.actions, exploration_rights): return handler(self, exploration_id, *args, **kwargs) if rights_manager.check_can_publicize_exploration( self.actions, exploration_rights): return handler(self, exploration_id, *args, **kwargs) raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to publish this exploration.') test_can_publish.__wrapped__ = True return test_can_publish def can_manage_collection_publish_status(handler): """Decorator to check whether user can publish exploration.""" def test_can_manage_collection_publish_status( self, collection_id, **kwargs): collection_rights = rights_manager.get_collection_rights( collection_id) if collection_rights is None: raise base.UserFacingExceptions.PageNotFoundException if collection_rights.is_published(): if role_services.ACTION_UNPUBLISH_PUBLIC_COLLECTION in self.actions: return handler(self, collection_id, **kwargs) raise self.UnauthorizedUserException( 'You do not have credentials to unpublish this collection.') if collection_rights.is_private(): if role_services.ACTION_PUBLISH_ANY_COLLECTION in self.actions: return handler(self, collection_id, **kwargs) if role_services.ACTION_PUBLISH_OWNED_COLLECTION in self.actions: if collection_rights.is_owner(self.user_id): return handler(self, collection_id, **kwargs) raise self.UnauthorizedUserException( 'You do not have credentials to publish this collection.') test_can_manage_collection_publish_status.__wrapped__ = True return test_can_manage_collection_publish_status def can_modify_exploration_roles(handler): """Decorators to check whether user can manage rights related to an exploration. """ def test_can_modify(self, exploration_id, **kwargs): exploration_rights = rights_manager.get_exploration_rights( exploration_id, strict=False) if rights_manager.check_can_modify_exploration_roles( self.user_id, self.actions, exploration_rights): return handler(self, exploration_id, **kwargs) else: raise base.UserFacingExceptions.UnauthorizedUserException( 'You do not have credentials to change rights for this ' 'exploration.') test_can_modify.__wrapped__ = True return test_can_modify def can_perform_cron_tasks(handler): """Decorator to ensure that the handler is being called by cron or by a superadmin of the application. 
""" def test_can_perform(self, **kwargs): if (self.request.headers.get('X-AppEngine-Cron') is None and not self.is_super_admin): raise self.UnauthorizedUserException( 'You do not have the credentials to access this page.') else: return handler(self, **kwargs) test_can_perform.__wrapped__ = True return test_can_perform def can_access_learner_dashboard(handler): """Decorator to check access to learner dashboard.""" def test_can_access(self, **kwargs): if role_services.ACTION_ACCESS_LEARNER_DASHBOARD in self.actions: return handler(self, **kwargs) else: raise self.NotLoggedInException test_can_access.__wrapped__ = True return test_can_access def require_user_id_else_redirect_to_homepage(handler): """Decorator that checks if a user_id is associated to the current session. If not, the user is redirected to the main page. Note that the user may not yet have registered. """ def test_login(self, **kwargs): """Checks if the user for the current session is logged in. If not, redirects the user to the home page. """ if not self.user_id: self.redirect('/') return return handler(self, **kwargs) test_login.__wrapped__ = True return test_login
# -*- coding:utf-8 -*- # # Copyright 2016 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. r""" ==================================================== Blacklist various Python calls known to be dangerous ==================================================== This blacklist data checks for a number of Python calls known to have possible security implications. The following blacklist tests are run against any function calls encoutered in the scanned code base, triggered by encoutering ast.Call nodes. B301: pickle ------------ Pickle library appears to be in use, possible security issue. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B301 | pickle | - pickle.loads | Medium | | | | - pickle.load | | | | | - pickle.Unpickler | | | | | - cPickle.loads | | | | | - cPickle.load | | | | | - cPickle.Unpickler | | +------+---------------------+------------------------------------+-----------+ B302: marshal ------------- Deserialization with the marshal module is possibly dangerous. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B302 | marshal | - marshal.load | Medium | | | | - marshal.loads | | +------+---------------------+------------------------------------+-----------+ B303: md5 --------- Use of insecure MD2, MD4, MD5, or SHA1 hash function. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B303 | md5 | - hashlib.md5 | Medium | | | | - hashlib.sha1 | | | | | - Crypto.Hash.MD2.new | | | | | - Crypto.Hash.MD4.new | | | | | - Crypto.Hash.MD5.new | | | | | - Crypto.Hash.SHA.new | | | | | - Cryptodome.Hash.MD2.new | | | | | - Cryptodome.Hash.MD4.new | | | | | - Cryptodome.Hash.MD5.new | | | | | - Cryptodome.Hash.SHA.new | | | | | - cryptography.hazmat.primitives | | | | | .hashes.MD5 | | | | | - cryptography.hazmat.primitives | | | | | .hashes.SHA1 | | +------+---------------------+------------------------------------+-----------+ B304 - B305: ciphers and modes ------------------------------ Use of insecure cipher or cipher mode. Replace with a known secure cipher such as AES. 
+------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B304 | ciphers | - Crypto.Cipher.ARC2.new | High | | | | - Crypto.Cipher.ARC4.new | | | | | - Crypto.Cipher.Blowfish.new | | | | | - Crypto.Cipher.DES.new | | | | | - Crypto.Cipher.XOR.new | | | | | - Cryptodome.Cipher.ARC2.new | | | | | - Cryptodome.Cipher.ARC4.new | | | | | - Cryptodome.Cipher.Blowfish.new | | | | | - Cryptodome.Cipher.DES.new | | | | | - Cryptodome.Cipher.XOR.new | | | | | - cryptography.hazmat.primitives | | | | | .ciphers.algorithms.ARC4 | | | | | - cryptography.hazmat.primitives | | | | | .ciphers.algorithms.Blowfish | | | | | - cryptography.hazmat.primitives | | | | | .ciphers.algorithms.IDEA | | +------+---------------------+------------------------------------+-----------+ | B305 | cipher_modes | - cryptography.hazmat.primitives | Medium | | | | .ciphers.modes.ECB | | +------+---------------------+------------------------------------+-----------+ B306: mktemp_q -------------- Use of insecure and deprecated function (mktemp). +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B306 | mktemp_q | - tempfile.mktemp | Medium | +------+---------------------+------------------------------------+-----------+ B307: eval ---------- Use of possibly insecure function - consider using safer ast.literal_eval. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B307 | eval | - eval | Medium | +------+---------------------+------------------------------------+-----------+ B308: mark_safe --------------- Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B308 | mark_safe | - django.utils.safestring.mark_safe| Medium | +------+---------------------+------------------------------------+-----------+ B309: httpsconnection --------------------- Use of HTTPSConnection on older versions of Python prior to 2.7.9 and 3.4.3 do not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033 +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B309 | httpsconnection | - httplib.HTTPSConnection | Medium | | | | - http.client.HTTPSConnection | | | | | - six.moves.http_client | | | | | .HTTPSConnection | | +------+---------------------+------------------------------------+-----------+ B310: urllib_urlopen -------------------- Audit url open for permitted schemes. Allowing use of 'file:'' or custom schemes is often unexpected. 
+------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B310 | urllib_urlopen | - urllib.urlopen | Medium | | | | - urllib.request.urlopen | | | | | - urllib.urlretrieve | | | | | - urllib.request.urlretrieve | | | | | - urllib.URLopener | | | | | - urllib.request.URLopener | | | | | - urllib.FancyURLopener | | | | | - urllib.request.FancyURLopener | | | | | - urllib2.urlopen | | | | | - urllib2.Request | | | | | - six.moves.urllib.request.urlopen | | | | | - six.moves.urllib.request | | | | | .urlretrieve | | | | | - six.moves.urllib.request | | | | | .URLopener | | | | | - six.moves.urllib.request | | | | | .FancyURLopener | | +------+---------------------+------------------------------------+-----------+ B311: random ------------ Standard pseudo-random generators are not suitable for security/cryptographic purposes. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B311 | random | - random.random | Low | | | | - random.randrange | | | | | - random.randint | | | | | - random.choice | | | | | - random.uniform | | | | | - random.triangular | | +------+---------------------+------------------------------------+-----------+ B312: telnetlib --------------- Telnet-related functions are being called. Telnet is considered insecure. Use SSH or some other encrypted protocol. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B312 | telnetlib | - telnetlib.\* | High | +------+---------------------+------------------------------------+-----------+ B313 - B320: XML ---------------- Most of this is based off of Christian Heimes' work on defusedxml: https://pypi.org/project/defusedxml/#defusedxml-sax Using various XLM methods to parse untrusted XML data is known to be vulnerable to XML attacks. Methods should be replaced with their defusedxml equivalents. 
+------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B313 | xml_bad_cElementTree| - xml.etree.cElementTree.parse | Medium | | | | - xml.etree.cElementTree.iterparse | | | | | - xml.etree.cElementTree.fromstring| | | | | - xml.etree.cElementTree.XMLParser | | +------+---------------------+------------------------------------+-----------+ | B314 | xml_bad_ElementTree | - xml.etree.ElementTree.parse | Medium | | | | - xml.etree.ElementTree.iterparse | | | | | - xml.etree.ElementTree.fromstring | | | | | - xml.etree.ElementTree.XMLParser | | +------+---------------------+------------------------------------+-----------+ | B315 | xml_bad_expatreader | - xml.sax.expatreader.create_parser| Medium | +------+---------------------+------------------------------------+-----------+ | B316 | xml_bad_expatbuilder| - xml.dom.expatbuilder.parse | Medium | | | | - xml.dom.expatbuilder.parseString | | +------+---------------------+------------------------------------+-----------+ | B317 | xml_bad_sax | - xml.sax.parse | Medium | | | | - xml.sax.parseString | | | | | - xml.sax.make_parser | | +------+---------------------+------------------------------------+-----------+ | B318 | xml_bad_minidom | - xml.dom.minidom.parse | Medium | | | | - xml.dom.minidom.parseString | | +------+---------------------+------------------------------------+-----------+ | B319 | xml_bad_pulldom | - xml.dom.pulldom.parse | Medium | | | | - xml.dom.pulldom.parseString | | +------+---------------------+------------------------------------+-----------+ | B320 | xml_bad_etree | - lxml.etree.parse | Medium | | | | - lxml.etree.fromstring | | | | | - lxml.etree.RestrictedElement | | | | | - lxml.etree.GlobalParserTLS | | | | | - lxml.etree.getDefaultParser | | | | | - lxml.etree.check_docinfo | | +------+---------------------+------------------------------------+-----------+ B321: ftplib ------------ FTP-related functions are being called. FTP is considered insecure. Use SSH/SFTP/SCP or some other encrypted protocol. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B321 | ftplib | - ftplib.\* | High | +------+---------------------+------------------------------------+-----------+ B322: input ------------ The input method in Python 2 will read from standard input, evaluate and run the resulting string as python source code. This is similar, though in many ways worse, then using eval. On Python 2, use raw_input instead, input is safe in Python 3. +------+---------------------+------------------------------------+-----------+ | ID | Name | Calls | Severity | +======+=====================+====================================+===========+ | B322 | input | - input | High | +------+---------------------+------------------------------------+-----------+ B323: unverified_context ------------------------ By default, Python will create a secure, verified ssl context for use in such classes as HTTPSConnection. However, it still allows using an insecure context via the _create_unverified_context that reverts to the previous behavior that does not validate certificates or perform hostname checks. 
+------+---------------------+------------------------------------+-----------+
| ID   |  Name               |  Calls                             |  Severity |
+======+=====================+====================================+===========+
| B323 | unverified_context  | - ssl._create_unverified_context   | Medium    |
+------+---------------------+------------------------------------+-----------+

"""

from bandit.blacklists import utils


def gen_blacklist():
    """Generate a list of items to blacklist.

    Methods of this type, "bandit.blacklist" plugins, are used to build a
    list of items that bandit's built in blacklisting tests will use to
    trigger issues. They replace the older blacklist* test plugins and allow
    blacklisted items to have a unique bandit ID for filtering and profile
    usage.

    :return: a dictionary mapping node types to a list of blacklist data
    """
    sets = []
    sets.append(utils.build_conf_dict(
        'pickle', 'B301',
        ['pickle.loads',
         'pickle.load',
         'pickle.Unpickler',
         'cPickle.loads',
         'cPickle.load',
         'cPickle.Unpickler'],
        'Pickle library appears to be in use, possible security issue.'
    ))

    sets.append(utils.build_conf_dict(
        'marshal', 'B302', ['marshal.load', 'marshal.loads'],
        'Deserialization with the marshal module is possibly dangerous.'
    ))

    sets.append(utils.build_conf_dict(
        'md5', 'B303',
        ['hashlib.md5',
         'hashlib.sha1',
         'Crypto.Hash.MD2.new',
         'Crypto.Hash.MD4.new',
         'Crypto.Hash.MD5.new',
         'Crypto.Hash.SHA.new',
         'Cryptodome.Hash.MD2.new',
         'Cryptodome.Hash.MD4.new',
         'Cryptodome.Hash.MD5.new',
         'Cryptodome.Hash.SHA.new',
         'cryptography.hazmat.primitives.hashes.MD5',
         'cryptography.hazmat.primitives.hashes.SHA1'],
        'Use of insecure MD2, MD4, MD5, or SHA1 hash function.'
    ))

    sets.append(utils.build_conf_dict(
        'ciphers', 'B304',
        ['Crypto.Cipher.ARC2.new',
         'Crypto.Cipher.ARC4.new',
         'Crypto.Cipher.Blowfish.new',
         'Crypto.Cipher.DES.new',
         'Crypto.Cipher.XOR.new',
         'Cryptodome.Cipher.ARC2.new',
         'Cryptodome.Cipher.ARC4.new',
         'Cryptodome.Cipher.Blowfish.new',
         'Cryptodome.Cipher.DES.new',
         'Cryptodome.Cipher.XOR.new',
         'cryptography.hazmat.primitives.ciphers.algorithms.ARC4',
         'cryptography.hazmat.primitives.ciphers.algorithms.Blowfish',
         'cryptography.hazmat.primitives.ciphers.algorithms.IDEA'],
        'Use of insecure cipher {name}. Replace with a known secure'
        ' cipher such as AES.',
        'HIGH'
    ))

    sets.append(utils.build_conf_dict(
        'cipher_modes', 'B305',
        ['cryptography.hazmat.primitives.ciphers.modes.ECB'],
        'Use of insecure cipher mode {name}.'
    ))

    sets.append(utils.build_conf_dict(
        'mktemp_q', 'B306', ['tempfile.mktemp'],
        'Use of insecure and deprecated function (mktemp).'
    ))

    sets.append(utils.build_conf_dict(
        'eval', 'B307', ['eval'],
        'Use of possibly insecure function - consider using safer '
        'ast.literal_eval.'
    ))

    sets.append(utils.build_conf_dict(
        'mark_safe', 'B308', ['django.utils.safestring.mark_safe'],
        'Use of mark_safe() may expose cross-site scripting '
        'vulnerabilities and should be reviewed.'
    ))

    sets.append(utils.build_conf_dict(
        'httpsconnection', 'B309',
        ['httplib.HTTPSConnection',
         'http.client.HTTPSConnection',
         'six.moves.http_client.HTTPSConnection'],
        'Use of HTTPSConnection on older versions of Python prior to 2.7.9 '
        'and 3.4.3 does not provide security, see '
        'https://wiki.openstack.org/wiki/OSSN/OSSN-0033'
    ))

    sets.append(utils.build_conf_dict(
        'urllib_urlopen', 'B310',
        ['urllib.urlopen',
         'urllib.request.urlopen',
         'urllib.urlretrieve',
         'urllib.request.urlretrieve',
         'urllib.URLopener',
         'urllib.request.URLopener',
         'urllib.FancyURLopener',
         'urllib.request.FancyURLopener',
         'urllib2.urlopen',
         'urllib2.Request',
         'six.moves.urllib.request.urlopen',
         'six.moves.urllib.request.urlretrieve',
         'six.moves.urllib.request.URLopener',
         'six.moves.urllib.request.FancyURLopener'],
        'Audit url open for permitted schemes. Allowing use of file:/ or '
        'custom schemes is often unexpected.'
    ))

    sets.append(utils.build_conf_dict(
        'random', 'B311',
        ['random.random',
         'random.randrange',
         'random.randint',
         'random.choice',
         'random.uniform',
         'random.triangular'],
        'Standard pseudo-random generators are not suitable for '
        'security/cryptographic purposes.',
        'LOW'
    ))

    sets.append(utils.build_conf_dict(
        'telnetlib', 'B312', ['telnetlib.*'],
        'Telnet-related functions are being called. Telnet is considered '
        'insecure. Use SSH or some other encrypted protocol.',
        'HIGH'
    ))

    # Most of this is based on Christian Heimes' work on defusedxml:
    # https://pypi.org/project/defusedxml/#defusedxml-sax

    xml_msg = ('Using {name} to parse untrusted XML data is known to be '
               'vulnerable to XML attacks. Replace {name} with its '
               'defusedxml equivalent function or make sure '
               'defusedxml.defuse_stdlib() is called')

    sets.append(utils.build_conf_dict(
        'xml_bad_cElementTree', 'B313',
        ['xml.etree.cElementTree.parse',
         'xml.etree.cElementTree.iterparse',
         'xml.etree.cElementTree.fromstring',
         'xml.etree.cElementTree.XMLParser'],
        xml_msg
    ))

    sets.append(utils.build_conf_dict(
        'xml_bad_ElementTree', 'B314',
        ['xml.etree.ElementTree.parse',
         'xml.etree.ElementTree.iterparse',
         'xml.etree.ElementTree.fromstring',
         'xml.etree.ElementTree.XMLParser'],
        xml_msg
    ))

    sets.append(utils.build_conf_dict(
        'xml_bad_expatreader', 'B315', ['xml.sax.expatreader.create_parser'],
        xml_msg
    ))

    sets.append(utils.build_conf_dict(
        'xml_bad_expatbuilder', 'B316',
        ['xml.dom.expatbuilder.parse',
         'xml.dom.expatbuilder.parseString'],
        xml_msg
    ))

    sets.append(utils.build_conf_dict(
        'xml_bad_sax', 'B317',
        ['xml.sax.parse',
         'xml.sax.parseString',
         'xml.sax.make_parser'],
        xml_msg
    ))

    sets.append(utils.build_conf_dict(
        'xml_bad_minidom', 'B318',
        ['xml.dom.minidom.parse',
         'xml.dom.minidom.parseString'],
        xml_msg
    ))

    sets.append(utils.build_conf_dict(
        'xml_bad_pulldom', 'B319',
        ['xml.dom.pulldom.parse',
         'xml.dom.pulldom.parseString'],
        xml_msg
    ))

    sets.append(utils.build_conf_dict(
        'xml_bad_etree', 'B320',
        ['lxml.etree.parse',
         'lxml.etree.fromstring',
         'lxml.etree.RestrictedElement',
         'lxml.etree.GlobalParserTLS',
         'lxml.etree.getDefaultParser',
         'lxml.etree.check_docinfo'],
        ('Using {name} to parse untrusted XML data is known to be '
         'vulnerable to XML attacks. Replace {name} with its '
         'defusedxml equivalent function.')
    ))

    # end of XML tests

    sets.append(utils.build_conf_dict(
        'ftplib', 'B321', ['ftplib.*'],
        'FTP-related functions are being called. FTP is considered '
        'insecure. Use SSH/SFTP/SCP or some other encrypted protocol.',
        'HIGH'
    ))

    sets.append(utils.build_conf_dict(
        'input', 'B322', ['input'],
        'The input method in Python 2 will read from standard input, '
        'evaluate and run the resulting string as Python source code. This '
        'is similar, though in many ways worse, than using eval. On Python '
        '2, use raw_input instead; input is safe in Python 3.',
        'HIGH'
    ))

    sets.append(utils.build_conf_dict(
        'unverified_context', 'B323', ['ssl._create_unverified_context'],
        'By default, Python will create a secure, verified ssl context for '
        'use in such classes as HTTPSConnection. However, it still allows '
        'using an insecure context via the _create_unverified_context that '
        'reverts to the previous behavior that does not validate '
        'certificates or perform hostname checks.'
    ))

    return {'Call': sets}
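

# Illustrative sketch, not part of the bandit source: the defusedxml
# replacement that the B313-B320 messages recommend. defusedxml mirrors the
# stdlib call shapes but raises an exception on entity-expansion and
# external-entity payloads instead of processing them.
import defusedxml.ElementTree


def parse_untrusted(xml_text):
    # Drop-in for xml.etree.ElementTree.fromstring; malicious input raises
    # a defusedxml exception (e.g. EntitiesForbidden) rather than expanding.
    return defusedxml.ElementTree.fromstring(xml_text)

# Alternatively, calling defusedxml.defuse_stdlib() once at startup patches
# the stdlib parsers in place -- the second option the warning message names.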
# -*- coding: utf-8 -*-
import io

import pytest

from layeredconfig import LayeredConfig
from layeredconfig import DictSource, Environment, INIFile
from layeredconfig import strategy


def test_raise_keyerrors_on_empty_multilayer_config():
    config = LayeredConfig()
    with pytest.raises(KeyError):
        assert config.a


def test_set_keychain():
    config = LayeredConfig(
        DictSource({'a': {'b': {'c': 2}}}),
        keychain=('a', 'b')
    )
    assert config.dump() == {'c': 2}


def test_properly_return_none_values():
    config = LayeredConfig(
        DictSource({'a': None})
    )
    assert config.a is None


def test_read_layered_sources():
    config = LayeredConfig(
        DictSource({'a': 1, 'b': {'c': 2}}),
        DictSource({'x': 6, 'b': {'y': 7, 'd': {'e': 8}}})
    )
    assert config.a == 1
    assert config.b.c == 2
    assert config.b.y == 7
    assert config['a'] == 1
    assert config['b'].c == 2
    assert config.b['y'] == 7
    assert config.b.d.e == 8


def test_read_complex_layered_sources(monkeypatch):
    # environment variable values must be strings
    monkeypatch.setenv('MVP1_A', '1000')
    monkeypatch.setenv('MVP2_B_M_E', '4000')
    config = LayeredConfig(
        Environment('MVP1_'),  # untyped shadowing
        DictSource({'a': 1, 'b': {'c': 2, 'e': 400}}),
        DictSource({'x': 6, 'b': {'y': 7, 'd': {'e': 8}}}),
        DictSource({'a': 100, 'b': {'m': {'e': 800}}}),  # shadowing
        DictSource({'x': 'x', 'b': {'y': 0.7, 'd': 800}}),  # type changing
        Environment('MVP2_'),  # untyped shadowing
    )
    assert config.a == 100
    assert config.x == 'x'  # changes int to str
    assert config.b.c == 2
    assert config.b.y == 0.7  # changes int to float
    assert config.b.d == 800  # changes subsource (dict) to single value
    assert config.b.e == 400  # 'e' should not be shadowed by other 'e'
    assert config.b.m.e == 4000  # shadowed by untyped but casted to type

    with pytest.raises(AttributeError) as exc_info:
        config.b.d.e
    assert "no attribute 'e'" in str(exc_info.value)

    # config.b.d overrides a dict with a value
    with pytest.raises(ValueError) as exc_info:
        config.b.dump()
    assert "conflicts" in str(exc_info.value)


def test_layered_len():
    config = LayeredConfig(
        DictSource({'a': 1, 'b': {'c': 2}}),
        DictSource({'x': 6, 'b': {'y': 7, 'd': {'e': 8}}})
    )
    assert len(config) == 3


def test_write_layered_source():
    source1 = DictSource({'a': 1, 'b': {'c': 2}})
    source2 = DictSource({'x': 6, 'b': {'y': 7, 'd': {'e': 8}}})
    config = LayeredConfig(source1, source2)
    assert config.a == 1
    assert config.b.c == 2
    assert config.b.y == 7

    config.a = 10
    config['x'] = 60
    config['b'].c = 20
    config.b['y'] = 70
    config.b['m'] = 'n'  # add new key
    config.b.d.e = 80

    assert config.a == 10
    assert config.x == 60
    assert config.b.c == 20
    assert config.b.y == 70
    assert config.b.m == 'n'
    assert config.b.d.e == 80

    assert source1.a == 10
    assert source1.b.c == 20
    assert source2.x == 60
    assert source2.b.y == 70
    assert source2.b.m == 'n'
    assert source2.b.d.e == 80


@pytest.mark.parametrize('key, message', (
    ('a', 'locked'),
    ('x', 'writable'),
))
def test_write_layered_source_fails(key, message):
    source1 = DictSource({'a': 1, 'b': {'c': 2}}, readonly=True)
    config = LayeredConfig(source1)
    with pytest.raises(TypeError) as exc_info:
        config[key] = 10
    assert message in str(exc_info.value)


def test_layered_get():
    config = LayeredConfig(
        DictSource({'a': 1, 'b': {'c': 2}}),
        DictSource({'x': 6, 'b': {'y': 7, 'd': {'e': 8}}})
    )
    assert config.get('a') == 1
    assert config.get('x') == 6
    assert config.get('b').get('c') == 2
    assert config.get('b').get('y') == 7
    assert config.get('nonexisting') is None
    assert config.get('nonexisting', 'default') == 'default'
    assert 'nonexisting' not in config


def test_source_items(monkeypatch):
    monkeypatch.setenv('MVP_A', '10')
    config = LayeredConfig(
        DictSource({'a': 1, 'b': {'c': 2}}),
        Environment('MVP_'),
        DictSource({'x': 6, 'b': {'y': 7}})
    )
    items = list(config.items())
    assert items == [('a', 10), ('b', config.b), ('x', 6)]

    items = list(config.b.items())
    assert items == [('c', 2), ('y', 7)]


@pytest.mark.parametrize('reverse', (1, -1))
def test_source_items_prevent_overriding_subsections_with_values(reverse):
    sources = [
        DictSource({'a': 1, 'b': {'c': 2}}),
        DictSource({'x': 6, 'b': 5})
    ]
    config = LayeredConfig(*sources[::reverse])
    with pytest.raises(ValueError) as exc_info:
        list(config.items())
    assert "conflicts" in str(exc_info.value)


def test_source_items_with_strategies_and_untyped_source(monkeypatch):
    monkeypatch.setenv('MVP_A', '100')
    untyped_source = io.StringIO(pytest.helpers.unindent(u"""
        [__root__]
        a=1000
        """))
    config = LayeredConfig(
        Environment('MVP_'),  # last source still needs a typed source
        DictSource({'a': 1, 'x': [5, 6], 'b': {'c': 2, 'd': [3, 4]}}),
        DictSource({'a': 10, 'x': [50, 60], 'b': {'c': 20, 'd': [30, 40]}}),
        INIFile(untyped_source),
        strategies={
            'a': strategy.add,
            'x': strategy.collect,  # keep lists intact
            'c': strategy.collect,  # collect values into list
            'd': strategy.merge,  # merge lists
        }
    )
    items = list(config.items())
    assert items == [('a', 1111), ('b', config.b), ('x', [[50, 60], [5, 6]])]

    items = list(config.b.items())
    assert items == [('c', [20, 2]), ('d', [30, 40, 3, 4])]


def test_layered_dump():
    config = LayeredConfig(
        DictSource({'a': 1, 'b': {'c': 2}}),
        DictSource({'a': '10'}),
        DictSource({'x': 6, 'b': {'y': 7}})
    )
    assert config.dump() == {'a': '10', 'b': {'c': 2, 'y': 7}, 'x': 6}


def test_layered_setdefault():
    source1 = DictSource({'a': 1, 'b': {'c': 2}})
    source2 = DictSource({'x': 6, 'b': {'y': 7}})
    config = LayeredConfig(source1, source2)
    assert config.setdefault('a', 10) == 1
    assert config.setdefault('nonexisting', 10) == 10
    assert config.nonexisting == 10
    assert 'nonexisting' in source2
    assert config.b.setdefault('nonexisting', 20) == 20
    assert config.b.nonexisting == 20
    assert 'nonexisting' in source2.b


@pytest.mark.parametrize('container', [
    dict,
    DictSource
])
def test_layered_simple_update(container):
    source1 = DictSource({'a': 1, 'b': {'c': 2}})
    source2 = DictSource({'x': 6, 'b': {'y': 7}})
    config = LayeredConfig(source1, source2)

    data1 = container({'a': 10, 'x': 60})
    data2 = container({'c': 20})
    data3 = container({'y': 70})
    config.update(data1)
    config.b.update(data2, data3)

    assert config.a == 10
    assert config.x == 60
    assert config.b.c == 20
    assert config.b.y == 70

    assert source1.a == 10
    assert source1.b.c == 20
    assert source2.x == 60
    assert source2.b.y == 70


def test_layered_config_with_untyped_source():
    typed_source1 = {'x': 5, 'b': {'y': 6}}
    typed_source2 = {'a': 1, 'b': {'c': 2}}
    untyped_source1 = io.StringIO(pytest.helpers.unindent(u"""
        [__root__]
        a=11
        """))
    untyped_source2 = io.StringIO(pytest.helpers.unindent(u"""
        [__root__]
        a=10
        x=50
        [b]
        c=20
        y=60
        [b.d]
        e=30
        """))
    typed1 = DictSource(typed_source1)
    typed2 = DictSource(typed_source2)
    untyped1 = INIFile(untyped_source1)
    untyped2 = INIFile(untyped_source2, subsection_token='.')
    config = LayeredConfig(typed1, typed2, untyped1, untyped2)

    assert typed1.x == 5
    assert typed1.b.y == 6
    assert typed2.a == 1
    assert typed2.b.c == 2
    with pytest.raises(KeyError):
        typed2.b.d.e
    assert untyped1.a == '11'
    assert untyped2.a == '10'
    assert untyped2.b.c == '20'
    assert untyped2.b.d.e == '30'

    assert config.a == 10  # found in first typed source
    assert config.x == 50  # found in second typed source
    assert config.b.c == 20
    assert config.b.y == 60
    assert config.b.d.e == '30'


def test_read_layered_sources_with_strategies():
    config = LayeredConfig(
        DictSource({'a': 1, 'x': [5, 6], 'b': {'c': 2, 'd': [3, 4]}}),
        DictSource({'a': 10, 'x': [50, 60], 'b': {'c': 20, 'd': [30, 40]}}),
        strategies={
            'a': strategy.add,
            'x': strategy.collect,  # keep lists intact
            'c': strategy.collect,  # collect values into list
            'd': strategy.merge,  # merge lists
        }
    )
    assert config.a == 11
    assert config.x == [[50, 60], [5, 6]]
    assert config.b.c == [20, 2]
    assert config.b.d == [30, 40, 3, 4]


def test_read_layered_sources_with_strategies_and_untyped_sources(monkeypatch):
    monkeypatch.setenv('MVP_A', '100')
    untyped_source = io.StringIO(pytest.helpers.unindent(u"""
        [__root__]
        a=1000
        """))
    config = LayeredConfig(
        Environment('MVP_'),  # last source still needs a typed source
        DictSource({'a': 1, 'x': [5, 6], 'b': {'c': 2, 'd': [3, 4]}}),
        DictSource({'a': 10, 'x': [50, 60], 'b': {'c': 20, 'd': [30, 40]}}),
        INIFile(untyped_source),
        strategies={
            'a': strategy.add,
            'x': strategy.collect,  # keep lists intact
            'c': strategy.collect,  # collect values into list
            'd': strategy.merge,  # merge lists
        }
    )
    assert config.a == 1111
    assert config.x == [[50, 60], [5, 6]]
    assert config.b.c == [20, 2]
    assert config.b.d == [30, 40, 3, 4]
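

# Illustrative sketch, not from the library: the tests above only require
# that a strategy combines the values found for one key across the layers.
# Assuming a strategy is a callable that receives those values ordered from
# highest to lowest precedence (an assumption about the real signature),
# minimal implementations consistent with the assertions would be:
from functools import reduce


def add(values):
    # strategy.add: numeric sum across layers, e.g. 100 + 10 + 1 + 1000 == 1111
    return sum(values)


def collect(values):
    # strategy.collect: keep each layer's value, e.g. [[50, 60], [5, 6]]
    return list(values)


def merge(values):
    # strategy.merge: concatenate list values, e.g. [30, 40] + [3, 4]
    return reduce(lambda acc, v: acc + list(v), values, [])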
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2015 clowwindy # Copyright 2016 Howard Liu # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import sys import time import socket import errno import struct import logging import random from shadowsocks import cryptor, eventloop, shell, common from shadowsocks.common import parse_header, onetimeauth_verify, \ onetimeauth_gen, ONETIMEAUTH_BYTES, ONETIMEAUTH_CHUNK_BYTES, \ ONETIMEAUTH_CHUNK_DATA_LEN, ADDRTYPE_AUTH, U, D # we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time TIMEOUTS_CLEAN_SIZE = 512 MSG_FASTOPEN = 0x20000000 # SOCKS command definition CMD_CONNECT = 1 CMD_BIND = 2 CMD_UDP_ASSOCIATE = 3 # for each opening port, we have a TCP Relay # for each connection, we have a TCP Relay Handler to handle the connection # for each handler, we have 2 sockets: # local: connected to the client # remote: connected to remote server # for each handler, it could be at one of several stages: # as sslocal: # stage 0 SOCKS hello received from local, send hello to local # stage 1 addr received from local, query DNS for remote # stage 2 UDP assoc # stage 3 DNS resolved, connect to remote # stage 4 still connecting, more data from local received # stage 5 remote connected, piping local and remote # as ssserver: # stage 0 just jump to stage 1 # stage 1 addr received from local, query DNS for remote # stage 3 DNS resolved, connect to remote # stage 4 still connecting, more data from local received # stage 5 remote connected, piping local and remote STAGE_INIT = 0 STAGE_ADDR = 1 STAGE_UDP_ASSOC = 2 STAGE_DNS = 3 STAGE_CONNECTING = 4 STAGE_STREAM = 5 STAGE_DESTROYED = -1 # for each handler, we have 2 stream directions: # upstream: from client to server direction # read local and write to remote # downstream: from server to client direction # read remote and write to local STREAM_UP = 0 STREAM_DOWN = 1 # for each stream, it's waiting for reading, or writing, or both WAIT_STATUS_INIT = 0 WAIT_STATUS_READING = 1 WAIT_STATUS_WRITING = 2 WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING BUF_SIZE = 32 * 1024 # helper exceptions for TCPRelayHandler class BadSocksHeader(Exception): pass class NoAcceptableMethods(Exception): pass class TCPRelayHandler(object): def __init__(self, server, fd_to_handlers, loop, local_sock, config, dns_resolver, is_local): self._server = server self._fd_to_handlers = fd_to_handlers self._loop = loop self._local_sock = local_sock self._remote_sock = None self._config = config self._dns_resolver = dns_resolver # TCP Relay works as either sslocal or ssserver # if is_local, this is sslocal self._is_local = is_local self._stage = STAGE_INIT self._cryptor = cryptor.Cryptor(config['password'], config['method']) self._ota_enable = config.get('one_time_auth', False) self._ota_enable_session = self._ota_enable self._ota_buff_head = b'' self._ota_buff_data = b'' self._ota_len = 0 self._ota_chunk_idx = 0 self._fastopen_connected = False 
self._data_to_write_to_local = [] self._data_to_write_to_remote = [] self._upstream_status = WAIT_STATUS_READING self._downstream_status = WAIT_STATUS_INIT self._client_address = local_sock.getpeername()[:2] self._remote_address = None self._forbidden_iplist = config.get('forbidden_ip') if is_local: self._chosen_server = self._get_a_server() fd_to_handlers[local_sock.fileno()] = self local_sock.setblocking(False) local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR, self._server) self.last_activity = 0 self._update_activity() def __hash__(self): # default __hash__ is id / 16 # we want to eliminate collisions return id(self) @property def remote_address(self): return self._remote_address def _get_a_server(self): server = self._config['server'] server_port = self._config['server_port'] if type(server_port) == list: server_port = random.choice(server_port) if type(server) == list: server = random.choice(server) logging.debug('chosen server: %s:%d', server, server_port) return server, server_port def _update_activity(self, direction=D, data_len=0): # tell the TCP Relay we have activities recently # else it will think we are inactive and timed out self._server.update_activity(self, direction, data_len) def _update_stream(self, stream, status): # update a stream to a new waiting status # check if status is changed # only update if dirty dirty = False if stream == STREAM_DOWN: if self._downstream_status != status: self._downstream_status = status dirty = True elif stream == STREAM_UP: if self._upstream_status != status: self._upstream_status = status dirty = True if not dirty: return if self._local_sock: event = eventloop.POLL_ERR if self._downstream_status & WAIT_STATUS_WRITING: event |= eventloop.POLL_OUT if self._upstream_status & WAIT_STATUS_READING: event |= eventloop.POLL_IN self._loop.modify(self._local_sock, event) if self._remote_sock: event = eventloop.POLL_ERR if self._downstream_status & WAIT_STATUS_READING: event |= eventloop.POLL_IN if self._upstream_status & WAIT_STATUS_WRITING: event |= eventloop.POLL_OUT self._loop.modify(self._remote_sock, event) def _write_to_sock(self, data, sock): # write data to sock # if only some of the data are written, put remaining in the buffer # and update the stream to wait for writing if not data or not sock: return False uncomplete = False try: l = len(data) s = sock.send(data) if s < l: data = data[s:] uncomplete = True except (socket.error, OSError, IOError) as e: error_no = eventloop.errno_from_exception(e) if sys.platform == "win32": if error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK): uncomplete = True elif error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK): uncomplete = True else: shell.print_exception(e) self.destroy() return False if uncomplete: if sock == self._local_sock: self._data_to_write_to_local.append(data) self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING) elif sock == self._remote_sock: self._data_to_write_to_remote.append(data) self._update_stream(STREAM_UP, WAIT_STATUS_WRITING) else: logging.error('write_all_to_sock:unknown socket') else: if sock == self._local_sock: self._update_stream(STREAM_DOWN, WAIT_STATUS_READING) elif sock == self._remote_sock: self._update_stream(STREAM_UP, WAIT_STATUS_READING) else: logging.error('write_all_to_sock:unknown socket') return True def _handle_stage_connecting(self, data): if not self._is_local: if self._ota_enable_session: self._ota_chunk_data(data, 
self._data_to_write_to_remote.append) else: self._data_to_write_to_remote.append(data) return if self._ota_enable_session: data = self._ota_chunk_data_gen(data) data = self._cryptor.encrypt(data) self._data_to_write_to_remote.append(data) if self._config['fast_open'] and not self._fastopen_connected: # for sslocal and fastopen, we basically wait for data and use # sendto to connect try: # only connect once self._fastopen_connected = True remote_sock = \ self._create_remote_socket(self._chosen_server[0], self._chosen_server[1]) self._loop.add(remote_sock, eventloop.POLL_ERR, self._server) data = b''.join(self._data_to_write_to_remote) l = len(data) s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server) if s < l: data = data[s:] self._data_to_write_to_remote = [data] else: self._data_to_write_to_remote = [] self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING) except (socket.error, OSError, IOError) as e: if eventloop.errno_from_exception(e) == errno.EINPROGRESS: # in this case data is not sent at all self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING) elif eventloop.errno_from_exception(e) == errno.ENOTCONN: self._fastopen_connected = False logging.error('fast open not supported on this OS') self._config['fast_open'] = False self.destroy() else: shell.print_exception(e) self.destroy() @shell.exception_handle(self_=True, destroy=True, conn_err=True) def _handle_stage_addr(self, data): addr, port = self._client_address if self._is_local: cmd = common.ord(data[1]) if cmd == CMD_UDP_ASSOCIATE: logging.debug('U[%d] UDP associate' % self._config['server_port']) if self._local_sock.family == socket.AF_INET6: header = b'\x05\x00\x00\x04' else: header = b'\x05\x00\x00\x01' addr_to_send = socket.inet_pton(self._local_sock.family, addr) port_to_send = struct.pack('>H', port) self._write_to_sock(header + addr_to_send + port_to_send, self._local_sock) self._stage = STAGE_UDP_ASSOC # just wait for the client to disconnect return elif cmd == CMD_CONNECT: # just trim VER CMD RSV data = data[3:] else: logging.error('U[%d] Unknown command %d', self._config['server_port'], cmd) self.destroy() return header_result = parse_header(data) if header_result is None: raise Exception('TCP can not parse header') addrtype, remote_addr, remote_port, header_length = header_result if self._config['firewall_ports'] and self._config['server_port'] not in self._config['firewall_trusted']: # Firewall enabled if self._config['firewall_mode'] == 'blacklist' and remote_port in self._config['firewall_ports']: firewall_blocked = True elif self._config['firewall_mode'] == 'whitelist' and remote_port not in self._config['firewall_ports']: firewall_blocked = True else: firewall_blocked = False else: firewall_blocked = False if firewall_blocked: logging.warning('U[%d] TCP PORT BANNED: RP[%d] A[%s-->%s]' % ( self._config['server_port'], remote_port, addr, common.to_str(remote_addr) )) return else: logging.info('U[%d] TCP CONN: RP[%d] A[%s-->%s]' % ( self._config['server_port'], remote_port, addr, common.to_str(remote_addr) )) self._remote_address = (common.to_str(remote_addr), remote_port) # pause reading self._update_stream(STREAM_UP, WAIT_STATUS_WRITING) self._stage = STAGE_DNS if self._is_local: # forward address to remote self._write_to_sock((b'\x05\x00\x00\x01' b'\x00\x00\x00\x00\x10\x10'), self._local_sock) data_to_send = self._cryptor.encrypt(data) self._data_to_write_to_remote.append(data_to_send) # notice here may go into _handle_dns_resolved directly self._dns_resolver.resolve(self._chosen_server[0], 
self._handle_dns_resolved) else: if self._ota_enable_session: data = data[header_length:] self._ota_chunk_data(data, self._data_to_write_to_remote.append) elif len(data) > header_length: self._data_to_write_to_remote.append(data[header_length:]) # notice here may go into _handle_dns_resolved directly self._dns_resolver.resolve(remote_addr, self._handle_dns_resolved) def _create_remote_socket(self, ip, port): addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM, socket.SOL_TCP) if len(addrs) == 0: raise Exception("TCP getaddrinfo failed for %s:%d" % (ip, port)) af, socktype, proto, canonname, sa = addrs[0] if self._forbidden_iplist: if common.to_str(sa[0]) in self._forbidden_iplist: raise Exception('U[%d] IP %s is in forbidden list, rejected' % (self._config['server_port'], common.to_str(sa[0]))) remote_sock = socket.socket(af, socktype, proto) self._remote_sock = remote_sock self._fd_to_handlers[remote_sock.fileno()] = self remote_sock.setblocking(False) remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) return remote_sock def _handle_dns_resolved(self, result, error): if error: logging.error(error) self.destroy() return if result and result[1]: ip = result[1] try: self._stage = STAGE_CONNECTING remote_addr = ip if self._is_local: remote_port = self._chosen_server[1] else: remote_port = self._remote_address[1] if self._is_local and self._config['fast_open']: # for fastopen: # wait for more data arrive and send them in one SYN self._stage = STAGE_CONNECTING # we don't have to wait for remote since it's not # created self._update_stream(STREAM_UP, WAIT_STATUS_READING) # TODO when there is already data in this packet else: # else do connect remote_sock = self._create_remote_socket(remote_addr, remote_port) try: remote_sock.connect((remote_addr, remote_port)) except (socket.error, OSError, IOError) as e: if eventloop.errno_from_exception(e) == \ errno.EINPROGRESS: pass self._loop.add(remote_sock, eventloop.POLL_ERR | eventloop.POLL_OUT, self._server) self._stage = STAGE_CONNECTING self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING) self._update_stream(STREAM_DOWN, WAIT_STATUS_READING) return except Exception as e: shell.print_exception(e) self.destroy() def _write_to_sock_remote(self, data): self._write_to_sock(data, self._remote_sock) def _ota_chunk_data(self, data, data_cb): # spec https://shadowsocks.org/en/spec/one-time-auth.html unchunk_data = b'' while len(data) > 0: if self._ota_len == 0: # get DATA.LEN + HMAC-SHA1 length = ONETIMEAUTH_CHUNK_BYTES - len(self._ota_buff_head) self._ota_buff_head += data[:length] data = data[length:] if len(self._ota_buff_head) < ONETIMEAUTH_CHUNK_BYTES: # wait more data return data_len = self._ota_buff_head[:ONETIMEAUTH_CHUNK_DATA_LEN] self._ota_len = struct.unpack('>H', data_len)[0] length = min(self._ota_len - len(self._ota_buff_data), len(data)) self._ota_buff_data += data[:length] data = data[length:] if len(self._ota_buff_data) == self._ota_len: # get a chunk data _hash = self._ota_buff_head[ONETIMEAUTH_CHUNK_DATA_LEN:] _data = self._ota_buff_data index = struct.pack('>I', self._ota_chunk_idx) key = self._cryptor.decipher_iv + index if onetimeauth_verify(_hash, _data, key) is False: logging.warn('U[%d] TCP One time auth fail, chunk is dropped!' 
% self._config[ 'server_port']) else: unchunk_data += _data self._ota_chunk_idx += 1 self._ota_buff_head = b'' self._ota_buff_data = b'' self._ota_len = 0 data_cb(unchunk_data) return def _ota_chunk_data_gen(self, data): data_len = struct.pack(">H", len(data)) index = struct.pack('>I', self._ota_chunk_idx) key = self._cryptor.cipher_iv + index sha110 = onetimeauth_gen(data, key) self._ota_chunk_idx += 1 return data_len + sha110 + data def _handle_stage_stream(self, data): if self._is_local: if self._ota_enable_session: data = self._ota_chunk_data_gen(data) data = self._cryptor.encrypt(data) self._write_to_sock(data, self._remote_sock) else: if self._ota_enable_session: self._ota_chunk_data(data, self._write_to_sock_remote) else: self._write_to_sock(data, self._remote_sock) return def _on_local_read(self): # handle all local read events and dispatch them to methods for # each stage if not self._local_sock: return is_local = self._is_local data = None try: data = self._local_sock.recv(BUF_SIZE) except (socket.error, OSError, IOError) as e: error_no = eventloop.errno_from_exception(e) if sys.platform == "win32": if error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK): return elif error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK): return if not data: self.destroy() return self._update_activity(U, len(data)) if not is_local: data = self._cryptor.decrypt(data) if not data: return if self._stage == STAGE_STREAM: self._handle_stage_stream(data) return elif is_local and self._stage == STAGE_INIT: # TODO check auth method self._write_to_sock(b'\x05\00', self._local_sock) self._stage = STAGE_ADDR return elif self._stage == STAGE_CONNECTING: self._handle_stage_connecting(data) elif (is_local and self._stage == STAGE_ADDR) or \ (not is_local and self._stage == STAGE_INIT): self._handle_stage_addr(data) def _on_remote_read(self): # handle all remote read events data = None try: data = self._remote_sock.recv(BUF_SIZE) except socket.error as err: error_no = err.args[0] if sys.platform == "win32": if error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK): return elif error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK): return if not data: self.destroy() return self._update_activity(D, len(data)) if self._is_local: data = self._cryptor.decrypt(data) else: data = self._cryptor.encrypt(data) try: self._write_to_sock(data, self._local_sock) except Exception as e: shell.print_exception(e) # TODO use logging when debug completed self.destroy() def _on_local_write(self): # handle local writable event if self._data_to_write_to_local: data = b''.join(self._data_to_write_to_local) self._data_to_write_to_local = [] self._write_to_sock(data, self._local_sock) else: self._update_stream(STREAM_DOWN, WAIT_STATUS_READING) def _on_remote_write(self): # handle remote writable event self._stage = STAGE_STREAM if self._data_to_write_to_remote: data = b''.join(self._data_to_write_to_remote) self._data_to_write_to_remote = [] self._write_to_sock(data, self._remote_sock) else: self._update_stream(STREAM_UP, WAIT_STATUS_READING) def _on_local_error(self): logging.debug('got local error') if self._local_sock: logging.error('U[%d] %s' % (self._config['server_port'], eventloop.get_sock_error(self._local_sock))) self.destroy() def _on_remote_error(self): logging.debug('got remote error') if self._remote_sock: logging.error('U[%d] %s' % (self._config['server_port'], eventloop.get_sock_error(self._remote_sock))) self.destroy() 
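    # handle_event (below) is the single entry point the event loop calls
    # for this handler, for both sockets. The dispatch order matters:
    # errors first, then reads, then writes, re-checking for
    # STAGE_DESTROYED between steps because any callback may destroy the
    # handler and invalidate both sockets.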
@shell.exception_handle(self_=True, destroy=True) def handle_event(self, sock, event): # handle all events in this handler and dispatch them to methods if self._stage == STAGE_DESTROYED: logging.debug('ignore handle_event: destroyed') return # order is important if sock == self._remote_sock: if event & eventloop.POLL_ERR: self._on_remote_error() if self._stage == STAGE_DESTROYED: return if event & (eventloop.POLL_IN | eventloop.POLL_HUP): self._on_remote_read() if self._stage == STAGE_DESTROYED: return if event & eventloop.POLL_OUT: self._on_remote_write() elif sock == self._local_sock: if event & eventloop.POLL_ERR: self._on_local_error() if self._stage == STAGE_DESTROYED: return if event & (eventloop.POLL_IN | eventloop.POLL_HUP): self._on_local_read() if self._stage == STAGE_DESTROYED: return if event & eventloop.POLL_OUT: self._on_local_write() else: logging.warn('unknown socket') def _log_error(self, e): if self._local_sock: addr, port = self._local_sock.getpeername()[:2] logging.error('U[%d] %s when handling connection from %s:%d' % (self._config['server_port'], e, addr, port)) else: logging.error('U[%d] Unknown TCP error occurred' % self._config['server_port']) def destroy(self): # destroy the handler and release any resources # promises: # 1. destroy won't make another destroy() call inside # 2. destroy releases resources so it prevents future call to destroy # 3. destroy won't raise any exceptions # if any of the promises are broken, it indicates a bug has been # introduced! mostly likely memory leaks, etc if self._stage == STAGE_DESTROYED: # this couldn't happen logging.debug('already destroyed') return self._stage = STAGE_DESTROYED if self._remote_address: logging.debug('destroy: %s:%d' % self._remote_address) else: logging.debug('destroy') if self._remote_sock: logging.debug('destroying remote') self._loop.remove(self._remote_sock) del self._fd_to_handlers[self._remote_sock.fileno()] self._remote_sock.close() self._remote_sock = None if self._local_sock: logging.debug('destroying local') self._loop.remove(self._local_sock) del self._fd_to_handlers[self._local_sock.fileno()] self._local_sock.close() self._local_sock = None self._dns_resolver.remove_callback(self._handle_dns_resolved) self._server.remove_handler(self) class TCPRelay(object): def __init__(self, config, dns_resolver, is_local, stat_callback=None): self._config = config self._is_local = is_local self._dns_resolver = dns_resolver self._closed = False self._eventloop = None self._fd_to_handlers = {} self._timeout = config['timeout'] self._timeouts = [] # a list for all the handlers # we trim the timeouts once a while self._timeout_offset = 0 # last checked position for timeout self._handler_to_timeouts = {} # key: handler value: index in timeouts if is_local: listen_addr = config['local_address'] listen_port = config['local_port'] else: listen_addr = config['server'] listen_port = config['server_port'] self._listen_port = listen_port addrs = socket.getaddrinfo(listen_addr, listen_port, 0, socket.SOCK_STREAM, socket.SOL_TCP) if len(addrs) == 0: raise Exception("TCP getaddrinfo failed for %s:%d" % (listen_addr, listen_port)) af, socktype, proto, canonname, sa = addrs[0] server_socket = socket.socket(af, socktype, proto) server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind(sa) server_socket.setblocking(False) if self._config['fast_open']: try: server_socket.setsockopt(socket.SOL_TCP, 23, 5) except socket.error: logging.warning( 'Fast open is not available, automatically turned off') 
self._config['fast_open'] = False server_socket.listen(1024) self._server_socket = server_socket self._stat_callback = stat_callback def add_to_loop(self, loop): if self._eventloop: raise Exception('already add to loop') if self._closed: raise Exception('already closed') self._eventloop = loop self._eventloop.add(self._server_socket, eventloop.POLL_IN | eventloop.POLL_ERR, self) self._eventloop.add_periodic(self.handle_periodic) def remove_handler(self, handler): index = self._handler_to_timeouts.get(hash(handler), -1) if index >= 0: # delete is O(n), so we just set it to None self._timeouts[index] = None del self._handler_to_timeouts[hash(handler)] def update_activity(self, handler, direction, data_len): if data_len and self._stat_callback: self._stat_callback(self._listen_port, direction, data_len) # set handler to active now = int(time.time()) if now - handler.last_activity < eventloop.TIMEOUT_PRECISION: # thus we can lower timeout modification frequency return handler.last_activity = now index = self._handler_to_timeouts.get(hash(handler), -1) if index >= 0: # delete is O(n), so we just set it to None self._timeouts[index] = None length = len(self._timeouts) self._timeouts.append(handler) self._handler_to_timeouts[hash(handler)] = length def _sweep_timeout(self): # tornado's timeout memory management is more flexible than we need # we just need a sorted last_activity queue and it's faster than heapq # in fact we can do O(1) insertion/remove so we invent our own if self._timeouts: logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts') now = time.time() length = len(self._timeouts) pos = self._timeout_offset while pos < length: handler = self._timeouts[pos] if handler: if now - handler.last_activity < self._timeout: break else: if handler.remote_address: logging.warn('timed out: %s:%d' % handler.remote_address) else: logging.warn('timed out') handler.destroy() self._timeouts[pos] = None # free memory pos += 1 else: pos += 1 if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1: # clean up the timeout queue when it gets larger than half # of the queue self._timeouts = self._timeouts[pos:] for key in self._handler_to_timeouts: self._handler_to_timeouts[key] -= pos pos = 0 self._timeout_offset = pos def handle_event(self, sock, fd, event): # handle events and dispatch to handlers if sock: logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd, eventloop.EVENT_NAMES.get(event, event)) if sock == self._server_socket: if event & eventloop.POLL_ERR: # TODO raise Exception('server_socket error') try: logging.debug('accept') conn = self._server_socket.accept() TCPRelayHandler(self, self._fd_to_handlers, self._eventloop, conn[0], self._config, self._dns_resolver, self._is_local) except socket.error as err: error_no = err.args[0] if sys.platform == "win32": if error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK): return elif error_no in (errno.EAGAIN, errno.EINPROGRESS, errno.EWOULDBLOCK): return else: shell.print_exception(err) else: if sock: handler = self._fd_to_handlers.get(fd, None) if handler: handler.handle_event(sock, event) else: logging.warn('poll removed fd') def handle_periodic(self): if self._closed: if self._server_socket: self._eventloop.remove(self._server_socket) self._server_socket.close() self._server_socket = None logging.info('TCP port %d closed', self._listen_port) if not self._fd_to_handlers: logging.info('stopping') self._eventloop.stop() self._sweep_timeout() def close(self, next_tick=False): logging.debug('TCP close') self._closed = True if not 
next_tick: if self._eventloop: self._eventloop.remove_periodic(self.handle_periodic) self._eventloop.remove(self._server_socket) self._server_socket.close() for handler in list(self._fd_to_handlers.values()): handler.destroy()
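

# Illustrative standalone sketch, not part of the relay: the wire format
# produced by _ota_chunk_data_gen and checked by _ota_chunk_data above.
# Per the one-time-auth spec referenced in the code, each chunk is
# DATA.LEN (2 bytes, big-endian) | HMAC-SHA1 truncated to 10 bytes | DATA,
# keyed by the cipher IV concatenated with a 4-byte big-endian chunk
# counter. The 10-byte truncation mirrors ONETIMEAUTH_BYTES; `iv` is a
# placeholder value here.
import hashlib
import hmac
import struct


def ota_chunk(data, iv, chunk_idx):
    key = iv + struct.pack('>I', chunk_idx)
    tag = hmac.new(key, data, hashlib.sha1).digest()[:10]
    return struct.pack('>H', len(data)) + tag + data


def ota_verify(chunk, iv, chunk_idx):
    data_len = struct.unpack('>H', chunk[:2])[0]
    tag, data = chunk[2:12], chunk[12:12 + data_len]
    key = iv + struct.pack('>I', chunk_idx)
    expected = hmac.new(key, data, hashlib.sha1).digest()[:10]
    return hmac.compare_digest(tag, expected)

# e.g.: iv = b'\x00' * 16; c = ota_chunk(b'hello', iv, 0)
#       assert ota_verify(c, iv, 0)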
#!/usr/bin/env python """ Copyright 2012 GroupDocs. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ import sys import os from .models import * from groupdocs.FileStream import FileStream from groupdocs.ApiClient import ApiException class SystemApi(object): def __init__(self, apiClient): self.apiClient = apiClient self.__basePath = "https://dev-api.groupdocs.com/v2.0" @property def basePath(self): return self.__basePath @basePath.setter def basePath(self, value): self.__basePath = value def GetUserPlan(self, callerId, **kwargs): """Get user plan Args: callerId, str: User GUID (required) Returns: GetPlanResponse """ if( callerId == None ): raise ApiException(400, "missing required parameters") allParams = ['callerId'] params = locals() for (key, val) in params['kwargs'].items(): if key not in allParams: raise TypeError("Got an unexpected keyword argument '%s' to method GetUserPlan" % key) params[key] = val del params['kwargs'] resourcePath = '/system/{callerId}/plan'.replace('*', '') resourcePath = resourcePath.replace('{format}', 'json') method = 'GET' queryParams = {} headerParams = {} if ('callerId' in params): replacement = str(self.apiClient.toPathValue(params['callerId'])) resourcePath = resourcePath.replace('{' + 'callerId' + '}', replacement) postData = (params['body'] if 'body' in params else None) response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams, postData, headerParams) if not response: return None responseObject = self.apiClient.deserialize(response, 'GetPlanResponse') return responseObject def GetUserSubscriptionPlan(self, callerId, **kwargs): """Get user plan Args: callerId, str: User GUID (required) Returns: GetUserSubscriptionPlanResponse """ if( callerId == None ): raise ApiException(400, "missing required parameters") allParams = ['callerId'] params = locals() for (key, val) in params['kwargs'].items(): if key not in allParams: raise TypeError("Got an unexpected keyword argument '%s' to method GetUserSubscriptionPlan" % key) params[key] = val del params['kwargs'] resourcePath = '/system/{callerId}/subscription'.replace('*', '') resourcePath = resourcePath.replace('{format}', 'json') method = 'GET' queryParams = {} headerParams = {} if ('callerId' in params): replacement = str(self.apiClient.toPathValue(params['callerId'])) resourcePath = resourcePath.replace('{' + 'callerId' + '}', replacement) postData = (params['body'] if 'body' in params else None) response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams, postData, headerParams) if not response: return None responseObject = self.apiClient.deserialize(response, 'GetUserSubscriptionPlanResponse') return responseObject def GetSubscriptionPlans(self, callerId, family, **kwargs): """Get subscription plans Args: callerId, str: User GUID (required) family, str: Product Family Name (required) Returns: GetSubscriptionPlansResponse """ if( callerId == None or family == None ): raise ApiException(400, 
"missing required parameters") allParams = ['callerId', 'family'] params = locals() for (key, val) in params['kwargs'].items(): if key not in allParams: raise TypeError("Got an unexpected keyword argument '%s' to method GetSubscriptionPlans" % key) params[key] = val del params['kwargs'] resourcePath = '/system/{callerId}/plans/{family}?invalidate={invalidate}'.replace('*', '') resourcePath = resourcePath.replace('{format}', 'json') method = 'GET' queryParams = {} headerParams = {} if ('callerId' in params): replacement = str(self.apiClient.toPathValue(params['callerId'])) resourcePath = resourcePath.replace('{' + 'callerId' + '}', replacement) if ('family' in params): replacement = str(self.apiClient.toPathValue(params['family'])) resourcePath = resourcePath.replace('{' + 'family' + '}', replacement) postData = (params['body'] if 'body' in params else None) response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams, postData, headerParams) if not response: return None responseObject = self.apiClient.deserialize(response, 'GetSubscriptionPlansResponse') return responseObject def SetSubscriptionPlan(self, userId, productId, body, **kwargs): """Set subscription plan user plan Args: userId, str: User GUID (required) productId, str: Product ID (required) body, SubscriptionPlanInfo: Subscription Plan (required) Returns: SetUserSubscriptionPlanResponse """ if( userId == None or productId == None or body == None ): raise ApiException(400, "missing required parameters") allParams = ['userId', 'productId', 'body'] params = locals() for (key, val) in params['kwargs'].items(): if key not in allParams: raise TypeError("Got an unexpected keyword argument '%s' to method SetSubscriptionPlan" % key) params[key] = val del params['kwargs'] resourcePath = '/system/{userId}/subscriptions/{productId}'.replace('*', '') resourcePath = resourcePath.replace('{format}', 'json') method = 'PUT' queryParams = {} headerParams = {} if ('userId' in params): replacement = str(self.apiClient.toPathValue(params['userId'])) resourcePath = resourcePath.replace('{' + 'userId' + '}', replacement) if ('productId' in params): replacement = str(self.apiClient.toPathValue(params['productId'])) resourcePath = resourcePath.replace('{' + 'productId' + '}', replacement) postData = (params['body'] if 'body' in params else None) response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams, postData, headerParams) if not response: return None responseObject = self.apiClient.deserialize(response, 'SetUserSubscriptionPlanResponse') return responseObject def GetCountries(self, callerId, **kwargs): """Get countries Args: callerId, str: User GUID (required) Returns: GetCountriesResponse """ if( callerId == None ): raise ApiException(400, "missing required parameters") allParams = ['callerId'] params = locals() for (key, val) in params['kwargs'].items(): if key not in allParams: raise TypeError("Got an unexpected keyword argument '%s' to method GetCountries" % key) params[key] = val del params['kwargs'] resourcePath = '/system/{callerId}/countries'.replace('*', '') resourcePath = resourcePath.replace('{format}', 'json') method = 'GET' queryParams = {} headerParams = {} if ('callerId' in params): replacement = str(self.apiClient.toPathValue(params['callerId'])) resourcePath = resourcePath.replace('{' + 'callerId' + '}', replacement) postData = (params['body'] if 'body' in params else None) response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams, postData, headerParams) if 
not response: return None responseObject = self.apiClient.deserialize(response, 'GetCountriesResponse') return responseObject def GetStates(self, callerId, countryName, **kwargs): """Get states Args: callerId, str: User GUID (required) countryName, str: Country Name (required) Returns: GetStatesResponse """ if( callerId == None or countryName == None ): raise ApiException(400, "missing required parameters") allParams = ['callerId', 'countryName'] params = locals() for (key, val) in params['kwargs'].items(): if key not in allParams: raise TypeError("Got an unexpected keyword argument '%s' to method GetStates" % key) params[key] = val del params['kwargs'] resourcePath = '/system/{callerId}/countries/{countryName}/states'.replace('*', '') resourcePath = resourcePath.replace('{format}', 'json') method = 'GET' queryParams = {} headerParams = {} if ('callerId' in params): replacement = str(self.apiClient.toPathValue(params['callerId'])) resourcePath = resourcePath.replace('{' + 'callerId' + '}', replacement) if ('countryName' in params): replacement = str(self.apiClient.toPathValue(params['countryName'])) resourcePath = resourcePath.replace('{' + 'countryName' + '}', replacement) postData = (params['body'] if 'body' in params else None) response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams, postData, headerParams) if not response: return None responseObject = self.apiClient.deserialize(response, 'GetStatesResponse') return responseObject def SetBillingAddress(self, userId, body, **kwargs): """Set user billing address Args: userId, str: User GUID (required) body, BillingAddressInfo: Billing Address (required) Returns: GetBillingAddressResponse """ if( userId == None or body == None ): raise ApiException(400, "missing required parameters") allParams = ['userId', 'body'] params = locals() for (key, val) in params['kwargs'].items(): if key not in allParams: raise TypeError("Got an unexpected keyword argument '%s' to method SetBillingAddress" % key) params[key] = val del params['kwargs'] resourcePath = '/system/{userId}/billingaddress'.replace('*', '') resourcePath = resourcePath.replace('{format}', 'json') method = 'PUT' queryParams = {} headerParams = {} if ('userId' in params): replacement = str(self.apiClient.toPathValue(params['userId'])) resourcePath = resourcePath.replace('{' + 'userId' + '}', replacement) postData = (params['body'] if 'body' in params else None) response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams, postData, headerParams) if not response: return None responseObject = self.apiClient.deserialize(response, 'GetBillingAddressResponse') return responseObject
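

# Illustrative sketch, not part of the generated SDK: every method above
# repeats the same path-templating step before calling
# self.apiClient.callAPI -- substitute '{format}' with 'json' and each
# '{placeholder}' with the caller-supplied value. The helper below is
# hypothetical, written only to make that shared pattern explicit.
def fill_path(template, **params):
    path = template.replace('{format}', 'json')
    for key, value in params.items():
        path = path.replace('{' + key + '}', str(value))
    return path

# e.g. fill_path('/system/{callerId}/plan', callerId='user-guid')
# -> '/system/user-guid/plan'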
#!/usr/bin/env python """ Contains player and NPC-classes. """ import os import numpy as np import pygame import logging from Placeable import Placeable from Item import Item from Text import TextDialog from FindPath import Maze DEFAULT_HEALTH = 100 DEFAULT_FEAR = 75 DEFAULT_HATE = 25 class Person(Placeable): """ Base class for all characters in game. """ persons = [] players = [] npcs = [] def __init__(self, position, game, world, sprite, health=DEFAULT_HEALTH): """ @param health The health that is given at init. @param position [x, y] the position at init. """ super(Person, self).__init__(position, sprite) self.health = health self.game = game self.world = world self.inventory = [] self.move_cool = 0.10 # seconds self.move_time = np.inf self.velocity = np.array([0,0]) self._add(self) @classmethod def _add(self, p): self.persons.append(p) def update(self): if len(self.game.text_dialog_queue) > 0: return if (self.velocity != 0).any(): newpos = self.position + self.velocity self.move(newpos) self.move_time += self.game.dt def move(self, newpos): # Change facing direction change = newpos - self.position self.facing = int(round(np.arctan2(change[0], change[1]) / np.pi * 2)) # Check if outside bounds of map inside_x = 0 <= newpos[0] < self.world.shape[0] inside_y = 0 <= newpos[1] < self.world.shape[1] # Check if new position is on walkable place on_walkable = self.world.board[tuple(newpos)] in ('g') if on_walkable: for person in Person.persons + [self.game.player]: if person == self: continue # Cannot collide with self. if (newpos == person.position).all(): on_walkable = False # If new position is on water, must have boat if self.world.board[tuple(newpos)] == 'w': names = [item.name for item in self.inventory] has_boat = 'Boat' in names on_walkable = has_boat # Only walk after certain cooldown cooldown_passed = self.move_time > self.move_cool # Check if step is valid, and if it is, move if (inside_x and inside_y and on_walkable and cooldown_passed): self.position = newpos self.move_time = 0 return True else: return False def speed_up(self, speed_vector): """ Changes the velocity of the player. """ self.velocity += speed_vector def hurt(self, dmg): self.health -= dmg if self.health <= 0: self.health = 0 self._die() def _die(self): self.dialog = "I died." self.update = self._update_dead def _update_dead(self): # Cannot do anything when dead. pass def give_item(self, item): if not isinstance(item, Item): logging.error( "Item given to player is not item instance." ) return self.inventory.append(item) class Player(Person): """ Contains the player-controlled character. """ def __init__(self, position, game, world, health=DEFAULT_HEALTH): super(Player, self).__init__(position, game, world, "player", health) self.interacting_with = None @classmethod def _add(self, p): self.players.append(p) self.persons.append(p) def give_item(self, item): """ Player reads pages if they are picked up. """ super(Player, self).give_item(item) TextDialog("You got %s!" % item.name.lower(), self.game) if item.name == "Page": TextDialog(item.text, self.game) class NPC(Person): """ Contains a character controlled by the game. 
""" def __init__(self, position, game, world, health=DEFAULT_HEALTH, dialog=None, items=[], fear=DEFAULT_FEAR, hate=DEFAULT_HATE, ): super(NPC, self).__init__(position, game, world, "npc", health) self.dialog = dialog self.fear = fear self.hate = hate for item in items: self.give_item(item) self.set_target() self.set_path() @classmethod def _add(self, p): self.persons.append(p) self.npcs.append(p) def next_step(self): """ Since the game controls this character, some algorithm should say where it moves. TODO """ water_to_left = 0 water_to_right = 0 water_up = 0 water_down = 0 for i in range(self.position[0], self.world.shape[0]): water_to_right += 1 if self.world.board[i, self.position[1]] == 'w': break for i in list(reversed(range(0, self.position[0]))): water_to_left += 1 if self.world.board[i, self.position[1]] == 'w': break for i in range(self.position[1], self.world.shape[1]): water_up += 1 if self.world.board[self.position[0], i] == 'w': break for i in list(reversed(range(0, self.position[1]))): water_down += 1 if self.world.board[self.position[0], i] == 'w': break if np.random.random() > 0.8: right_direction = max([water_down, water_up, water_to_left, water_to_right]) if right_direction == water_down: return np.asarray([0, -1]) elif right_direction == water_up: return np.asarray([0, 1]) elif right_direction == water_to_right: return np.asarray([1, 0]) elif right_direction == water_to_left: return np.asarray([-1, 0]) return np.asarray([0, 0]) def set_target(self): change = (self.position - self.game.player.position) if change.sum() <= 10 and max(self.fear, self.hate) > 50: if self.fear > self.hate: idealtarget = self.position + change else: idealtarget = self.position - change else: idealtarget = self.position + np.random.randint(-3, 3+1, size=2) for r in xrange(10): possibilities = [] for x in xrange(-r, r+1): for y in xrange(-r, r+1): target = idealtarget + [x, y] inside_x = 0 <= target[0] < self.world.shape[0] inside_y = 0 <= target[1] < self.world.shape[1] if (inside_x and inside_y): on_walkable = self.world.board[tuple(target)] in ('g') if on_walkable: possibilities.append(target) if not possibilities: continue else: self.target = possibilities[np.random.randint(0, len(possibilities))] break def set_path(self): maze = Maze( self.position, self.target, self.world.board == 'g', ) self.path = maze.solve(10) def update(self, depth=0): """ """ if len(self.game.text_dialog_queue) > 0: return False # Only walk after certain cooldown cooldown_passed = self.move_time > self.move_cool if cooldown_passed: if not self.path: # if empty or None self.set_target() self.set_path() if self.path: newpos = self.path[0] if self.move(newpos): # Successfull move. del self.path[0] elif depth >= 10: # Move was blocked by some entity. # Clear current path and try again. # Maxium depth to avoid potential infinite recursive loop. self.path = [] self.update(depth+1) return else: self.path = [] self.move_time = 0 else: # Else backup solution. 
goal = self.next_step() newpos = self.position + (self.velocity + goal) # If next is water, try turn is_water = self.world.board[tuple(newpos)] == 'w' if is_water: self.velocity = np.asarray([0, 0]) # If at end of world, move at_yend = self.position[0] == (self.world.shape[0] - 1) at_xend = self.position[1] == (self.world.shape[1] - 1) if at_yend or at_xend: self.velocity = np.asarray([0, 0]) if (self.velocity == [0, 0]).all(): self.speed_up(goal) # Do the actual moving newpos = self.position + self.velocity super(NPC, self).move(newpos) self.move_time += self.game.dt def interact(self): """ Called when player interacts with this NPC. """ self.fear -= 50 if not self.dialog: return TextDialog(self.dialog, self.game) self.dialog = "I have nothing more to tell you." for i in range(len(self.inventory)): self.game.player.give_item(self.inventory.pop(i))
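

# Illustrative sketch, not part of the game code: the targeting rule from
# NPC.set_target above, isolated. When the player is "near" and the NPC's
# dominant emotion is strong (> 50), fear moves the NPC away from the player
# and hate moves it toward the player; otherwise it wanders randomly. Note
# the nearness test mirrors the original's signed difference sum; a true
# distance would use np.abs(change).sum().
import numpy as np


def ideal_target(npc_pos, player_pos, fear, hate, rng=np.random):
    change = npc_pos - player_pos
    if change.sum() <= 10 and max(fear, hate) > 50:
        # fear wins: step along `change`, away from the player;
        # hate wins: step against `change`, toward the player
        return npc_pos + change if fear > hate else npc_pos - change
    return npc_pos + rng.randint(-3, 3 + 1, size=2)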
import os import re import unittest import json import zlib from bs4 import BeautifulSoup from moto import mock_dynamodb2 from itsdangerous import base64_decode from sdc.crypto.key_store import KeyStore from app.keys import KEY_PURPOSE_AUTHENTICATION, KEY_PURPOSE_SUBMISSION from app.setup import create_app from tests.app.app_context_test_case import setup_tables from tests.integration.create_token import TokenGenerator EQ_USER_AUTHENTICATION_RRM_PRIVATE_KEY_KID = '709eb42cfee5570058ce0711f730bfbb7d4c8ade' SR_USER_AUTHENTICATION_PUBLIC_KEY_KID = 'e19091072f920cbf3ca9f436ceba309e7d814a62' EQ_SUBMISSION_SDX_PRIVATE_KEY = '2225f01580a949801274a5f3e6861947018aff5b' EQ_SUBMISSION_SR_PRIVATE_SIGNING_KEY = 'fe425f951a0917d7acdd49230b23a5c405c28510' KEYS_FOLDER = './jwt-test-keys' def get_file_contents(filename, trim=False): with open(os.path.join(KEYS_FOLDER, filename), 'r') as f: data = f.read() if trim: data = data.rstrip('\r\n') return data class IntegrationTestCase(unittest.TestCase): # pylint: disable=too-many-public-methods def setUp(self): # Cache for requests self.last_url = None self.last_response = None self.last_csrf_token = None # Perform setup steps self._set_up_app() def _set_up_app(self): self._ddb = mock_dynamodb2() self._ddb.start() from application import configure_logging configure_logging() setting_overrides = { 'SQLALCHEMY_DATABASE_URI': 'sqlite://', 'EQ_DYNAMODB_ENDPOINT': None } self._application = create_app(setting_overrides) self._key_store = KeyStore({ 'keys': { EQ_USER_AUTHENTICATION_RRM_PRIVATE_KEY_KID: { 'purpose': KEY_PURPOSE_AUTHENTICATION, 'type': 'private', 'value': get_file_contents('third-party/sdc-rrm-authentication-signing-private-v1.pem')}, SR_USER_AUTHENTICATION_PUBLIC_KEY_KID: { 'purpose': KEY_PURPOSE_AUTHENTICATION, 'type': 'public', 'value': get_file_contents('third-party/sdc-sr-authentication-encryption-public-v1.pem')}, EQ_SUBMISSION_SDX_PRIVATE_KEY: { 'purpose': KEY_PURPOSE_SUBMISSION, 'type': 'private', 'value': get_file_contents('third-party/sdc-sdx-submission-encryption-private-v1.pem')}, EQ_SUBMISSION_SR_PRIVATE_SIGNING_KEY: { 'purpose': KEY_PURPOSE_SUBMISSION, 'type': 'public', 'value': get_file_contents('sdc-sr-submission-signing-private-v1.pem')}, } }) self.token_generator = TokenGenerator( self._key_store, EQ_USER_AUTHENTICATION_RRM_PRIVATE_KEY_KID, SR_USER_AUTHENTICATION_PUBLIC_KEY_KID ) self._client = self._application.test_client() with self._application.app_context(): setup_tables() def tearDown(self): self._ddb.stop() def launchSurvey(self, eq_id='test', form_type_id='dates', **payload_kwargs): """ Launch a survey as an authenticated user and follow re-directs :param eq_id: The id of the survey to launch e.g. 'census', 'test' etc. :param form_type_id: The form type of the survey e.g. 'household', 'radio' etc. """ token = self.token_generator.create_token(form_type_id=form_type_id, eq_id=eq_id, **payload_kwargs) self.get('/session?token=' + token) def dumpAnswers(self): self.get('/dump/answers') # Then I get a 200 OK response self.assertStatusOK() # And the JSON response contains the data I submitted dump_answers = json.loads(self.getResponseData()) return dump_answers def dumpSubmission(self): self.get('/dump/submission') # Then I get a 200 OK response self.assertStatusOK() # And the JSON response contains the data I submitted dump_submission = json.loads(self.getResponseData()) return dump_submission def get(self, url, **kwargs): """ GETs the specified URL, following any redirects. 
        If the response contains a CSRF token, it is cached to be used
        on the next POST. The URL will be cached for future POST requests.

        :param url: the URL to GET
        """
        environ, response = self._client.get(
            url,
            as_tuple=True,
            follow_redirects=True,
            **kwargs
        )

        self._cache_response(environ, response)

    def post(self, post_data=None, url=None, action='save_continue',
             action_value='', **kwargs):
        """
        POSTs to the specified URL with post_data and performs a GET
        with the URL from the re-direct.

        Will add the last received CSRF token to the post_data automatically.

        :param url: the URL to POST to; use None to use the last received URL
        :param post_data: the data to POST
        :param action: The button action to post
        """
        if url is None:
            url = self.last_url

        self.assertIsNotNone(url)

        _post_data = post_data.copy() if post_data else {}
        if self.last_csrf_token is not None:
            _post_data.update({'csrf_token': self.last_csrf_token})

        if action:
            _post_data.update({'action[{action}]'.format(action=action): action_value})

        environ, response = self._client.post(
            url,
            data=_post_data,
            as_tuple=True,
            follow_redirects=True,
            **kwargs
        )

        self._cache_response(environ, response)

    def _cache_response(self, environ, response):
        self.last_csrf_token = self._extract_csrf_token(response.get_data(True))

        self.last_response = response
        self.last_url = environ['PATH_INFO']
        if environ['QUERY_STRING']:
            self.last_url += '?' + environ['QUERY_STRING']

    @staticmethod
    def _extract_csrf_token(html):
        match = re.search(
            r'<input id="csrf_token" name="csrf_token" type="hidden" value="(.+?)"/>',
            html)
        return (match.group(1) or None) if match else None

    def getResponseData(self):
        """ Returns the last received response data """
        return self.last_response.get_data(True)

    def getCookie(self):
        """ Returns the last received response cookie session """
        cookie = self.last_response.headers['Set-Cookie']
        cookie_session = cookie.split('session=.')[1].split(';')[0]
        decoded_cookie_session = decode_flask_cookie(cookie_session)
        return json.loads(decoded_cookie_session)

    def getHtmlSoup(self):
        """ Returns the last received response data as a BeautifulSoup HTML object

        See https://www.crummy.com/software/BeautifulSoup/bs4/doc/
        :return: a BeautifulSoup object for the response data
        """
        return BeautifulSoup(self.getResponseData(), 'html.parser')

    # Extra Helper Assertions
    def assertInHead(self, content):
        self.assertInSelector(content, name='head')

    def assertInBody(self, content):
        self.assertInSelector(content, name='body')

    def assertNotInHead(self, content):
        self.assertNotInSelector(content, name='head')

    def assertNotInBody(self, content):
        self.assertNotInSelector(content, name='body')

    def assertInSelector(self, content, **selectors):
        data = self.getHtmlSoup().find(**selectors)
        message = '\n{} not in \n{}'.format(content, data)

        # intentionally not using assertIn to avoid duplicating the output message
        self.assertTrue(content in str(data), msg=message)

    def assertNotInSelector(self, content, **selectors):
        data = self.getHtmlSoup().find(**selectors)
        message = '\n{} in \n{}'.format(content, data)

        # intentionally not using assertIn to avoid duplicating the output message
        self.assertFalse(content in str(data), msg=message)

    def assertNotInPage(self, content, message=None):
        self.assertNotIn(member=str(content),
                         container=self.getResponseData(),
                         msg=message)

    def assertRegexPage(self, regex, message=None):
        self.assertRegex(text=self.getResponseData(),
                         expected_regex=str(regex),
                         msg=message)

    def assertEqualPageTitle(self, title):
        self.assertEqual(self.getHtmlSoup().title.string, title)  # pylint: disable=no-member

    def assertStatusOK(self):
        self.assertStatusCode(200)

    def assertStatusUnauthorised(self):
        self.assertStatusCode(401)

    def assertStatusForbidden(self):
        self.assertStatusCode(403)

    def assertStatusNotFound(self):
        self.assertStatusCode(404)

    def assertStatusCode(self, status_code):
        if self.last_response is not None:
            self.assertEqual(self.last_response.status_code, status_code)
        else:
            self.fail('last_response is invalid')

    def assertEqualUrl(self, url):
        if self.last_url:
            self.assertEqual(url, self.last_url)
        else:
            self.fail('last_url is invalid')

    def assertInUrl(self, content):
        if self.last_url:
            self.assertIn(content, self.last_url)
        else:
            self.fail('last_url is invalid')

    def assertNotInUrl(self, content):
        if self.last_url:
            self.assertNotIn(content, self.last_url)
        else:
            self.fail('last_url is invalid')

    def assertRegexUrl(self, regex):
        if self.last_url:
            self.assertRegex(text=self.last_url,
                             expected_regex=regex)
        else:
            self.fail('last_url is invalid')

    def assertAnalyticsLength(self, arguments):
        if len(arguments[1]) == 2:
            self.assertEqual(2, len(arguments[1]))
        else:
            self.fail('Analytics not present')

    def assertNotAnalyticsLength(self, arguments):
        if not any(arguments[0]):
            self.assertEqual(0, len(arguments[0]))
        else:
            self.fail('Analytics present')


def decode_flask_cookie(cookie):
    """Decode a Flask cookie."""
    data = cookie.split('.')[0]
    data = base64_decode(data)
    data = zlib.decompress(data)
    return data.decode('utf-8')
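# --- Illustrative sketch (not part of the test suite) ----------------------
# How the helpers above compose in practice: launchSurvey() authenticates
# and follows redirects, post() re-posts to the last cached URL with the
# cached CSRF token added, and the assert* helpers inspect the cached
# response. The form field name below is hypothetical; real tests use the
# fields of the schema they launch.
class ExampleFlowTestSketch(IntegrationTestCase):

    def test_launch_and_submit_first_page(self):
        # GET /session?token=... and follow redirects to the first block.
        self.launchSurvey('test', 'dates')
        self.assertStatusOK()

        # POST to the last URL; the cached CSRF token is added for us.
        self.post({'example-answer': 'some value'})  # hypothetical field
        self.assertStatusOK()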
""" homeassistant.components.mqtt ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MQTT component, using paho-mqtt. For more details about this component, please refer to the documentation at https://home-assistant.io/components/mqtt/ """ import logging import os import socket import time from homeassistant.exceptions import HomeAssistantError import homeassistant.util as util from homeassistant.helpers import validate_config from homeassistant.const import ( EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP) _LOGGER = logging.getLogger(__name__) DOMAIN = "mqtt" MQTT_CLIENT = None DEFAULT_PORT = 1883 DEFAULT_KEEPALIVE = 60 DEFAULT_QOS = 0 DEFAULT_RETAIN = False SERVICE_PUBLISH = 'publish' EVENT_MQTT_MESSAGE_RECEIVED = 'MQTT_MESSAGE_RECEIVED' REQUIREMENTS = ['paho-mqtt==1.1'] CONF_BROKER = 'broker' CONF_PORT = 'port' CONF_CLIENT_ID = 'client_id' CONF_KEEPALIVE = 'keepalive' CONF_USERNAME = 'username' CONF_PASSWORD = 'password' CONF_CERTIFICATE = 'certificate' ATTR_TOPIC = 'topic' ATTR_PAYLOAD = 'payload' ATTR_QOS = 'qos' ATTR_RETAIN = 'retain' MAX_RECONNECT_WAIT = 300 # seconds def publish(hass, topic, payload, qos=None, retain=None): """ Send an MQTT message. """ data = { ATTR_TOPIC: topic, ATTR_PAYLOAD: payload, } if qos is not None: data[ATTR_QOS] = qos if retain is not None: data[ATTR_RETAIN] = retain hass.services.call(DOMAIN, SERVICE_PUBLISH, data) def subscribe(hass, topic, callback, qos=DEFAULT_QOS): """ Subscribe to a topic. """ def mqtt_topic_subscriber(event): """ Match subscribed MQTT topic. """ if _match_topic(topic, event.data[ATTR_TOPIC]): callback(event.data[ATTR_TOPIC], event.data[ATTR_PAYLOAD], event.data[ATTR_QOS]) hass.bus.listen(EVENT_MQTT_MESSAGE_RECEIVED, mqtt_topic_subscriber) MQTT_CLIENT.subscribe(topic, qos) def setup(hass, config): """ Get the MQTT protocol service. """ if not validate_config(config, {DOMAIN: ['broker']}, _LOGGER): return False conf = config[DOMAIN] broker = conf[CONF_BROKER] port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT) client_id = util.convert(conf.get(CONF_CLIENT_ID), str) keepalive = util.convert(conf.get(CONF_KEEPALIVE), int, DEFAULT_KEEPALIVE) username = util.convert(conf.get(CONF_USERNAME), str) password = util.convert(conf.get(CONF_PASSWORD), str) certificate = util.convert(conf.get(CONF_CERTIFICATE), str) # For cloudmqtt.com, secured connection, auto fill in certificate if certificate is None and 19999 < port < 30000 and \ broker.endswith('.cloudmqtt.com'): certificate = os.path.join(os.path.dirname(__file__), 'addtrustexternalcaroot.crt') global MQTT_CLIENT try: MQTT_CLIENT = MQTT(hass, broker, port, client_id, keepalive, username, password, certificate) except socket.error: _LOGGER.exception("Can't connect to the broker. " "Please check your settings and the broker " "itself.") return False def stop_mqtt(event): """ Stop MQTT component. """ MQTT_CLIENT.stop() def start_mqtt(event): """ Launch MQTT component when Home Assistant starts up. """ MQTT_CLIENT.start() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mqtt) def publish_service(call): """ Handle MQTT publish service calls. 
""" msg_topic = call.data.get(ATTR_TOPIC) payload = call.data.get(ATTR_PAYLOAD) qos = call.data.get(ATTR_QOS, DEFAULT_QOS) retain = call.data.get(ATTR_RETAIN, DEFAULT_RETAIN) if msg_topic is None or payload is None: return MQTT_CLIENT.publish(msg_topic, payload, qos, retain) hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mqtt) hass.services.register(DOMAIN, SERVICE_PUBLISH, publish_service) return True # pylint: disable=too-many-arguments class MQTT(object): """ Implements messaging service for MQTT. """ def __init__(self, hass, broker, port, client_id, keepalive, username, password, certificate): import paho.mqtt.client as mqtt self.userdata = { 'hass': hass, 'topics': {}, 'progress': {}, } if client_id is None: self._mqttc = mqtt.Client() else: self._mqttc = mqtt.Client(client_id) self._mqttc.user_data_set(self.userdata) if username is not None: self._mqttc.username_pw_set(username, password) if certificate is not None: self._mqttc.tls_set(certificate) self._mqttc.on_subscribe = _mqtt_on_subscribe self._mqttc.on_unsubscribe = _mqtt_on_unsubscribe self._mqttc.on_connect = _mqtt_on_connect self._mqttc.on_disconnect = _mqtt_on_disconnect self._mqttc.on_message = _mqtt_on_message self._mqttc.connect(broker, port, keepalive) def publish(self, topic, payload, qos, retain): """ Publish a MQTT message. """ self._mqttc.publish(topic, payload, qos, retain) def start(self): """ Run the MQTT client. """ self._mqttc.loop_start() def stop(self): """ Stop the MQTT client. """ self._mqttc.loop_stop() def subscribe(self, topic, qos): """ Subscribe to a topic. """ if topic in self.userdata['topics']: return result, mid = self._mqttc.subscribe(topic, qos) _raise_on_error(result) self.userdata['progress'][mid] = topic self.userdata['topics'][topic] = None def unsubscribe(self, topic): """ Unsubscribe from topic. """ result, mid = self._mqttc.unsubscribe(topic) _raise_on_error(result) self.userdata['progress'][mid] = topic def _mqtt_on_message(mqttc, userdata, msg): """ Message callback """ userdata['hass'].bus.fire(EVENT_MQTT_MESSAGE_RECEIVED, { ATTR_TOPIC: msg.topic, ATTR_QOS: msg.qos, ATTR_PAYLOAD: msg.payload.decode('utf-8'), }) def _mqtt_on_connect(mqttc, userdata, flags, result_code): """ On connect, resubscribe to all topics we were subscribed to. """ if result_code != 0: _LOGGER.error('Unable to connect to the MQTT broker: %s', { 1: 'Incorrect protocol version', 2: 'Invalid client identifier', 3: 'Server unavailable', 4: 'Bad username or password', 5: 'Not authorised' }.get(result_code, 'Unknown reason')) mqttc.disconnect() return old_topics = userdata['topics'] userdata['topics'] = {} userdata['progress'] = {} for topic, qos in old_topics.items(): # qos is None if we were in process of subscribing if qos is not None: mqttc.subscribe(topic, qos) def _mqtt_on_subscribe(mqttc, userdata, mid, granted_qos): """ Called when subscribe successful. """ topic = userdata['progress'].pop(mid, None) if topic is None: return userdata['topics'][topic] = granted_qos def _mqtt_on_unsubscribe(mqttc, userdata, mid, granted_qos): """ Called when subscribe successful. """ topic = userdata['progress'].pop(mid, None) if topic is None: return userdata['topics'].pop(topic, None) def _mqtt_on_disconnect(mqttc, userdata, result_code): """ Called when being disconnected. 
""" # When disconnected because of calling disconnect() if result_code == 0: return tries = 0 wait_time = 0 while True: try: if mqttc.reconnect() == 0: _LOGGER.info('Successfully reconnected to the MQTT server') break except socket.error: pass wait_time = min(2**tries, MAX_RECONNECT_WAIT) _LOGGER.warning( 'Disconnected from MQTT (%s). Trying to reconnect in %ss', result_code, wait_time) # It is ok to sleep here as we are in the MQTT thread. time.sleep(wait_time) tries += 1 def _raise_on_error(result): """ Raise error if error result. """ if result != 0: raise HomeAssistantError('Error talking to MQTT: {}'.format(result)) def _match_topic(subscription, topic): """ Returns if topic matches subscription. """ if subscription.endswith('#'): return (subscription[:-2] == topic or topic.startswith(subscription[:-1])) sub_parts = subscription.split('/') topic_parts = topic.split('/') return (len(sub_parts) == len(topic_parts) and all(a == b for a, b in zip(sub_parts, topic_parts) if a != '+'))
"""Support for Broadlink remotes.""" import asyncio from base64 import b64encode from collections import defaultdict from datetime import timedelta from itertools import product import logging from broadlink.exceptions import ( AuthorizationError, BroadlinkException, NetworkTimeoutError, ReadError, StorageError, ) import voluptuous as vol from homeassistant.components.remote import ( ATTR_ALTERNATIVE, ATTR_COMMAND, ATTR_COMMAND_TYPE, ATTR_DELAY_SECS, ATTR_DEVICE, ATTR_NUM_REPEATS, DEFAULT_DELAY_SECS, DOMAIN as RM_DOMAIN, PLATFORM_SCHEMA, SERVICE_DELETE_COMMAND, SERVICE_LEARN_COMMAND, SERVICE_SEND_COMMAND, SUPPORT_DELETE_COMMAND, SUPPORT_LEARN_COMMAND, RemoteEntity, ) from homeassistant.const import CONF_HOST, STATE_OFF from homeassistant.core import callback import homeassistant.helpers.config_validation as cv from homeassistant.helpers.restore_state import RestoreEntity from homeassistant.helpers.storage import Store from homeassistant.util.dt import utcnow from .const import DOMAIN from .helpers import data_packet, import_device _LOGGER = logging.getLogger(__name__) LEARNING_TIMEOUT = timedelta(seconds=30) COMMAND_TYPE_IR = "ir" COMMAND_TYPE_RF = "rf" COMMAND_TYPES = [COMMAND_TYPE_IR, COMMAND_TYPE_RF] CODE_STORAGE_VERSION = 1 FLAG_STORAGE_VERSION = 1 CODE_SAVE_DELAY = 15 FLAG_SAVE_DELAY = 15 COMMAND_SCHEMA = vol.Schema( { vol.Required(ATTR_COMMAND): vol.All( cv.ensure_list, [vol.All(cv.string, vol.Length(min=1))], vol.Length(min=1) ), }, extra=vol.ALLOW_EXTRA, ) SERVICE_SEND_SCHEMA = COMMAND_SCHEMA.extend( { vol.Optional(ATTR_DEVICE): vol.All(cv.string, vol.Length(min=1)), vol.Optional(ATTR_DELAY_SECS, default=DEFAULT_DELAY_SECS): vol.Coerce(float), } ) SERVICE_LEARN_SCHEMA = COMMAND_SCHEMA.extend( { vol.Required(ATTR_DEVICE): vol.All(cv.string, vol.Length(min=1)), vol.Optional(ATTR_COMMAND_TYPE, default=COMMAND_TYPE_IR): vol.In(COMMAND_TYPES), vol.Optional(ATTR_ALTERNATIVE, default=False): cv.boolean, } ) SERVICE_DELETE_SCHEMA = COMMAND_SCHEMA.extend( {vol.Required(ATTR_DEVICE): vol.All(cv.string, vol.Length(min=1))} ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_HOST): cv.string}, extra=vol.ALLOW_EXTRA ) async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Import the device and discontinue platform. This is for backward compatibility. Do not use this method. 
""" import_device(hass, config[CONF_HOST]) _LOGGER.warning( "The remote platform is deprecated, please remove it from your configuration" ) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up a Broadlink remote.""" device = hass.data[DOMAIN].devices[config_entry.entry_id] remote = BroadlinkRemote( device, Store(hass, CODE_STORAGE_VERSION, f"broadlink_remote_{device.unique_id}_codes"), Store(hass, FLAG_STORAGE_VERSION, f"broadlink_remote_{device.unique_id}_flags"), ) async_add_entities([remote], False) class BroadlinkRemote(RemoteEntity, RestoreEntity): """Representation of a Broadlink remote.""" def __init__(self, device, codes, flags): """Initialize the remote.""" self._device = device self._coordinator = device.update_manager.coordinator self._code_storage = codes self._flag_storage = flags self._storage_loaded = False self._codes = {} self._flags = defaultdict(int) self._state = True self._lock = asyncio.Lock() @property def name(self): """Return the name of the remote.""" return f"{self._device.name} Remote" @property def unique_id(self): """Return the unique id of the remote.""" return self._device.unique_id @property def is_on(self): """Return True if the remote is on.""" return self._state @property def available(self): """Return True if the remote is available.""" return self._device.update_manager.available @property def should_poll(self): """Return True if the remote has to be polled for state.""" return False @property def supported_features(self): """Flag supported features.""" return SUPPORT_LEARN_COMMAND | SUPPORT_DELETE_COMMAND @property def device_info(self): """Return device info.""" return { "identifiers": {(DOMAIN, self._device.unique_id)}, "manufacturer": self._device.api.manufacturer, "model": self._device.api.model, "name": self._device.name, "sw_version": self._device.fw_version, } def _extract_codes(self, commands, device=None): """Extract a list of codes. If the command starts with `b64:`, extract the code from it. Otherwise, extract the code from storage, using the command and device as keys. The codes are returned in sublists. For toggle commands, the sublist contains two codes that must be sent alternately with each call. """ code_list = [] for cmd in commands: if cmd.startswith("b64:"): codes = [cmd[4:]] else: if device is None: raise ValueError("You need to specify a device") try: codes = self._codes[device][cmd] except KeyError as err: raise ValueError(f"Command not found: {repr(cmd)}") from err if isinstance(codes, list): codes = codes[:] else: codes = [codes] for idx, code in enumerate(codes): try: codes[idx] = data_packet(code) except ValueError as err: raise ValueError(f"Invalid code: {repr(code)}") from err code_list.append(codes) return code_list @callback def _get_codes(self): """Return a dictionary of codes.""" return self._codes @callback def _get_flags(self): """Return a dictionary of toggle flags. A toggle flag indicates whether the remote should send an alternative code. 
""" return self._flags async def async_added_to_hass(self): """Call when the remote is added to hass.""" state = await self.async_get_last_state() self._state = state is None or state.state != STATE_OFF self.async_on_remove( self._coordinator.async_add_listener(self.async_write_ha_state) ) async def async_update(self): """Update the remote.""" await self._coordinator.async_request_refresh() async def async_turn_on(self, **kwargs): """Turn on the remote.""" self._state = True self.async_write_ha_state() async def async_turn_off(self, **kwargs): """Turn off the remote.""" self._state = False self.async_write_ha_state() async def _async_load_storage(self): """Load code and flag storage from disk.""" # Exception is intentionally not trapped to # provide feedback if something fails. self._codes.update(await self._code_storage.async_load() or {}) self._flags.update(await self._flag_storage.async_load() or {}) self._storage_loaded = True async def async_send_command(self, command, **kwargs): """Send a list of commands to a device.""" kwargs[ATTR_COMMAND] = command kwargs = SERVICE_SEND_SCHEMA(kwargs) commands = kwargs[ATTR_COMMAND] device = kwargs.get(ATTR_DEVICE) repeat = kwargs[ATTR_NUM_REPEATS] delay = kwargs[ATTR_DELAY_SECS] service = f"{RM_DOMAIN}.{SERVICE_SEND_COMMAND}" if not self._state: _LOGGER.warning( "%s canceled: %s entity is turned off", service, self.entity_id ) return if not self._storage_loaded: await self._async_load_storage() try: code_list = self._extract_codes(commands, device) except ValueError as err: _LOGGER.error("Failed to call %s: %s", service, err) raise rf_flags = {0xB2, 0xD7} if not hasattr(self._device.api, "sweep_frequency") and any( c[0] in rf_flags for codes in code_list for c in codes ): err_msg = f"{self.entity_id} doesn't support sending RF commands" _LOGGER.error("Failed to call %s: %s", service, err_msg) raise ValueError(err_msg) at_least_one_sent = False for _, codes in product(range(repeat), code_list): if at_least_one_sent: await asyncio.sleep(delay) if len(codes) > 1: code = codes[self._flags[device]] else: code = codes[0] try: await self._device.async_request(self._device.api.send_data, code) except (BroadlinkException, OSError) as err: _LOGGER.error("Error during %s: %s", service, err) break if len(codes) > 1: self._flags[device] ^= 1 at_least_one_sent = True if at_least_one_sent: self._flag_storage.async_delay_save(self._get_flags, FLAG_SAVE_DELAY) async def async_learn_command(self, **kwargs): """Learn a list of commands from a remote.""" kwargs = SERVICE_LEARN_SCHEMA(kwargs) commands = kwargs[ATTR_COMMAND] command_type = kwargs[ATTR_COMMAND_TYPE] device = kwargs[ATTR_DEVICE] toggle = kwargs[ATTR_ALTERNATIVE] service = f"{RM_DOMAIN}.{SERVICE_LEARN_COMMAND}" if not self._state: _LOGGER.warning( "%s canceled: %s entity is turned off", service, self.entity_id ) return if not self._storage_loaded: await self._async_load_storage() async with self._lock: if command_type == COMMAND_TYPE_IR: learn_command = self._async_learn_ir_command elif hasattr(self._device.api, "sweep_frequency"): learn_command = self._async_learn_rf_command else: err_msg = f"{self.entity_id} doesn't support learning RF commands" _LOGGER.error("Failed to call %s: %s", service, err_msg) raise ValueError(err_msg) should_store = False for command in commands: try: code = await learn_command(command) if toggle: code = [code, await learn_command(command)] except (AuthorizationError, NetworkTimeoutError, OSError) as err: _LOGGER.error("Failed to learn '%s': %s", command, err) break except 
BroadlinkException as err: _LOGGER.error("Failed to learn '%s': %s", command, err) continue self._codes.setdefault(device, {}).update({command: code}) should_store = True if should_store: await self._code_storage.async_save(self._codes) async def _async_learn_ir_command(self, command): """Learn an infrared command.""" try: await self._device.async_request(self._device.api.enter_learning) except (BroadlinkException, OSError) as err: _LOGGER.debug("Failed to enter learning mode: %s", err) raise self.hass.components.persistent_notification.async_create( f"Press the '{command}' button.", title="Learn command", notification_id="learn_command", ) try: start_time = utcnow() while (utcnow() - start_time) < LEARNING_TIMEOUT: await asyncio.sleep(1) try: code = await self._device.async_request(self._device.api.check_data) except (ReadError, StorageError): continue return b64encode(code).decode("utf8") raise TimeoutError( "No infrared code received within " f"{LEARNING_TIMEOUT.total_seconds()} seconds" ) finally: self.hass.components.persistent_notification.async_dismiss( notification_id="learn_command" ) async def _async_learn_rf_command(self, command): """Learn a radiofrequency command.""" try: await self._device.async_request(self._device.api.sweep_frequency) except (BroadlinkException, OSError) as err: _LOGGER.debug("Failed to sweep frequency: %s", err) raise self.hass.components.persistent_notification.async_create( f"Press and hold the '{command}' button.", title="Sweep frequency", notification_id="sweep_frequency", ) try: start_time = utcnow() while (utcnow() - start_time) < LEARNING_TIMEOUT: await asyncio.sleep(1) found = await self._device.async_request( self._device.api.check_frequency ) if found: break else: await self._device.async_request( self._device.api.cancel_sweep_frequency ) raise TimeoutError( "No radiofrequency found within " f"{LEARNING_TIMEOUT.total_seconds()} seconds" ) finally: self.hass.components.persistent_notification.async_dismiss( notification_id="sweep_frequency" ) await asyncio.sleep(1) try: await self._device.async_request(self._device.api.find_rf_packet) except (BroadlinkException, OSError) as err: _LOGGER.debug("Failed to enter learning mode: %s", err) raise self.hass.components.persistent_notification.async_create( f"Press the '{command}' button again.", title="Learn command", notification_id="learn_command", ) try: start_time = utcnow() while (utcnow() - start_time) < LEARNING_TIMEOUT: await asyncio.sleep(1) try: code = await self._device.async_request(self._device.api.check_data) except (ReadError, StorageError): continue return b64encode(code).decode("utf8") raise TimeoutError( "No radiofrequency code received within " f"{LEARNING_TIMEOUT.total_seconds()} seconds" ) finally: self.hass.components.persistent_notification.async_dismiss( notification_id="learn_command" ) async def async_delete_command(self, **kwargs): """Delete a list of commands from a remote.""" kwargs = SERVICE_DELETE_SCHEMA(kwargs) commands = kwargs[ATTR_COMMAND] device = kwargs[ATTR_DEVICE] service = f"{RM_DOMAIN}.{SERVICE_DELETE_COMMAND}" if not self._state: _LOGGER.warning( "%s canceled: %s entity is turned off", service, self.entity_id, ) return if not self._storage_loaded: await self._async_load_storage() try: codes = self._codes[device] except KeyError as err: err_msg = f"Device not found: {repr(device)}" _LOGGER.error("Failed to call %s. 
%s", service, err_msg) raise ValueError(err_msg) from err cmds_not_found = [] for command in commands: try: del codes[command] except KeyError: cmds_not_found.append(command) if cmds_not_found: if len(cmds_not_found) == 1: err_msg = f"Command not found: {repr(cmds_not_found[0])}" else: err_msg = f"Commands not found: {repr(cmds_not_found)}" if len(cmds_not_found) == len(commands): _LOGGER.error("Failed to call %s. %s", service, err_msg) raise ValueError(err_msg) _LOGGER.error("Error during %s. %s", service, err_msg) # Clean up if not codes: del self._codes[device] if self._flags.pop(device, None) is not None: self._flag_storage.async_delay_save(self._get_flags, FLAG_SAVE_DELAY) self._code_storage.async_delay_save(self._get_codes, CODE_SAVE_DELAY)
import os
import re
import urllib
from datetime import datetime
from urlparse import urljoin

from flask import Flask, render_template, make_response, url_for, \
    request as flask_request
from werkzeug.contrib.cache import SimpleCache, MemcachedCache
import requests
from bs4 import BeautifulSoup

app = Flask(__name__)
app.config.update(
    DEBUG=(True if os.environ.get('DEBUG') in ['1', 'True'] else False),
    PORT=int(os.environ.get('PORT', 5000)),
    MEMCACHE_HOST=os.environ.get('MEMCACHE_HOST', None),
    CACHE_TIMEOUT=int(os.environ.get('CACHE_TIMEOUT', 300)),
)

tent_mime = 'application/vnd.tent.v0+json'
tent_link_rel = 'https://tent.io/rels/profile'

if app.config['MEMCACHE_HOST'] is not None:
    cache = MemcachedCache(app.config['MEMCACHE_HOST'])
else:
    cache = SimpleCache()


class TentRSSError(Exception):
    """ A high-level error intended to be reported to the user. """
    def __init__(self, desc):
        self.desc = desc

    def __str__(self):
        return self.desc


def get_profile_links_from(response):
    """ Extract profile links from a Requests response. """
    profiles = []

    # Option 1: HTTP Link header. Use .get() because the header may be
    # absent entirely, in which case we fall through to option 2.
    links = response.headers.get('link')
    if links is not None and links != '':
        for link in re.split(',\s*', links):
            pattern = '''<([^>]+)>; rel="(https?://[^\"]+)"\s*$'''
            try:
                href, rel = re.match(pattern, link).groups()
            except AttributeError:
                continue  # try next link. this one didn't parse
            if rel == tent_link_rel:
                profiles += [href]

    # Option 2: HTML <link> tag.
    soup = BeautifulSoup(response.content)
    links = soup.findAll('link', rel=tent_link_rel)
    profiles += [link['href'] for link in links]

    # Returned profiles are converted to absolute URLs.
    return [urljoin(response.url, href) for href in profiles]


def get_latest_posts(tent_uri):
    """
    Return array of 10 latest posts from tent_uri.

    Each post also has 'post_guid' and 'rfc822_time' elements set, as
    well as 'post_link' in cases where a permalink is available.
    """
    # check cache
    posts = cache.get('posts:' + tent_uri)
    if posts is not None:
        return posts

    app.logger.debug('tent_uri is %s' % tent_uri)
    if tent_uri == '':
        raise TentRSSError('No URI!')

    try:
        response = requests.get(tent_uri, timeout=5)
    except requests.ConnectionError as e:
        app.logger.debug('Connection to %s failed: %s' % (tent_uri, repr(e)))
        raise TentRSSError("Can't connect to %s" % tent_uri)

    apiroots = None
    profiles = get_profile_links_from(response)
    if len(profiles) == 0:
        raise TentRSSError('No profile link found')

    for profile in profiles:
        headers = {'accept': tent_mime}
        try:
            response = requests.get(profile, timeout=5, headers=headers)
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            app.logger.debug('exception loading %s: %s'
                             % (profile, repr(e)))
            continue

        # profile link worked, use it
        json = response.json()
        apiroots = json['https://tent.io/types/info/core/v0.1.0']['servers']
        break

    if apiroots is None or len(apiroots) == 0:
        raise TentRSSError('No API roots found!')

    args = {'limit': '10',
            'post_types': 'https://tent.io/types/post/status/v0.1.0'}
    headers = {'accept': tent_mime}
    posts = None
    for root in apiroots:
        url = root + "/posts"
        try:
            r = requests.get(url, timeout=5, headers=headers, params=args)
            r.raise_for_status()
        except requests.exceptions.RequestException as e:
            app.logger.debug('exception when getting %s: %s'
                             % (url, repr(e)))
            continue

        posts = r.json()
        if posts is None:
            app.logger.debug('%s returned no valid JSON' % url)
        else:
            break

    if posts is None:
        # Every API root failed; report instead of crashing below.
        raise TentRSSError('No posts could be fetched')

    # prepare info the template needs
    for post in posts:
        # The protocol unfortunately does not give us a canonical URL for
        # opening a post in a web browser. We can come up with a URL that
        # returns each individual post as raw JSON, but that's it.
        #
        # So, for user-friendliness use the JSON URL only as a GUID, but
        # not a link (it will try to download a JSON file). For the time
        # being at least, we will special-case https://username.tent.is/
        # entities and provide a link in those cases only.
        post['post_guid'] = root + '/posts/' + post['id']

        m = re.match('''https://(\w+)\.tent\.is/tent$''', root)
        if m is not None:
            # This is a Tent.is user
            post['post_link'] = 'https://' + m.groups()[0] \
                + '.tent.is/posts/' + post['id']

        dt = datetime.utcfromtimestamp(int(post['published_at']))
        # We don't know the actual timezone in which the user made this
        # post, but UNIX timestamps are UTC-based so we hardcode +0000.
        post['rfc822_time'] = dt.strftime('%a, %d %b %Y %H:%M:%S +0000')

    # save result in cache
    cache.set('posts:' + tent_uri, posts, app.config['CACHE_TIMEOUT'])
    return posts


def generate_feed_url(entity_uri):
    """ Generate feed URL for the given Tent entity URI. """
    # Generating the correct full absolute URL, given proxying,
    # is hard! If proxying, this requires you add an
    # X-Original-Request-URI header to the proxy configuration.
    return urljoin(urljoin(flask_request.host_url,
                           flask_request.headers.get(
                               'X-Original-Request-URI', '/')),
                   '.' + url_for('user_feed')
                   + '?uri=' + urllib.quote(entity_uri))


@app.route('/')
def front_page():
    tent_uri = flask_request.args.get('uri', '')
    if tent_uri is None or tent_uri == '':
        return render_template('index.html')

    try:
        posts = get_latest_posts(tent_uri)
    except TentRSSError as e:
        return render_template('error.html', uri=tent_uri, error=e), 404

    feed_url = generate_feed_url(tent_uri)
    return render_template('feed.html', posts=posts,
                           uri=tent_uri, feed_url=feed_url)


@app.route('/feed')
def user_feed():
    tent_uri = flask_request.args.get('uri', '')
    try:
        posts = get_latest_posts(tent_uri)
    except TentRSSError as e:
        return render_template('error.html', uri=tent_uri, error=e), 404

    feed_url = generate_feed_url(tent_uri)
    response = make_response(render_template('feed.xml', posts=posts,
                                             uri=tent_uri,
                                             feed_url=feed_url))
    response.mimetype = 'application/xml'
    return response


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=app.config['PORT'])
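# --- Illustrative check (not part of the app) -------------------------------
# The Link-header branch of get_profile_links_from() boils down to the regex
# below; here it runs against a hand-written header value (the URLs are
# examples, no HTTP involved).
import re

link_header = ('<https://example.com/tent/profile>; '
               'rel="https://tent.io/rels/profile", '
               '<https://example.com/other>; rel="https://example.com/rels/x"')

pattern = r'<([^>]+)>; rel="(https?://[^"]+)"\s*$'
for link in re.split(r',\s*', link_header):
    m = re.match(pattern, link)
    if m:
        href, rel = m.groups()
        if rel == 'https://tent.io/rels/profile':
            print(href)  # -> https://example.com/tent/profile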
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import warnings
import random

if sys.version >= '3':
    basestring = unicode = str
    long = int
    from functools import reduce
else:
    from itertools import imap as map

from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql import since
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.types import *

__all__ = ["DataFrame", "SchemaRDD", "DataFrameNaFunctions", "DataFrameStatFunctions"]


class DataFrame(object):
    """A distributed collection of data grouped into named columns.

    A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
    and can be created using various functions in :class:`SQLContext`::

        people = sqlContext.read.parquet("...")

    Once created, it can be manipulated using the various domain-specific-language
    (DSL) functions defined in: :class:`DataFrame`, :class:`Column`.

    To select a column from the data frame, use the apply method::

        ageCol = people.age

    A more concrete example::

        # To create DataFrame using SQLContext
        people = sqlContext.read.parquet("...")
        department = sqlContext.read.parquet("...")

        people.filter(people.age > 30).join(department, people.deptId == department.id) \
          .groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})

    .. note:: Experimental

    .. versionadded:: 1.3
    """

    def __init__(self, jdf, sql_ctx):
        self._jdf = jdf
        self.sql_ctx = sql_ctx
        self._sc = sql_ctx and sql_ctx._sc
        self.is_cached = False
        self._schema = None  # initialized lazily
        self._lazy_rdd = None

    @property
    @since(1.3)
    def rdd(self):
        """Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
        """
        if self._lazy_rdd is None:
            jrdd = self._jdf.javaToPython()
            self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc,
                                 BatchedSerializer(PickleSerializer()))
        return self._lazy_rdd

    @property
    @since("1.3.1")
    def na(self):
        """Returns a :class:`DataFrameNaFunctions` for handling missing values.
        """
        return DataFrameNaFunctions(self)

    @property
    @since(1.4)
    def stat(self):
        """Returns a :class:`DataFrameStatFunctions` for statistic functions.
        """
        return DataFrameStatFunctions(self)

    @ignore_unicode_prefix
    @since(1.3)
    def toJSON(self, use_unicode=True):
        """Converts a :class:`DataFrame` into a :class:`RDD` of string.

        Each row is turned into a JSON document as one element in the returned RDD.

        >>> df.toJSON().first()
        u'{"age":2,"name":"Alice"}'
        """
        rdd = self._jdf.toJSON()
        return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))

    def saveAsParquetFile(self, path):
        """Saves the contents as a Parquet file, preserving the schema.

        .. note:: Deprecated in 1.4, use :func:`DataFrameWriter.parquet` instead.
        """
        warnings.warn("saveAsParquetFile is deprecated. Use write.parquet() instead.")
        self._jdf.saveAsParquetFile(path)

    @since(1.3)
    def registerTempTable(self, name):
        """Registers this RDD as a temporary table using the given name.

        The lifetime of this temporary table is tied to the :class:`SQLContext`
        that was used to create this :class:`DataFrame`.

        >>> df.registerTempTable("people")
        >>> df2 = sqlContext.sql("select * from people")
        >>> sorted(df.collect()) == sorted(df2.collect())
        True
        """
        self._jdf.registerTempTable(name)

    def registerAsTable(self, name):
        """
        .. note:: Deprecated in 1.4, use :func:`registerTempTable` instead.
        """
        warnings.warn("Use registerTempTable instead of registerAsTable.")
        self.registerTempTable(name)

    def insertInto(self, tableName, overwrite=False):
        """Inserts the contents of this :class:`DataFrame` into the specified table.

        .. note:: Deprecated in 1.4, use :func:`DataFrameWriter.insertInto` instead.
        """
        warnings.warn("insertInto is deprecated. Use write.insertInto() instead.")
        self.write.insertInto(tableName, overwrite)

    def saveAsTable(self, tableName, source=None, mode="error", **options):
        """Saves the contents of this :class:`DataFrame` to a data source as a table.

        .. note:: Deprecated in 1.4, use :func:`DataFrameWriter.saveAsTable` instead.
        """
        warnings.warn("saveAsTable is deprecated. Use write.saveAsTable() instead.")
        self.write.saveAsTable(tableName, source, mode, **options)

    @since(1.3)
    def save(self, path=None, source=None, mode="error", **options):
        """Saves the contents of the :class:`DataFrame` to a data source.

        .. note:: Deprecated in 1.4, use :func:`DataFrameWriter.save` instead.
        """
        warnings.warn("save is deprecated. Use write.save() instead.")
        return self.write.save(path, source, mode, **options)

    @property
    @since(1.4)
    def write(self):
        """
        Interface for saving the content of the :class:`DataFrame` out into external
        storage.

        :return: :class:`DataFrameWriter`
        """
        return DataFrameWriter(self)

    @property
    @since(1.3)
    def schema(self):
        """Returns the schema of this :class:`DataFrame` as a :class:`types.StructType`.

        >>> df.schema
        StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
        """
        if self._schema is None:
            try:
                self._schema = _parse_datatype_json_string(self._jdf.schema().json())
            except AttributeError as e:
                raise Exception(
                    "Unable to parse datatype from schema. %s" % e)
        return self._schema

    @since(1.3)
    def printSchema(self):
        """Prints out the schema in the tree format.

        >>> df.printSchema()
        root
         |-- age: integer (nullable = true)
         |-- name: string (nullable = true)
        <BLANKLINE>
        """
        print(self._jdf.schema().treeString())

    @since(1.3)
    def explain(self, extended=False):
        """Prints the (logical and physical) plans to the console for debugging purpose.

        :param extended: boolean, default ``False``. If ``False``, prints only the physical plan.

        >>> df.explain()
        Scan PhysicalRDD[age#0,name#1]

        >>> df.explain(True)
        == Parsed Logical Plan ==
        ...
        == Analyzed Logical Plan ==
        ...
        == Optimized Logical Plan ==
        ...
        == Physical Plan ==
        ...
        """
        if extended:
            print(self._jdf.queryExecution().toString())
        else:
            print(self._jdf.queryExecution().executedPlan().toString())

    @since(1.3)
    def isLocal(self):
        """Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
        (without any Spark executors).
        """
        return self._jdf.isLocal()

    @since(1.3)
    def show(self, n=20, truncate=True):
        """Prints the first ``n`` rows to the console.

        :param n: Number of rows to show.
        :param truncate: Whether to truncate long strings and align cells right.

        >>> df
        DataFrame[age: int, name: string]
        >>> df.show()
        +---+-----+
        |age| name|
        +---+-----+
        |  2|Alice|
        |  5|  Bob|
        +---+-----+
        """
        print(self._jdf.showString(n, truncate))

    def __repr__(self):
        return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))

    @since(1.3)
    def count(self):
        """Returns the number of rows in this :class:`DataFrame`.

        >>> df.count()
        2
        """
        return int(self._jdf.count())

    @ignore_unicode_prefix
    @since(1.3)
    def collect(self):
        """Returns all the records as a list of :class:`Row`.

        >>> df.collect()
        [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
        """
        with SCCallSiteSync(self._sc) as css:
            port = self._sc._jvm.PythonRDD.collectAndServe(self._jdf.javaToPython().rdd())
        return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))

    @ignore_unicode_prefix
    @since(1.3)
    def limit(self, num):
        """Limits the result count to the number specified.

        >>> df.limit(1).collect()
        [Row(age=2, name=u'Alice')]
        >>> df.limit(0).collect()
        []
        """
        jdf = self._jdf.limit(num)
        return DataFrame(jdf, self.sql_ctx)

    @ignore_unicode_prefix
    @since(1.3)
    def take(self, num):
        """Returns the first ``num`` rows as a :class:`list` of :class:`Row`.

        >>> df.take(2)
        [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
        """
        with SCCallSiteSync(self._sc) as css:
            port = self._sc._jvm.org.apache.spark.sql.execution.EvaluatePython.takeAndServe(
                self._jdf, num)
        return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))

    @ignore_unicode_prefix
    @since(1.3)
    def map(self, f):
        """ Returns a new :class:`RDD` by applying the ``f`` function to each :class:`Row`.

        This is a shorthand for ``df.rdd.map()``.

        >>> df.map(lambda p: p.name).collect()
        [u'Alice', u'Bob']
        """
        return self.rdd.map(f)

    @ignore_unicode_prefix
    @since(1.3)
    def flatMap(self, f):
        """ Returns a new :class:`RDD` by first applying the ``f`` function to each :class:`Row`,
        and then flattening the results.

        This is a shorthand for ``df.rdd.flatMap()``.

        >>> df.flatMap(lambda p: p.name).collect()
        [u'A', u'l', u'i', u'c', u'e', u'B', u'o', u'b']
        """
        return self.rdd.flatMap(f)

    @since(1.3)
    def mapPartitions(self, f, preservesPartitioning=False):
        """Returns a new :class:`RDD` by applying the ``f`` function to each partition.

        This is a shorthand for ``df.rdd.mapPartitions()``.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(iterator): yield 1
        >>> rdd.mapPartitions(f).sum()
        4
        """
        return self.rdd.mapPartitions(f, preservesPartitioning)

    @since(1.3)
    def foreach(self, f):
        """Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.

        This is a shorthand for ``df.rdd.foreach()``.

        >>> def f(person):
        ...     print(person.name)
        >>> df.foreach(f)
        """
        return self.rdd.foreach(f)

    @since(1.3)
    def foreachPartition(self, f):
        """Applies the ``f`` function to each partition of this :class:`DataFrame`.

        This is a shorthand for ``df.rdd.foreachPartition()``.

        >>> def f(people):
        ...     for person in people:
        ...         print(person.name)
        >>> df.foreachPartition(f)
        """
        return self.rdd.foreachPartition(f)

    @since(1.3)
    def cache(self):
        """ Persists with the default storage level (C{MEMORY_ONLY_SER}).
        """
        self.is_cached = True
        self._jdf.cache()
        return self

    @since(1.3)
    def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
        """Sets the storage level to persist its values across operations
        after the first time it is computed. This can only be used to assign
        a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified, it defaults to (C{MEMORY_ONLY_SER}).
        """
        self.is_cached = True
        javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
        self._jdf.persist(javaStorageLevel)
        return self

    @since(1.3)
    def unpersist(self, blocking=True):
        """Marks the :class:`DataFrame` as non-persistent, and removes all blocks for it from
        memory and disk.
        """
        self.is_cached = False
        self._jdf.unpersist(blocking)
        return self

    @since(1.4)
    def coalesce(self, numPartitions):
        """
        Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.

        Similar to coalesce defined on an :class:`RDD`, this operation results in a
        narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
        there will not be a shuffle, instead each of the 100 new partitions will
        claim 10 of the current partitions.

        >>> df.coalesce(1).rdd.getNumPartitions()
        1
        """
        return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)

    @since(1.3)
    def repartition(self, numPartitions):
        """Returns a new :class:`DataFrame` that has exactly ``numPartitions`` partitions.

        >>> df.repartition(10).rdd.getNumPartitions()
        10
        """
        return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)

    @since(1.3)
    def distinct(self):
        """Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.

        >>> df.distinct().count()
        2
        """
        return DataFrame(self._jdf.distinct(), self.sql_ctx)

    @since(1.3)
    def sample(self, withReplacement, fraction, seed=None):
        """Returns a sampled subset of this :class:`DataFrame`.

        >>> df.sample(False, 0.5, 42).count()
        1
        """
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        seed = seed if seed is not None else random.randint(0, sys.maxsize)
        rdd = self._jdf.sample(withReplacement, fraction, long(seed))
        return DataFrame(rdd, self.sql_ctx)

    @since(1.5)
    def sampleBy(self, col, fractions, seed=None):
        """
        Returns a stratified sample without replacement based on the
        fraction given on each stratum.

        :param col: column that defines strata
        :param fractions:
            sampling fraction for each stratum. If a stratum is not
            specified, we treat its fraction as zero.
        :param seed: random seed
        :return: a new DataFrame that represents the stratified sample

        >>> from pyspark.sql.functions import col
        >>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
        >>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
        >>> sampled.groupBy("key").count().orderBy("key").show()
        +---+-----+
        |key|count|
        +---+-----+
        |  0|    3|
        |  1|    8|
        +---+-----+
        """
        if not isinstance(col, str):
            raise ValueError("col must be a string, but got %r" % type(col))
        if not isinstance(fractions, dict):
            raise ValueError("fractions must be a dict but got %r" % type(fractions))
        for k, v in fractions.items():
            if not isinstance(k, (float, int, long, basestring)):
                raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
            fractions[k] = float(v)
        seed = seed if seed is not None else random.randint(0, sys.maxsize)
        return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)

    @since(1.4)
    def randomSplit(self, weights, seed=None):
        """Randomly splits this :class:`DataFrame` with the provided weights.

        :param weights: list of doubles as weights with which to split the DataFrame.
            Weights will be normalized if they don't sum up to 1.0.
        :param seed: The seed for sampling.

        >>> splits = df4.randomSplit([1.0, 2.0], 24)
        >>> splits[0].count()
        1

        >>> splits[1].count()
        3
        """
        for w in weights:
            if w < 0.0:
                raise ValueError("Weights must be positive. Found weight value: %s" % w)
        seed = seed if seed is not None else random.randint(0, sys.maxsize)
        rdd_array = self._jdf.randomSplit(_to_seq(self.sql_ctx._sc, weights), long(seed))
        return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]

    @property
    @since(1.3)
    def dtypes(self):
        """Returns all column names and their data types as a list.

        >>> df.dtypes
        [('age', 'int'), ('name', 'string')]
        """
        return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]

    @property
    @since(1.3)
    def columns(self):
        """Returns all column names as a list.

        >>> df.columns
        ['age', 'name']
        """
        return [f.name for f in self.schema.fields]

    @ignore_unicode_prefix
    @since(1.3)
    def alias(self, alias):
        """Returns a new :class:`DataFrame` with an alias set.

        >>> from pyspark.sql.functions import *
        >>> df_as1 = df.alias("df_as1")
        >>> df_as2 = df.alias("df_as2")
        >>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
        >>> joined_df.select(col("df_as1.name"), col("df_as2.name"), col("df_as2.age")).collect()
        [Row(name=u'Alice', name=u'Alice', age=2), Row(name=u'Bob', name=u'Bob', age=5)]
        """
        assert isinstance(alias, basestring), "alias should be a string"
        return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)

    @ignore_unicode_prefix
    @since(1.3)
    def join(self, other, on=None, how=None):
        """Joins with another :class:`DataFrame`, using the given join expression.

        The following performs a full outer join between ``df1`` and ``df2``.

        :param other: Right side of the join
        :param on: a string for join column name, a list of column names,
            a join expression (Column) or a list of Columns.
            If `on` is a string or a list of string indicating the name of the join column(s),
            the column(s) must exist on both sides, and this performs an inner equi-join.
        :param how: str, default 'inner'.
            One of `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.

        >>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
        [Row(name=None, height=80), Row(name=u'Alice', height=None), Row(name=u'Bob', height=85)]

        >>> cond = [df.name == df3.name, df.age == df3.age]
        >>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
        [Row(name=u'Bob', age=5), Row(name=u'Alice', age=2)]

        >>> df.join(df2, 'name').select(df.name, df2.height).collect()
        [Row(name=u'Bob', height=85)]

        >>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
        [Row(name=u'Bob', age=5)]
        """
        if on is not None and not isinstance(on, list):
            on = [on]

        if on is None or len(on) == 0:
            jdf = self._jdf.join(other._jdf)
        elif isinstance(on[0], basestring):
            assert how is None or how == 'inner', "Equi-join does not support: %s" % how
            jdf = self._jdf.join(other._jdf, self._jseq(on))
        else:
            assert isinstance(on[0], Column), "on should be Column or list of Column"
            if len(on) > 1:
                on = reduce(lambda x, y: x.__and__(y), on)
            else:
                on = on[0]
            if how is None:
                jdf = self._jdf.join(other._jdf, on._jc, "inner")
            else:
                assert isinstance(how, basestring), "how should be basestring"
                jdf = self._jdf.join(other._jdf, on._jc, how)
        return DataFrame(jdf, self.sql_ctx)

    @ignore_unicode_prefix
    @since(1.3)
    def sort(self, *cols, **kwargs):
        """Returns a new :class:`DataFrame` sorted by the specified column(s).

        :param cols: list of :class:`Column` or column names to sort by.
        :param ascending: boolean or list of boolean (default True).
            Sort ascending vs. descending. Specify list for multiple sort orders.
            If a list is specified, length of the list must equal length of the `cols`.

        >>> df.sort(df.age.desc()).collect()
        [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
        >>> df.sort("age", ascending=False).collect()
        [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
        >>> df.orderBy(df.age.desc()).collect()
        [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
        >>> from pyspark.sql.functions import *
        >>> df.sort(asc("age")).collect()
        [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
        >>> df.orderBy(desc("age"), "name").collect()
        [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
        >>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
        [Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
        """
        if not cols:
            raise ValueError("should sort by at least one column")
        if len(cols) == 1 and isinstance(cols[0], list):
            cols = cols[0]
        jcols = [_to_java_column(c) for c in cols]
        ascending = kwargs.get('ascending', True)
        if isinstance(ascending, (bool, int)):
            if not ascending:
                jcols = [jc.desc() for jc in jcols]
        elif isinstance(ascending, list):
            jcols = [jc if asc else jc.desc()
                     for asc, jc in zip(ascending, jcols)]
        else:
            raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))

        jdf = self._jdf.sort(self._jseq(jcols))
        return DataFrame(jdf, self.sql_ctx)

    orderBy = sort

    def _jseq(self, cols, converter=None):
        """Return a JVM Seq of Columns from a list of Column or names"""
        return _to_seq(self.sql_ctx._sc, cols, converter)

    def _jmap(self, jm):
        """Return a JVM Scala Map from a dict"""
        return _to_scala_map(self.sql_ctx._sc, jm)

    def _jcols(self, *cols):
        """Return a JVM Seq of Columns from a list of Column or column names

        If `cols` has only one list in it, cols[0] will be used as the list.
        """
        if len(cols) == 1 and isinstance(cols[0], list):
            cols = cols[0]
        return self._jseq(cols, _to_java_column)

    @since("1.3.1")
    def describe(self, *cols):
        """Computes statistics for numeric columns.

        This includes count, mean, stddev, min, and max.
If no columns are given, this function computes statistics for all numerical columns. .. note:: This function is meant for exploratory data analysis, as we make no \ guarantee about the backward compatibility of the schema of the resulting DataFrame. >>> df.describe().show() +-------+---+ |summary|age| +-------+---+ | count| 2| | mean|3.5| | stddev|1.5| | min| 2| | max| 5| +-------+---+ >>> df.describe(['age', 'name']).show() +-------+---+-----+ |summary|age| name| +-------+---+-----+ | count| 2| 2| | mean|3.5| null| | stddev|1.5| null| | min| 2|Alice| | max| 5| Bob| +-------+---+-----+ """ if len(cols) == 1 and isinstance(cols[0], list): cols = cols[0] jdf = self._jdf.describe(self._jseq(cols)) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def head(self, n=None): """Returns the first ``n`` rows. :param n: int, default 1. Number of rows to return. :return: If n is greater than 1, return a list of :class:`Row`. If n is 1, return a single Row. >>> df.head() Row(age=2, name=u'Alice') >>> df.head(1) [Row(age=2, name=u'Alice')] """ if n is None: rs = self.head(1) return rs[0] if rs else None return self.take(n) @ignore_unicode_prefix @since(1.3) def first(self): """Returns the first row as a :class:`Row`. >>> df.first() Row(age=2, name=u'Alice') """ return self.head() @ignore_unicode_prefix @since(1.3) def __getitem__(self, item): """Returns the column as a :class:`Column`. >>> df.select(df['age']).collect() [Row(age=2), Row(age=5)] >>> df[ ["name", "age"]].collect() [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)] >>> df[ df.age > 3 ].collect() [Row(age=5, name=u'Bob')] >>> df[df[0] > 3].collect() [Row(age=5, name=u'Bob')] """ if isinstance(item, basestring): jc = self._jdf.apply(item) return Column(jc) elif isinstance(item, Column): return self.filter(item) elif isinstance(item, (list, tuple)): return self.select(*item) elif isinstance(item, int): jc = self._jdf.apply(self.columns[item]) return Column(jc) else: raise TypeError("unexpected item type: %s" % type(item)) @since(1.3) def __getattr__(self, name): """Returns the :class:`Column` denoted by ``name``. >>> df.select(df.age).collect() [Row(age=2), Row(age=5)] """ if name not in self.columns: raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, name)) jc = self._jdf.apply(name) return Column(jc) @ignore_unicode_prefix @since(1.3) def select(self, *cols): """Projects a set of expressions and returns a new :class:`DataFrame`. :param cols: list of column names (string) or expressions (:class:`Column`). If one of the column names is '*', that column is expanded to include all columns in the current DataFrame. >>> df.select('*').collect() [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')] >>> df.select('name', 'age').collect() [Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)] >>> df.select(df.name, (df.age + 10).alias('age')).collect() [Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)] """ jdf = self._jdf.select(self._jcols(*cols)) return DataFrame(jdf, self.sql_ctx) @since(1.3) def selectExpr(self, *expr): """Projects a set of SQL expressions and returns a new :class:`DataFrame`. This is a variant of :func:`select` that accepts SQL expressions. 
>>> df.selectExpr("age * 2", "abs(age)").collect() [Row((age * 2)=4, 'abs(age)=2), Row((age * 2)=10, 'abs(age)=5)] """ if len(expr) == 1 and isinstance(expr[0], list): expr = expr[0] jdf = self._jdf.selectExpr(self._jseq(expr)) return DataFrame(jdf, self.sql_ctx) @ignore_unicode_prefix @since(1.3) def filter(self, condition): """Filters rows using the given condition. :func:`where` is an alias for :func:`filter`. :param condition: a :class:`Column` of :class:`types.BooleanType` or a string of SQL expression. >>> df.filter(df.age > 3).collect() [Row(age=5, name=u'Bob')] >>> df.where(df.age == 2).collect() [Row(age=2, name=u'Alice')] >>> df.filter("age > 3").collect() [Row(age=5, name=u'Bob')] >>> df.where("age = 2").collect() [Row(age=2, name=u'Alice')] """ if isinstance(condition, basestring): jdf = self._jdf.filter(condition) elif isinstance(condition, Column): jdf = self._jdf.filter(condition._jc) else: raise TypeError("condition should be string or Column") return DataFrame(jdf, self.sql_ctx) where = filter @ignore_unicode_prefix @since(1.3) def groupBy(self, *cols): """Groups the :class:`DataFrame` using the specified columns, so we can run aggregation on them. See :class:`GroupedData` for all the available aggregate functions. :func:`groupby` is an alias for :func:`groupBy`. :param cols: list of columns to group by. Each element should be a column name (string) or an expression (:class:`Column`). >>> df.groupBy().avg().collect() [Row(avg(age)=3.5)] >>> df.groupBy('name').agg({'age': 'mean'}).collect() [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)] >>> df.groupBy(df.name).avg().collect() [Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)] >>> df.groupBy(['name', df.age]).count().collect() [Row(name=u'Bob', age=5, count=1), Row(name=u'Alice', age=2, count=1)] """ jgd = self._jdf.groupBy(self._jcols(*cols)) from pyspark.sql.group import GroupedData return GroupedData(jgd, self.sql_ctx) @since(1.4) def rollup(self, *cols): """ Create a multi-dimensional rollup for the current :class:`DataFrame` using the specified columns, so we can run aggregation on them. >>> df.rollup('name', df.age).count().show() +-----+----+-----+ | name| age|count| +-----+----+-----+ |Alice|null| 1| | Bob| 5| 1| | Bob|null| 1| | null|null| 2| |Alice| 2| 1| +-----+----+-----+ """ jgd = self._jdf.rollup(self._jcols(*cols)) from pyspark.sql.group import GroupedData return GroupedData(jgd, self.sql_ctx) @since(1.4) def cube(self, *cols): """ Create a multi-dimensional cube for the current :class:`DataFrame` using the specified columns, so we can run aggregation on them. >>> df.cube('name', df.age).count().show() +-----+----+-----+ | name| age|count| +-----+----+-----+ | null| 2| 1| |Alice|null| 1| | Bob| 5| 1| | Bob|null| 1| | null| 5| 1| | null|null| 2| |Alice| 2| 1| +-----+----+-----+ """ jgd = self._jdf.cube(self._jcols(*cols)) from pyspark.sql.group import GroupedData return GroupedData(jgd, self.sql_ctx) @since(1.3) def agg(self, *exprs): """ Aggregate on the entire :class:`DataFrame` without groups (shorthand for ``df.groupBy.agg()``). >>> df.agg({"age": "max"}).collect() [Row(max(age)=5)] >>> from pyspark.sql import functions as F >>> df.agg(F.min(df.age)).collect() [Row(min(age)=2)] """ return self.groupBy().agg(*exprs) @since(1.3) def unionAll(self, other): """ Return a new :class:`DataFrame` containing union of rows in this frame and another frame. This is equivalent to `UNION ALL` in SQL. 
""" return DataFrame(self._jdf.unionAll(other._jdf), self.sql_ctx) @since(1.3) def intersect(self, other): """ Return a new :class:`DataFrame` containing rows only in both this frame and another frame. This is equivalent to `INTERSECT` in SQL. """ return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx) @since(1.3) def subtract(self, other): """ Return a new :class:`DataFrame` containing rows in this frame but not in another frame. This is equivalent to `EXCEPT` in SQL. """ return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx) @since(1.4) def dropDuplicates(self, subset=None): """Return a new :class:`DataFrame` with duplicate rows removed, optionally only considering certain columns. >>> from pyspark.sql import Row >>> df = sc.parallelize([ \ Row(name='Alice', age=5, height=80), \ Row(name='Alice', age=5, height=80), \ Row(name='Alice', age=10, height=80)]).toDF() >>> df.dropDuplicates().show() +---+------+-----+ |age|height| name| +---+------+-----+ | 5| 80|Alice| | 10| 80|Alice| +---+------+-----+ >>> df.dropDuplicates(['name', 'height']).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 5| 80|Alice| +---+------+-----+ """ if subset is None: jdf = self._jdf.dropDuplicates() else: jdf = self._jdf.dropDuplicates(self._jseq(subset)) return DataFrame(jdf, self.sql_ctx) @since("1.3.1") def dropna(self, how='any', thresh=None, subset=None): """Returns a new :class:`DataFrame` omitting rows with null values. :func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other. :param how: 'any' or 'all'. If 'any', drop a row if it contains any nulls. If 'all', drop a row only if all its values are null. :param thresh: int, default None If specified, drop rows that have less than `thresh` non-null values. This overwrites the `how` parameter. :param subset: optional list of column names to consider. >>> df4.na.drop().show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| +---+------+-----+ """ if how is not None and how not in ['any', 'all']: raise ValueError("how ('" + how + "') should be 'any' or 'all'") if subset is None: subset = self.columns elif isinstance(subset, basestring): subset = [subset] elif not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") if thresh is None: thresh = len(subset) if how == 'any' else 1 return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx) @since("1.3.1") def fillna(self, value, subset=None): """Replace null values, alias for ``na.fill()``. :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other. :param value: int, long, float, string, or dict. Value to replace null values with. If the value is a dict, then `subset` is ignored and `value` must be a mapping from column name (string) to replacement value. The replacement value must be an int, long, float, or string. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. 
>>> df4.na.fill(50).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| | 5| 50| Bob| | 50| 50| Tom| | 50| 50| null| +---+------+-----+ >>> df4.na.fill({'age': 50, 'name': 'unknown'}).show() +---+------+-------+ |age|height| name| +---+------+-------+ | 10| 80| Alice| | 5| null| Bob| | 50| null| Tom| | 50| null|unknown| +---+------+-------+ """ if not isinstance(value, (float, int, long, basestring, dict)): raise ValueError("value should be a float, int, long, string, or dict") if isinstance(value, (int, long)): value = float(value) if isinstance(value, dict): return DataFrame(self._jdf.na().fill(value), self.sql_ctx) elif subset is None: return DataFrame(self._jdf.na().fill(value), self.sql_ctx) else: if isinstance(subset, basestring): subset = [subset] elif not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx) @since(1.4) def replace(self, to_replace, value, subset=None): """Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. :param to_replace: int, long, float, string, or list. Value to be replaced. If the value is a dict, then `value` is ignored and `to_replace` must be a mapping from column name (string) to replacement value. The value to be replaced must be an int, long, float, or string. :param value: int, long, float, string, or list. Value to use to replace holes. The replacement value must be an int, long, float, or string. If `value` is a list or tuple, `value` should be of the same length with `to_replace`. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. 
>>> df4.na.replace(10, 20).show() +----+------+-----+ | age|height| name| +----+------+-----+ | 20| 80|Alice| | 5| null| Bob| |null| null| Tom| |null| null| null| +----+------+-----+ >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80| A| | 5| null| B| |null| null| Tom| |null| null|null| +----+------+----+ """ if not isinstance(to_replace, (float, int, long, basestring, list, tuple, dict)): raise ValueError( "to_replace should be a float, int, long, string, list, tuple, or dict") if not isinstance(value, (float, int, long, basestring, list, tuple)): raise ValueError("value should be a float, int, long, string, list, or tuple") rep_dict = dict() if isinstance(to_replace, (float, int, long, basestring)): to_replace = [to_replace] if isinstance(to_replace, tuple): to_replace = list(to_replace) if isinstance(value, tuple): value = list(value) if isinstance(to_replace, list) and isinstance(value, list): if len(to_replace) != len(value): raise ValueError("to_replace and value lists should be of the same length") rep_dict = dict(zip(to_replace, value)) elif isinstance(to_replace, list) and isinstance(value, (float, int, long, basestring)): rep_dict = dict([(tr, value) for tr in to_replace]) elif isinstance(to_replace, dict): rep_dict = to_replace if subset is None: return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx) elif isinstance(subset, basestring): subset = [subset] if not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") return DataFrame( self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx) @since(1.4) def corr(self, col1, col2, method=None): """ Calculates the correlation of two columns of a DataFrame as a double value. Currently only supports the Pearson Correlation Coefficient. :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other. :param col1: The name of the first column :param col2: The name of the second column :param method: The correlation method. Currently only supports "pearson" """ if not isinstance(col1, str): raise ValueError("col1 should be a string.") if not isinstance(col2, str): raise ValueError("col2 should be a string.") if not method: method = "pearson" if not method == "pearson": raise ValueError("Currently only the calculation of the Pearson Correlation " + "coefficient is supported.") return self._jdf.stat().corr(col1, col2, method) @since(1.4) def cov(self, col1, col2): """ Calculate the sample covariance for the given columns, specified by their names, as a double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases. :param col1: The name of the first column :param col2: The name of the second column """ if not isinstance(col1, str): raise ValueError("col1 should be a string.") if not isinstance(col2, str): raise ValueError("col2 should be a string.") return self._jdf.stat().cov(col1, col2) @since(1.4) def crosstab(self, col1, col2): """ Computes a pair-wise frequency table of the given columns. Also known as a contingency table. The number of distinct values for each column should be less than 1e4. At most 1e6 non-zero pair frequencies will be returned. The first column of each row will be the distinct values of `col1` and the column names will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`. Pairs that have no occurrences will have zero as their counts. 
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases. :param col1: The name of the first column. Distinct items will make the first item of each row. :param col2: The name of the second column. Distinct items will make the column names of the DataFrame. """ if not isinstance(col1, str): raise ValueError("col1 should be a string.") if not isinstance(col2, str): raise ValueError("col2 should be a string.") return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx) @since(1.4) def freqItems(self, cols, support=None): """ Finding frequent items for columns, possibly with false positives. Using the frequent element count algorithm described in "http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou". :func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases. .. note:: This function is meant for exploratory data analysis, as we make no \ guarantee about the backward compatibility of the schema of the resulting DataFrame. :param cols: Names of the columns to calculate frequent items for as a list or tuple of strings. :param support: The frequency with which to consider an item 'frequent'. Default is 1%. The support must be greater than 1e-4. """ if isinstance(cols, tuple): cols = list(cols) if not isinstance(cols, list): raise ValueError("cols must be a list or tuple of column names as strings.") if not support: support = 0.01 return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx) @ignore_unicode_prefix @since(1.3) def withColumn(self, colName, col): """ Returns a new :class:`DataFrame` by adding a column or replacing the existing column that has the same name. :param colName: string, name of the new column. :param col: a :class:`Column` expression for the new column. >>> df.withColumn('age2', df.age + 2).collect() [Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)] """ assert isinstance(col, Column), "col should be Column" return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx) @ignore_unicode_prefix @since(1.3) def withColumnRenamed(self, existing, new): """Returns a new :class:`DataFrame` by renaming an existing column. :param existing: string, name of the existing column to rename. :param col: string, new name of the column. >>> df.withColumnRenamed('age', 'age2').collect() [Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')] """ return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx) @since(1.4) @ignore_unicode_prefix def drop(self, col): """Returns a new :class:`DataFrame` that drops the specified column. :param col: a string name of the column to drop, or a :class:`Column` to drop. >>> df.drop('age').collect() [Row(name=u'Alice'), Row(name=u'Bob')] >>> df.drop(df.age).collect() [Row(name=u'Alice'), Row(name=u'Bob')] >>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect() [Row(age=5, height=85, name=u'Bob')] >>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect() [Row(age=5, name=u'Bob', height=85)] """ if isinstance(col, basestring): jdf = self._jdf.drop(col) elif isinstance(col, Column): jdf = self._jdf.drop(col._jc) else: raise TypeError("col should be a string or a Column") return DataFrame(jdf, self.sql_ctx) @since(1.3) def toPandas(self): """Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``. This is only available if Pandas is installed and available. 
>>> df.toPandas() # doctest: +SKIP age name 0 2 Alice 1 5 Bob """ import pandas as pd return pd.DataFrame.from_records(self.collect(), columns=self.columns) ########################################################################################## # Pandas compatibility ########################################################################################## groupby = groupBy drop_duplicates = dropDuplicates # Having SchemaRDD for backward compatibility (for docs) class SchemaRDD(DataFrame): """SchemaRDD is deprecated, please use :class:`DataFrame`. """ def _to_scala_map(sc, jm): """ Convert a dict into a JVM Map. """ return sc._jvm.PythonUtils.toScalaMap(jm) class DataFrameNaFunctions(object): """Functionality for working with missing data in :class:`DataFrame`. .. versionadded:: 1.4 """ def __init__(self, df): self.df = df def drop(self, how='any', thresh=None, subset=None): return self.df.dropna(how=how, thresh=thresh, subset=subset) drop.__doc__ = DataFrame.dropna.__doc__ def fill(self, value, subset=None): return self.df.fillna(value=value, subset=subset) fill.__doc__ = DataFrame.fillna.__doc__ def replace(self, to_replace, value, subset=None): return self.df.replace(to_replace, value, subset) replace.__doc__ = DataFrame.replace.__doc__ class DataFrameStatFunctions(object): """Functionality for statistic functions with :class:`DataFrame`. .. versionadded:: 1.4 """ def __init__(self, df): self.df = df def corr(self, col1, col2, method=None): return self.df.corr(col1, col2, method) corr.__doc__ = DataFrame.corr.__doc__ def cov(self, col1, col2): return self.df.cov(col1, col2) cov.__doc__ = DataFrame.cov.__doc__ def crosstab(self, col1, col2): return self.df.crosstab(col1, col2) crosstab.__doc__ = DataFrame.crosstab.__doc__ def freqItems(self, cols, support=None): return self.df.freqItems(cols, support) freqItems.__doc__ = DataFrame.freqItems.__doc__ def sampleBy(self, col, fractions, seed=None): return self.df.sampleBy(col, fractions, seed) sampleBy.__doc__ = DataFrame.sampleBy.__doc__ def _test(): import doctest from pyspark.context import SparkContext from pyspark.sql import Row, SQLContext import pyspark.sql.dataframe globs = pyspark.sql.dataframe.__dict__.copy() sc = SparkContext('local[4]', 'PythonTest') globs['sc'] = sc globs['sqlContext'] = SQLContext(sc) globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\ .toDF(StructType([StructField('age', IntegerType()), StructField('name', StringType())])) globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF() globs['df3'] = sc.parallelize([Row(name='Alice', age=2), Row(name='Bob', age=5)]).toDF() globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80), Row(name='Bob', age=5, height=None), Row(name='Tom', age=None, height=None), Row(name=None, age=None, height=None)]).toDF() (failure_count, test_count) = doctest.testmod( pyspark.sql.dataframe, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
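# ---------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the module above). Assumes a local
# Spark 1.x installation; `sc`, `sqlContext`, `people`, and `heights` are
# hypothetical names. It exercises the na-fill and equi-join paths that the
# DataFrame methods above delegate to the JVM:
#
#     from pyspark import SparkContext
#     from pyspark.sql import SQLContext, Row
#
#     sc = SparkContext('local[2]', 'dataframe-sketch')
#     sqlContext = SQLContext(sc)   # also enables RDD.toDF()
#     people = sc.parallelize([Row(name='Alice', age=2),
#                              Row(name='Bob', age=None)]).toDF()
#     heights = sc.parallelize([Row(name='Bob', height=85)]).toDF()
#     cleaned = people.na.fill({'age': 0})            # per-column null fill
#     print(cleaned.join(heights, 'name').collect())  # equi-join on 'name'
#     sc.stop()
# ---------------------------------------------------------------------------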
#!/usr/bin/python2 -u import os import sys import json import socket import getopt from collections import Counter import nmap import ipaddr import numpy as np import matplotlib matplotlib.use('Agg') from matplotlib import mlab, pyplot as plt def _delta(a): return a[1] - a[0] def _bar_label(rects, artist, offset_ratio=0.01): yoffset = offset_ratio * _delta(artist.ylim()) for rect in rects: height = rect.get_height() artist.text(rect.get_x() + rect.get_width() / 2., height + yoffset, '%d' % int(height), ha='center', va='bottom') def _barh_label(rects, artist, offset_ratio=0.01): xoffset = offset_ratio * _delta(artist.xlim()) for rect in rects: width = rect.get_width() artist.text(width + xoffset, rect.get_y() + rect.get_height() / 2., '%d' % int(width), ha='left', va='center') class Task(object): pass # nmap arguments used: # -sn ping scan only # -T4 quick scan # -F scan only the most popular 100 ports class OpenPorts(Task): '''Scan a subnet for open port statistics.''' def __init__(self, subnet): self.net = ipaddr.IPv4Network(subnet) self.addrs = [addr.exploded for addr in self.net] def scan(self): services = '' results = [] for addr in self.addrs: nm = nmap.PortScanner() #try: if 1: result = nm.scan(addr, arguments='-T4 -F') if not services: services = result['nmap']['scaninfo']['tcp']['services'] if not result['scan'].has_key(addr): sys.stderr.write('%s: no info\n' % addr) continue ports_stat = result['scan'][addr].get('tcp', {}) open_ports = [] for p, info in ports_stat.items(): if info['state'] == u'open': open_ports.append(p) sys.stderr.write('%s %r\n' % (addr, open_ports)) results.append((addr, open_ports)) #except nmap.PortScannerError: else: pass return results @classmethod def plot(cls, results): figures = [] num_open_ports = [] port_hits = Counter() for addr, open_ports in results: num_open_ports.append(len(open_ports)) port_hits.update(Counter(open_ports)) # plot 1: distribution of number of open ports bins = np.arange(11) * 10 plt.suptitle('number of opened ports: distribution') plt.hist(num_open_ports, bins) plt.xticks(bins) figures.append(('openports-dist', plt.gcf())) plt.figure() def _getserv(p): try: return socket.getservbyport(p, 'tcp') except socket.error: return '?' 
# plot 2: top list of opened ports N = 10 top_ports = port_hits.most_common(N) ports, hits = zip(*top_ports) plt.suptitle('top open tcp ports') plt.grid() plt.ylim(N-0.5, -0.5) ticks = ['%d\n%s' % (i, _getserv(i)) for i in ports] plt.yticks(np.arange(N), ticks) rects = plt.barh(np.arange(N), hits, align='center') _barh_label(rects, plt) figures.append(('openports-top', plt.gcf())) return figures class UpHosts(Task): '''Scan a series of subnets for online hosts.''' def __init__(self, template, xs): self.template = template self.xs = xs def scan(self): results = [] for x in self.xs: net = self.template.format(x=x) nm = nmap.PortScanner() try: nm.scan(net, arguments='-sn -T4') uphosts = int(nm.scanstats()['uphosts']) except nmap.PortScannerError: # NOTE The only known cause of exception here is "RTTVAR has # grown to over 2.3 seconds, decreasing to 2.0" uphosts = None sys.stderr.write('%d %s %r\n' % (x, net, uphosts)) results.append((x, uphosts)) return results @classmethod def plot(cls, results): figures = [] xs, ys = zip(*results) ys = [y or 0 for y in ys] # XXX hardcoded plt.suptitle('uphosts of 59.66.x.0/24') plt.gcf().set_size_inches(10, 40) plt.xticks(np.arange(5) * 64) plt.yticks(np.arange(256)) plt.grid() plt.axis([0, 256, 255.5, -0.5]) plt.barh(xs, ys, align='center') figures.append(('uphosts', plt.gcf())) plt.figure() bins = np.arange(17) * 16 plt.suptitle('uphosts of 59.66.x.0/24: distribution') plt.hist(ys, bins) plt.xticks(bins) figures.append(('uphosts-dist', plt.gcf())) return figures def main(): def _usage(): raise ValueError #sys.stderr.write('Usage: no doc yet\n') #sys.exit(1) SCAN = 'scan' PLOT = 'plot' UPHOSTS = 'uphosts' OPENPORTS = 'openports' opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:') opts = dict(opts) fname = opts.get('-f', None) if len(args) < 2: _usage() task, verb = args[0:2] additional = args[2:] if verb == SCAN: if os.getuid() != 0: sys.stderr.write('Warning: Not scanning as root. ' 'Performance may be degraded.\n') if task == UPHOSTS: worker = UpHosts('59.66.{x}.0/24', range(256)) elif task == OPENPORTS: subnet = additional and additional[0] or '59.66.0.0/16' worker = OpenPorts(subnet) else: _usage() if fname: stream = open(fname, 'w') else: stream = sys.stdout json.dump(worker.scan(), stream) if fname: stream.close() elif verb == PLOT: if task == UPHOSTS: cls = UpHosts elif task == OPENPORTS: cls = OpenPorts else: _usage() if fname: stream = open(fname, 'r') else: stream = sys.stdin results = json.load(stream) if fname: stream.close() folder = additional and additional[0] or '.' figures = cls.plot(results) for name, fig in figures: fig.savefig(os.path.join(folder, '%s.png' % name), bbox_inches='tight') del fig else: _usage() if __name__ == '__main__': main()
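# ---------------------------------------------------------------------------
# Command-line usage sketch, derived from main() above. The script name and
# the file/folder arguments are examples only:
#
#     # scan (run as root for best results; results are dumped as JSON)
#     sudo ./scan.py -f openports.json openports scan 59.66.0.0/16
#     sudo ./scan.py -f uphosts.json uphosts scan
#
#     # plot (reads the JSON back; PNGs are written into the given folder)
#     ./scan.py -f openports.json openports plot ./figures
#     ./scan.py -f uphosts.json uphosts plot ./figures
#
# Without -f, scan writes JSON to stdout and plot reads it from stdin, so the
# two steps can also be piped together.
# ---------------------------------------------------------------------------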
# -*- coding: utf-8 -*- """ Abstract interface to bounce windows and moratoria. """ __author__ = 'Jathan McCollum, Mark Thomas, Michael Shields' __maintainer__ = 'Jathan McCollum' __email__ = 'jathan.mccollum@teamaol.com' __copyright__ = 'Copyright 2006-2012, AOL Inc.' # Imports from datetime import datetime, timedelta from pytz import timezone, UTC from trigger.conf import settings from trigger import exceptions # Constants BOUNCE_VALUES = ('green', 'yellow', 'red') BOUNCE_DEFAULT_TZ = timezone(settings.BOUNCE_DEFAULT_TZ) BOUNCE_DEFAULT_COLOR = settings.BOUNCE_DEFAULT_COLOR BOUNCE_VALUE_MAP = { 'red': 3, 'yellow': 2, 'green': 1, } # Exports __all__ = ('BounceStatus', 'BounceWindow', 'bounce') # Classes class BounceStatus(object): """ An object that represents a bounce window risk-level status. + green: Low risk + yellow: Medium risk + red: High risk Objects stringify to 'red', 'green', or 'yellow', and can be compared against those strings. Objects can also be compared against each other. 'red' > 'yellow' > 'green'. >>> green = BounceStatus('green') >>> yellow = BounceStatus('yellow') >>> print green green >>> yellow > green True :param status_name: The colored risk-level status name. """ def __init__(self, status_name): self.status_name = status_name self.value = BOUNCE_VALUES.index(status_name) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.status_name) def __str__(self): return self.status_name def __cmp__(self, other): try: return self.value.__cmp__(other.value) except AttributeError: # Other object is not a BounceStatus; maybe it's a string. return self.value.__cmp__(BounceStatus(other).value) class BounceWindow(object): """ Build a bounce window of 24 `~trigger.changemgmt.BounceStatus` objects. You may either specify your own list of 24 `~trigger.changemgmt.BounceStatus` objects using ``status_by_hour``, or you may omit this argument and specify your 'green', 'yellow', and 'red' risk levels by using hyphenated and comma-separated text strings. You may use digits ("14") or hyphenated ranges ("0-5") and may join these together using a comma (",") with or without spacing separating them. For example "0-5, 14" will be parsed into ``[0, 1, 2, 3, 4, 5, 14]``. The `default` color is used to fill in the gaps between the other colors, so that the total is always 24 in the resultant list status objects. >>> b = BounceWindow(green='0-3, 23', red='10', default='yellow') >>> b.status() <BounceStatus: yellow> >>> b.next_ok('green') datetime.datetime(2012, 12, 5, 4, 0, tzinfo=<UTC>) >>> b.dump() {0: <BounceStatus: green>, 1: <BounceStatus: green>, 2: <BounceStatus: green>, 3: <BounceStatus: green>, 4: <BounceStatus: yellow>, 5: <BounceStatus: yellow>, 6: <BounceStatus: yellow>, 7: <BounceStatus: yellow>, 8: <BounceStatus: yellow>, 9: <BounceStatus: yellow>, 10: <BounceStatus: red>, 11: <BounceStatus: yellow>, 12: <BounceStatus: yellow>, 13: <BounceStatus: yellow>, 14: <BounceStatus: yellow>, 15: <BounceStatus: yellow>, 16: <BounceStatus: yellow>, 17: <BounceStatus: yellow>, 18: <BounceStatus: yellow>, 19: <BounceStatus: yellow>, 20: <BounceStatus: yellow>, 21: <BounceStatus: yellow>, 22: <BounceStatus: yellow>, 23: <BounceStatus: green>} You may modify the global default fallback color by setting :setting:`BOUNCE_DEFAULT_COLOR` in your ``settings.py``. 
Although the query API is generic and could accomodate any sort of bounce window policy, this constructor knows only about AOL's bounce windows, which operate on "US/Eastern" time (worldwide), always change on hour boundaries, and are the same every day. If that ever changes, only this class will need to be updated. End-users are not expected to create new ``BounceWindow`` objects; instead, use `~trigger.changemgmt.bounce()` or `~trigger.netdevices.NetDevice.bounce` to get an object, then query its methods. :param status_by_hour: (Optional) A list of 24 `~trigger.changemgmt.BounceStatus` objects. :param green: Representative string of hours. :param yellow: Representative string of hours. :param red: Representative string of hours. :param default: The color used to fill in the gaps between other risk levels. """ # Prepopulate these objects to save a little horsepower BOUNCE_STATUS = dict([(n, BounceStatus(n)) for n in BOUNCE_VALUES]) def __init__(self, status_by_hour=None, green=None, yellow=None, red=None, default=BOUNCE_DEFAULT_COLOR): # Parse the hours specified into BounceWindows self._green = green self._yellow = yellow self._red = red self.default = default hours = { 'green': self._parse_hours(green), 'yellow': self._parse_hours(yellow), 'red': self._parse_hours(red), } self.hours = hours self.hour_map = self._map_bounces(self.hours, default=default) # Allow for providing status_by_hour, but don't rely on it if status_by_hour is None: status_by_hour = self.hour_map.values() if not len(status_by_hour) == 24: msg = 'There must be exactly 24 hours defined for this BounceWindow.' raise exceptions.InvalidBounceWindow(msg) # Make sure each status occurs at least once, or next_ok() # might never return. for status in BOUNCE_VALUE_MAP: if status not in status_by_hour: msg = '%s risk-level must be defined!' % status raise exceptions.InvalidBounceWindow(msg) self._status_by_hour = status_by_hour def __repr__(self): return "%s(green=%r, yellow=%r, red=%r, default=%r)" % (self.__class__.__name__, self._green, self._yellow, self._red, self.default) def status(self, when=None): """ Return a `~trigger.changemgmt.BounceStatus` object for the specified time or now. :param when: A ``datetime`` object. """ when_et = (when or datetime.now(tz=UTC)).astimezone(BOUNCE_DEFAULT_TZ) # Return default during weekend moratorium, otherwise look it up. if (when_et.weekday() >= 5 or when_et.weekday() == 0 and when_et.hour < 4 or when_et.weekday() == 4 and when_et.hour >= 12): return BounceStatus(BOUNCE_DEFAULT_COLOR) else: return self._status_by_hour[when_et.hour] def next_ok(self, status, when=None): """ Return the next time at or after the specified time (default now) that it the bounce status will be at equal to or less than the given status. For example, ``next_ok('yellow')`` will return the time that the bounce window becomes 'yellow' or 'green'. Returns UTC time. :param status: The colored risk-level status name. :param when: A ``datetime`` object. """ when = when or datetime.now(tz=UTC) if self.status(when) <= status: return when.astimezone(UTC) when = datetime(when.year, when.month, when.day, when.hour, tzinfo=UTC) when += timedelta(hours=1) while self.status(when) > status: when += timedelta(hours=1) return when def dump(self): """Dump a mapping of hour to status""" return self.hour_map def _get_bounces(self, hours, color): """ Return a list of hours mapped to bounce objects :param hours: A list of integers representing hours :param color: The risk-level color name. 
""" return zip(hours, [self.BOUNCE_STATUS[color]] * len(hours)) def _map_bounces(self, hdict, default=None): """ Map a dictionary of colors and hours into a dictionary keyed by hour and the appropriate BounceStatus object. :param hdict: Dictionary mapping of hours to status objects. :param default: The default bounce status name. """ if default is None: default = self.default status = [] for color, hours in hdict.iteritems(): status.extend(self._get_bounces(hours, color)) # Fill in missing keys with the default color missing = [i for i in range(24) if i not in dict(status)] if missing: status.extend(self._get_bounces(missing, default)) return dict(status) def _parse_hours(self, hs): """ Parse hour strings into lists of hours. Or if a list of hours is passed in, just return it as is. >>> parse_hours('0-3, 23') [0, 1, 2, 3, 23] parse_hours(range(3)) [0, 1, 2] :param hs: A string representation of hours. """ myhours = [] if hs is None: return myhours # Assume it's a list of integers? if isinstance(hs, list): return hs # Split the pattern by ',' and then trim whitespace, carve hyphenated # ranges out and then return a list of hours. More error-checking # Coming "Soon". blocks = hs.split(',') for block in blocks: # Clean whitespace and split on hyphens parts = block.strip().split('-') parts = [int(p) for p in parts] # make ints if len(parts) == 1: # no hyphen parts.append(parts[0] + 1) elif len(parts) == 2: parts[1] += 1 else: raise RuntimeError("This should not have happened!") # Return the individual hours for i in range(*parts): myhours.append(i) return myhours # Load ``bounce()`` from the location of ``bounce.py`` or provide a dummy that # returns a hard-coded bounce window from .bounce import bounce
from direct.distributed import DoHierarchy import re #hack: BAD_DO_ID = BAD_ZONE_ID = 0 # 0xFFFFFFFF BAD_CHANNEL_ID = 0 # 0xFFFFFFFFFFFFFFFF class DoCollectionManager: def __init__(self): # Dict of {DistributedObject ids: DistributedObjects} self.doId2do = {} # (parentId, zoneId) to dict of doId->DistributedObjectAI ## self.zoneId2doIds={} if self.hasOwnerView(): # Dict of {DistributedObject ids: DistributedObjects} # for 'owner' views of objects self.doId2ownerView = {} # Dict of { # parent DistributedObject id: # { zoneIds: [child DistributedObject ids] }} self._doHierarchy = DoHierarchy.DoHierarchy() def getDo(self, doId): return self.doId2do.get(doId) def getGameDoId(self): return self.GameGlobalsId def callbackWithDo(self, doId, callback): do = self.doId2do.get(doId) if do is not None: callback(do) else: relatedObjectMgr(doId, allCallback=callback) def getOwnerView(self, doId): assert self.hasOwnerView() return self.doId2ownerView.get(doId) def callbackWithOwnerView(self, doId, callback): assert self.hasOwnerView() do = self.doId2ownerView.get(doId) if do is not None: callback(do) else: pass #relatedObjectMgr(doId, allCallback=callback) def getDoTable(self, ownerView): if ownerView: assert self.hasOwnerView() return self.doId2ownerView else: return self.doId2do def doFind(self, str): """ Returns list of distributed objects with matching str in value. """ for value in self.doId2do.values(): if repr(value).find(str) >= 0: return value def doFindAll(self, str): """ Returns list of distributed objects with matching str in value. """ matches = [] for value in self.doId2do.values(): if repr(value).find(str) >= 0: matches.append(value) return matches def doFindAllMatching(self, str): """ Returns list of distributed objects with matching str in value. 
""" matches = [] for value in self.doId2do.values(): if re.search(str,repr(value)): matches.append(value) return matches def doFindAllOfType(self, query): """ Useful method for searching through the Distributed Object collection for objects of a particular type """ matches = [] for value in self.doId2do.values(): if query in str(value.__class__): matches.append(value) return matches, len(matches) def doFindAllInstances(self, cls): matches = [] for value in self.doId2do.values(): if isinstance(value, cls): matches.append(value) return matches def _getDistanceFromLA(self, do): if hasattr(do, 'getPos'): return do.getPos(localAvatar).length() return None def _compareDistance(self, do1, do2): dist1 = self._getDistanceFromLA(do1) dist2 = self._getDistanceFromLA(do2) if dist1 is None and dist2 is None: return 0 if dist1 is None: return 1 if dist2 is None: return -1 if (dist1 < dist2): return -1 return 1 def dosByDistance(self): objs = list(self.doId2do.values()) objs.sort(cmp=self._compareDistance) return objs def doByDistance(self): objs = self.dosByDistance() for obj in objs: print('%s\t%s\t%s' % (obj.doId, self._getDistanceFromLA(obj), obj.dclass.getName())) if __debug__: def printObjects(self): format="%10s %10s %10s %30s %20s" title=format%("parentId", "zoneId", "doId", "dclass", "name") print(title) print('-'*len(title)) for distObj in self.doId2do.values(): print(format%( distObj.__dict__.get("parentId"), distObj.__dict__.get("zoneId"), distObj.__dict__.get("doId"), distObj.dclass.getName(), distObj.__dict__.get("name"))) def _printObjects(self, table): class2count = {} for obj in self.getDoTable(ownerView=False).values(): className = obj.__class__.__name__ class2count.setdefault(className, 0) class2count[className] += 1 count2classes = invertDictLossless(class2count) counts = list(count2classes.keys()) counts.sort() counts.reverse() for count in counts: count2classes[count].sort() for name in count2classes[count]: print('%s %s' % (count, name)) print('') def _returnObjects(self, table): class2count = {} stringToReturn = '' for obj in self.getDoTable(ownerView=False).values(): className = obj.__class__.__name__ class2count.setdefault(className, 0) class2count[className] += 1 count2classes = invertDictLossless(class2count) counts = list(count2classes.keys()) counts.sort() counts.reverse() for count in counts: count2classes[count].sort() for name in count2classes[count]: # print '%s %s' % (count, name) stringToReturn = '%s%s %s\n' % (stringToReturn, count, name) # print '' return stringToReturn def webPrintObjectCount(self): strToReturn = '==== OBJECT COUNT ====\n' if self.hasOwnerView(): strToReturn = '%s == doId2do\n' % (strToReturn) strToReturn = '%s%s' % (strToReturn, self._returnObjects(self.getDoTable(ownerView=False))) if self.hasOwnerView(): strToReturn = '%s\n== doId2ownerView\n' % (strToReturn) strToReturn = '%s%s' % (strToReturn, self._returnObjects(self.getDoTable(ownerView=False))) return strToReturn def printObjectCount(self): # print object counts by distributed object type print('==== OBJECT COUNT ====') if self.hasOwnerView(): print('== doId2do') self._printObjects(self.getDoTable(ownerView=False)) if self.hasOwnerView(): print('== doId2ownerView') self._printObjects(self.getDoTable(ownerView=True)) def getDoList(self, parentId, zoneId=None, classType=None): """ parentId is any distributed object id. zoneId is a uint32, defaults to None (all zones). 
Try zone 2 if you're not sure which zone to use (0 is a bad/null zone and 1 has had reserved use in the past as a no messages zone, while 2 has traditionally been a global, uber, misc stuff zone). dclassType is a distributed class type filter, defaults to None (no filter). If dclassName is None then all objects in the zone are returned; otherwise the list is filtered to only include objects of that type. """ return [self.doId2do.get(i) for i in self.getDoIdList(parentId, zoneId, classType)] def getDoIdList(self, parentId, zoneId=None, classType=None): return self._doHierarchy.getDoIds(self.getDo, parentId, zoneId, classType) def hasOwnerViewDoId(self, doId): assert self.hasOwnerView() return doId in self.doId2ownerView def getOwnerViewDoList(self, classType): assert self.hasOwnerView() l = [] for obj in self.doId2ownerView.values(): if isinstance(obj, classType): l.append(obj) return l def getOwnerViewDoIdList(self, classType): assert self.hasOwnerView() l = [] for doId, obj in self.doId2ownerView.items(): if isinstance(obj, classType): l.append(doId) return l def countObjects(self, classType): """ Counts the number of objects of the given type in the repository (for testing purposes) """ count = 0 for dobj in self.doId2do.values(): if isinstance(dobj, classType): count += 1 return count def getAllOfType(self, type): # Returns a list of all DistributedObjects in the repository # of a particular type. result = [] for obj in self.doId2do.values(): if isinstance(obj, type): result.append(obj) return result def findAnyOfType(self, type): # Searches the repository for any object of the given type. for obj in self.doId2do.values(): if isinstance(obj, type): return obj return None #---------------------------------- def deleteDistributedObjects(self): # Get rid of all the distributed objects for doId in self.doId2do.keys(): # Look up the object do = self.doId2do[doId] self.deleteDistObject(do) # Get rid of everything that manages distributed objects self.deleteObjects() # the zoneId2doIds table should be empty now if not self._doHierarchy.isEmpty(): self.notify.warning( '_doHierarchy table not empty: %s' % self._doHierarchy) self._doHierarchy.clear() def handleObjectLocation(self, di): # CLIENT_OBJECT_LOCATION doId = di.getUint32() parentId = di.getUint32() zoneId = di.getUint32() obj = self.doId2do.get(doId) if obj is not None: self.notify.debug( "handleObjectLocation: doId: %s parentId: %s zoneId: %s"% (doId, parentId, zoneId)) # Let the object finish the job # calls storeObjectLocation() obj.setLocation(parentId, zoneId) else: self.notify.warning( "handleObjectLocation: Asked to update non-existent obj: %s" % (doId)) def handleSetLocation(self, di): # This was initially added because creating a distributed quest # object would cause a message like this to be generated. 
assert self.notify.debugStateCall(self) parentId = di.getUint32() zoneId = di.getUint32() distObj = self.doId2do.get(self.getMsgChannel()) if distObj is not None: distObj.setLocation(parentId, zoneId) else: self.notify.warning('handleSetLocation: object %s not present' % self.getMsgChannel()) @exceptionLogged() def storeObjectLocation(self, object, parentId, zoneId): oldParentId = object.parentId oldZoneId = object.zoneId if (oldParentId != parentId): # notify any existing parent that we're moving away oldParentObj = self.doId2do.get(oldParentId) if oldParentObj is not None: oldParentObj.handleChildLeave(object, oldZoneId) self.deleteObjectLocation(object, oldParentId, oldZoneId) elif (oldZoneId != zoneId): # Remove old location oldParentObj = self.doId2do.get(oldParentId) if oldParentObj is not None: oldParentObj.handleChildLeaveZone(object, oldZoneId) self.deleteObjectLocation(object, oldParentId, oldZoneId) else: # object is already at that parent and zone return if ((parentId is None) or (zoneId is None) or (parentId == zoneId == 0)): # Do not store null values object.parentId = None object.zoneId = None else: # Add to new location self._doHierarchy.storeObjectLocation(object, parentId, zoneId) # this check doesn't work because of global UD objects; # should they have a location? #assert len(self._doHierarchy) == len(self.doId2do) # Set the new parent and zone on the object object.parentId = parentId object.zoneId = zoneId if oldParentId != parentId: # Give the parent a chance to run code when a new child # sets location to it. For example, the parent may want to # scene graph reparent the child to some subnode it owns. parentObj = self.doId2do.get(parentId) if parentObj is not None: parentObj.handleChildArrive(object, zoneId) elif parentId not in (None, 0, self.getGameDoId()): self.notify.warning('storeObjectLocation(%s): parent %s not present' % (object.doId, parentId)) elif oldZoneId != zoneId: parentObj = self.doId2do.get(parentId) if parentObj is not None: parentObj.handleChildArriveZone(object, zoneId) elif parentId not in (None, 0, self.getGameDoId()): self.notify.warning('storeObjectLocation(%s): parent %s not present' % (object.doId, parentId)) def deleteObjectLocation(self, object, parentId, zoneId): # Do not worry about null values if ((parentId is None) or (zoneId is None) or (parentId == zoneId == 0)): return self._doHierarchy.deleteObjectLocation(object, parentId, zoneId) def addDOToTables(self, do, location=None, ownerView=False): assert self.notify.debugStateCall(self) #assert not hasattr(do, "isQueryAllResponse") or not do.isQueryAllResponse if not ownerView: if location is None: location = (do.parentId, do.zoneId) doTable = self.getDoTable(ownerView) # make sure the object is not already present if do.doId in doTable: if ownerView: tableName = 'doId2ownerView' else: tableName = 'doId2do' self.notify.error('doId %s already in %s [%s stomping %s]' % ( do.doId, tableName, do.__class__.__name__, doTable[do.doId].__class__.__name__)) doTable[do.doId]=do if not ownerView: if self.isValidLocationTuple(location): self.storeObjectLocation(do, location[0], location[1]) ##assert do.doId not in self.zoneId2doIds.get(location, {}) ##self.zoneId2doIds.setdefault(location, {}) ##self.zoneId2doIds[location][do.doId]=do def isValidLocationTuple(self, location): return (location is not None and location != (0xffffffff, 0xffffffff) and location != (0, 0)) if __debug__: def isInDoTables(self, doId): assert self.notify.debugStateCall(self) return doId in self.doId2do def 
removeDOFromTables(self, do): assert self.notify.debugStateCall(self) #assert not hasattr(do, "isQueryAllResponse") or not do.isQueryAllResponse #assert do.doId in self.doId2do location = do.getLocation() if location: oldParentId, oldZoneId = location oldParentObj = self.doId2do.get(oldParentId) if oldParentObj: oldParentObj.handleChildLeave(do, oldZoneId) self.deleteObjectLocation(do, do.parentId, do.zoneId) ## location = do.getLocation() ## if location is not None: ## if location not in self.zoneId2doIds: ## self.notify.warning( ## 'dobj %s (%s) has invalid location: %s' % ## (do, do.doId, location)) ## else: ## assert do.doId in self.zoneId2doIds[location] ## del self.zoneId2doIds[location][do.doId] ## if len(self.zoneId2doIds[location]) == 0: ## del self.zoneId2doIds[location] if do.doId in self.doId2do: del self.doId2do[do.doId] ## def changeDOZoneInTables(self, do, newParentId, newZoneId, oldParentId, oldZoneId): ## if 1: ## self.storeObjectLocation(do.doId, newParentId, newZoneId) ## else: ## #assert not hasattr(do, "isQueryAllResponse") or not do.isQueryAllResponse ## oldLocation = (oldParentId, oldZoneId) ## newLocation = (newParentId, newZoneId) ## # HACK: DistributedGuildMemberUD starts in -1, -1, which isnt ever put in the ## # zoneId2doIds table ## if self.isValidLocationTuple(oldLocation): ## assert self.notify.debugStateCall(self) ## assert oldLocation in self.zoneId2doIds ## assert do.doId in self.zoneId2doIds[oldLocation] ## assert do.doId not in self.zoneId2doIds.get(newLocation, {}) ## # remove from old zone ## del(self.zoneId2doIds[oldLocation][do.doId]) ## if len(self.zoneId2doIds[oldLocation]) == 0: ## del self.zoneId2doIds[oldLocation] ## if self.isValidLocationTuple(newLocation): ## # add to new zone ## self.zoneId2doIds.setdefault(newLocation, {}) ## self.zoneId2doIds[newLocation][do.doId]=do def getObjectsInZone(self, parentId, zoneId): """ returns dict of doId:distObj for a zone. returned dict is safely mutable. """ assert self.notify.debugStateCall(self) doDict = {} for doId in self.getDoIdList(parentId, zoneId): doDict[doId] = self.getDo(doId) return doDict def getObjectsOfClassInZone(self, parentId, zoneId, objClass): """ returns dict of doId:object for a zone, containing all objects that inherit from 'class'. returned dict is safely mutable. """ assert self.notify.debugStateCall(self) doDict = {} for doId in self.getDoIdList(parentId, zoneId, objClass): doDict[doId] = self.getDo(doId) return doDict
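# ---------------------------------------------------------------------------
# Usage sketch. DoCollectionManager is written as a mixin for a distributed-
# object repository, so `repo` below is a hypothetical repository instance
# and SomeDistributedClass a hypothetical DistributedObject subclass:
#
#     do = repo.getDo(doId)                      # direct doId lookup
#     matches = repo.doFindAll('Toon')           # substring match on repr()
#     objs = repo.getDoList(parentId, zoneId)    # objects under parent/zone
#     n = repo.countObjects(SomeDistributedClass)
# ---------------------------------------------------------------------------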
from functools import partial from typing import Any, List, Optional, Type, Union from torchvision.prototype.transforms import ImageClassificationEval from torchvision.transforms.functional import InterpolationMode from ....models.quantization.resnet import ( QuantizableBasicBlock, QuantizableBottleneck, QuantizableResNet, _replace_relu, quantize_model, ) from .._api import WeightsEnum, Weights from .._meta import _IMAGENET_CATEGORIES from .._utils import handle_legacy_interface, _ovewrite_named_param from ..resnet import ResNet18_Weights, ResNet50_Weights, ResNeXt101_32X8D_Weights __all__ = [ "QuantizableResNet", "ResNet18_QuantizedWeights", "ResNet50_QuantizedWeights", "ResNeXt101_32X8D_QuantizedWeights", "resnet18", "resnet50", "resnext101_32x8d", ] def _resnet( block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]], layers: List[int], weights: Optional[WeightsEnum], progress: bool, quantize: bool, **kwargs: Any, ) -> QuantizableResNet: if weights is not None: _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"])) if "backend" in weights.meta: _ovewrite_named_param(kwargs, "backend", weights.meta["backend"]) backend = kwargs.pop("backend", "fbgemm") model = QuantizableResNet(block, layers, **kwargs) _replace_relu(model) if quantize: quantize_model(model, backend) if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = { "task": "image_classification", "size": (224, 224), "min_size": (1, 1), "categories": _IMAGENET_CATEGORIES, "interpolation": InterpolationMode.BILINEAR, "backend": "fbgemm", "quantization": "ptq", "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models", } class ResNet18_QuantizedWeights(WeightsEnum): IMAGENET1K_FBGEMM_V1 = Weights( url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth", transforms=partial(ImageClassificationEval, crop_size=224), meta={ **_COMMON_META, "architecture": "ResNet", "publication_year": 2015, "num_params": 11689512, "unquantized": ResNet18_Weights.IMAGENET1K_V1, "acc@1": 69.494, "acc@5": 88.882, }, ) DEFAULT = IMAGENET1K_FBGEMM_V1 class ResNet50_QuantizedWeights(WeightsEnum): IMAGENET1K_FBGEMM_V1 = Weights( url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth", transforms=partial(ImageClassificationEval, crop_size=224), meta={ **_COMMON_META, "architecture": "ResNet", "publication_year": 2015, "num_params": 25557032, "unquantized": ResNet50_Weights.IMAGENET1K_V1, "acc@1": 75.920, "acc@5": 92.814, }, ) IMAGENET1K_FBGEMM_V2 = Weights( url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth", transforms=partial(ImageClassificationEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, "architecture": "ResNet", "publication_year": 2015, "num_params": 25557032, "unquantized": ResNet50_Weights.IMAGENET1K_V2, "acc@1": 80.282, "acc@5": 94.976, }, ) DEFAULT = IMAGENET1K_FBGEMM_V2 class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum): IMAGENET1K_FBGEMM_V1 = Weights( url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth", transforms=partial(ImageClassificationEval, crop_size=224), meta={ **_COMMON_META, "architecture": "ResNeXt", "publication_year": 2016, "num_params": 88791336, "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1, "acc@1": 78.986, "acc@5": 94.480, }, ) IMAGENET1K_FBGEMM_V2 = Weights( url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth", 
transforms=partial(ImageClassificationEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, "architecture": "ResNeXt", "publication_year": 2016, "num_params": 88791336, "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2, "acc@1": 82.574, "acc@5": 96.132, }, ) DEFAULT = IMAGENET1K_FBGEMM_V2 @handle_legacy_interface( weights=( "pretrained", lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1 if kwargs.get("quantize", False) else ResNet18_Weights.IMAGENET1K_V1, ) ) def resnet18( *, weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None, progress: bool = True, quantize: bool = False, **kwargs: Any, ) -> QuantizableResNet: weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights) return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs) @handle_legacy_interface( weights=( "pretrained", lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1 if kwargs.get("quantize", False) else ResNet50_Weights.IMAGENET1K_V1, ) ) def resnet50( *, weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None, progress: bool = True, quantize: bool = False, **kwargs: Any, ) -> QuantizableResNet: weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights) return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs) @handle_legacy_interface( weights=( "pretrained", lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1 if kwargs.get("quantize", False) else ResNeXt101_32X8D_Weights.IMAGENET1K_V1, ) ) def resnext101_32x8d( *, weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None, progress: bool = True, quantize: bool = False, **kwargs: Any, ) -> QuantizableResNet: weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights) _ovewrite_named_param(kwargs, "groups", 32) _ovewrite_named_param(kwargs, "width_per_group", 8) return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
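# ---------------------------------------------------------------------------
# Usage sketch (prototype multi-weight API; assumes a torchvision build that
# ships these enums and an fbgemm-capable CPU). `img` is a hypothetical PIL
# image:
#
#     weights = ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V2
#     model = resnet50(weights=weights, quantize=True)
#     model.eval()
#     preprocess = weights.transforms()      # ImageClassificationEval preset
#     batch = preprocess(img).unsqueeze(0)
#     probs = model(batch).softmax(dim=1)
# ---------------------------------------------------------------------------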
from simulatorutils import *
from extra.schedule import Schedule
from extra.printer import pprint, YELLOW, RED, GREEN, CYAN


class Simulator(object):
    def __init__(self, plant):
        self.plant = plant
        self.graph = []
        self.createGraph()
        self.printing = False
        self.numOfOrdersFinished = 0
        self.inSchedule = None
        self.delay = 0

    def createGraph(self):
        """
        Creates a list with the sequence of the machines in the plant and the
        traverses that connect each machine to the next one. Each machine
        consists of a list of MachineNode instances; each traverse consists of
        a single TraverseNode. The graph (list) looks like this (where M is a
        MachineNode and T is a TraverseNode):
        M
        M -> T -> M -> T -> M
        M         M
        """
        for m in self.plant.machines:
            mList = []
            for _ in range(m.quantity):
                mList.append(MachineNode(m))
            self.graph.append(mList)
            if m != self.plant.machines[-1]:
                self.graph.append(TraverseNode())

    def machineIndexInGraph(self, machineName):
        """
        Helper method that returns the index of the machine named machineName
        in the graph.
        """
        for i, m in enumerate(self.graph):
            if type(m) == list and m[0].machine.name == machineName:
                return i
        return None

    def minTimeFinish(self, machineNodeList):
        """
        Helper method that returns the longest remaining time of the orders
        currently being processed in the machine. It is used for the capacity
        constraint.
        """
        maxTime = -1
        for m in machineNodeList:
            if m.currentOrder is not None:
                if m.currentOrder[1] > maxTime:
                    maxTime = m.currentOrder[1]
        return maxTime

    def checkMachineNodeFinishTime(self, nodeIndex, node, t):
        """
        Checks whether a machine finished processing an order.
        """
        # m represents each basin in the machine
        for m in node:
            # if there is an order in the basin
            if m.currentOrder is not None:
                # if the remaining processing time > 0
                if m.currentOrder[1] != 0:
                    # decrement the remaining processing time by 1
                    m.currentOrder[1] -= 1
                # the order finished processing on this machine
                else:
                    # check if this machine was the last one
                    if node == self.graph[-1]:
                        pprint("SIM %10s %20s at time %5s." %
                               (m.currentOrder[0], "finished", t),
                               GREEN, self.printing)
                        self.inSchedule.finishTimes.append(
                            [m.currentOrder[0], t])
                        self.numOfOrdersFinished += 1
                    # the machine is not the last one
                    else:
                        self.graph[nodeIndex + 1].orders.append(
                            [m.currentOrder[0], self.plant.craneMoveTime])
                        pprint("SIM %10s %20s %15s at time %5s." %
                               (m.currentOrder[0], "left", m.machine, t),
                               YELLOW, self.printing)
                    # remove the order from the machine's currentOrder
                    m.currentOrder = None

    def checkTraverseNodeFinishTime(self, nodeIndex, node, t):
        """
        Checks whether a traverse finished moving an order, and checks the
        next machine's constraints.
        """
        for j, o in enumerate(node.orders):
            if o is None:
                continue
            if o[1] > 0:
                o[1] -= 1
            else:
                machine = self.graph[nodeIndex + 1][0].machine
                # check that there is no break for this machine at:
                # * the enter time of the order in the machine
                # * the processing time of the order if it enters now
                if [z in machine.setOfBreaks() for z in
                        range(t, t + o[0].recipe[machine.name])].count(True) == 0:
                    # check each basin of the next machine
                    for m in self.graph[nodeIndex + 1]:
                        # check if the basin is empty
                        if m.currentOrder is None:
                            # order precedence in the machine is not important
                            if m.machine.precedence == False:
                                m.currentOrder = [o[0],
                                                  o[0].recipe[m.machine.name]]
                                pprint("SIM %10s %20s %15s at time %5s." %
                                       (o[0], "entered", m.machine, t),
                                       YELLOW, self.printing)
                            # orders in the machine must be reserved (Drier)
                            else:
                                # the minimum time at which the order can
                                # finish on the next machine
                                time = max(
                                    self.minTimeFinish(self.graph[nodeIndex + 1]),
                                    o[0].recipe[m.machine.name])
                                m.currentOrder = [o[0], time]
                                # if there is a delay
                                if time != o[0].recipe[m.machine.name]:
                                    pprint("SIM %10s %20s %15s at time %5s with overtime %5s." %
                                           (o[0], "entered", m.machine, t,
                                            time - o[0].recipe[m.machine.name]),
                                           RED, self.printing)
                                    self.delay += time - o[0].recipe[m.machine.name]
                                else:
                                    pprint("SIM %10s %20s %15s at time %5s." %
                                           (o[0], "entered", m.machine, t),
                                           YELLOW, self.printing)
                            self.inSchedule.schedule.append(
                                [m.currentOrder[0], str(m.machine.name), t])
                            if o[1] < 0:
                                pprint("SIM %10s before %15s was delayed %5s." %
                                       (o[0], m.machine, o[1]),
                                       RED, self.printing)
                                self.delay += 1
                            node.orders[j] = None
                            break
                # there is a break time for the machine, and the order cannot
                # enter the machine for processing
                else:
                    pprint("SIM %10s %20s %15s at time %5s because of machine break." %
                           (o[0], "could not enter", machine, t),
                           RED, self.printing)
                    self.delay += 1

    def checkScheduleEnterTimes(self, schedule, t):
        """
        Checks whether an order's enter time at its first machine has been
        reached, and moves the order into the machine if possible.
        """
        # loop over the enter times to set the first machine of each order
        for i, s in enumerate(schedule):
            if s is None:
                continue
            # check if the enter time at the first machine has passed
            if s[2] <= t:
                entered = False
                currentMachineIndex = 0
                # check if the order was already in the plant
                currentMachine = s[0].currentMachine
                if currentMachine != "":
                    currentMachineIndex = self.machineIndexInGraph(currentMachine)
                machine = self.graph[currentMachineIndex][0].machine
                # check that there is no break for this machine at:
                # * the enter time of the order
                # * the processing time of the order in the machine
                if [z in machine.setOfBreaks() for z in
                        range(t, t + s[0].recipe[machine.name])].count(True) == 0:
                    # loop over the basins of the machine
                    for node in self.graph[currentMachineIndex]:
                        # check if the basin is empty
                        if node.currentOrder is None:
                            # order precedence in the machine is not important
                            if node.machine.precedence == False:
                                # assign this basin's current order along with
                                # its processing time
                                node.currentOrder = [s[0],
                                                     s[0].recipe[node.machine.name]]
                                pprint("SIM %10s %20s %15s at time %5s." %
                                       (node.currentOrder[0], "entered",
                                        node.machine, t),
                                       YELLOW, self.printing)
                                self.inSchedule.schedule.append(
                                    [node.currentOrder[0], str(node.machine.name), t])
                                schedule[i] = None
                                entered = True
                                break
                            # orders in the machine must be reserved (Drier)
                            else:
                                # the minimum time at which the order can
                                # finish on this machine
                                time = max(
                                    self.minTimeFinish(self.graph[currentMachineIndex]),
                                    s[0].recipe[node.machine.name])
                                node.currentOrder = [s[0], time]
                                # if there is a delay
                                if time != s[0].recipe[node.machine.name]:
                                    pprint("SIM %10s %20s %15s at time %5s with overtime %5s." %
                                           (s[0], "entered", node.machine, t,
                                            time - s[0].recipe[node.machine.name]),
                                           RED, self.printing)
                                    self.delay += time - s[0].recipe[node.machine.name]
                                else:
                                    pprint("SIM %10s %20s %15s at time %5s." %
                                           (s[0], "entered", node.machine, t),
                                           YELLOW, self.printing)
                                self.inSchedule.schedule.append(
                                    [node.currentOrder[0], str(node.machine.name), t])
                                # remove the enter time from the schedule list
                                schedule[i] = None
                                entered = True
                                break
                    # the order is delayed in case the machine is busy
                    if entered == False:
                        pprint("SIM %10s %20s %15s at time %5s." %
                               (s[0], "could not enter", node.machine, t),
                               RED, self.printing)
                        s[2] += 1
                        self.delay += 1
                else:
                    pprint("SIM %10s %20s %15s at time %5s because of machine break." %
                           (s[0], "could not enter", machine, t),
                           RED, self.printing)
                    s[2] += 1
                    self.delay += 1

    def simulate(self, inschedule):
        """
        Simulates the schedule and checks delays caused by machine quantity
        and capacity constraints or machine breaks. inSchedule holds the set
        of enter times of each order, where each item is a tuple of
        (Order, Machine, EnterTime).
        """
        pprint("SIM Starting new simulation...", CYAN, self.printing)
        self.inSchedule = inschedule
        assert type(self.inSchedule) == Schedule
        assert len(self.inSchedule.schedule) == 0
        assert len(self.inSchedule.finishTimes) == 0

        schedule = self.inSchedule.startTimes[:]
        # sort the schedule by enter times
        schedule.sort(key=lambda s: s[2])
        # t = first enter time in the schedule
        t = schedule[0][2]
        self.delay = 0
        self.numOfOrdersFinished = 0
        numOfTotalOrders = len(schedule)
        # orders already in the plant (non-empty currentMachine) go first
        schedule.sort(key=lambda s: s[0].currentMachine, reverse=True)
        # run until all orders have finished processing
        while self.numOfOrdersFinished < numOfTotalOrders:
            self.checkScheduleEnterTimes(schedule, t)
            # loop over all nodes to check whether any order finished its
            # processing or moving time
            for nodeIndex, node in enumerate(self.graph):
                # a MachineNode list: check processing finish times
                if type(node) == list:
                    self.checkMachineNodeFinishTime(nodeIndex, node, t)
                # a TraverseNode: check the next machine's quantity and
                # capacity constraints, and whether it has a break
                else:
                    self.checkTraverseNodeFinishTime(nodeIndex, node, t)
            t += 1
        pprint("SIM --------------------------------------------------------------",
               CYAN, self.printing)
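# ---------------------------------------------------------------------------
# Added illustration (not part of the simulator): the alternating
# machine/traverse layout that createGraph() builds, shown with stand-in
# objects. FakeMachine is a hypothetical placeholder, not the Machine class
# from simulatorutils.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class FakeMachine(object):
        def __init__(self, name, quantity):
            self.name = name
            self.quantity = quantity

    machines = [FakeMachine("washer", 2), FakeMachine("drier", 1)]
    layout = []
    for m in machines:
        # one slot ("basin") per unit of machine quantity
        layout.append([m.name] * m.quantity)
        if m is not machines[-1]:
            layout.append("traverse")
    # -> [['washer', 'washer'], 'traverse', ['drier']]
    print(layout)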
"""Flask extension of Flask-Principal for handling authentication/authorization. """ from flask import session, current_app from flask_principal import ( Principal, identity_loaded, identity_changed, Identity, AnonymousIdentity, Permission, RoleNeed, TypeNeed ) # pylint: disable=invalid-name login_need = TypeNeed('login') # pylint: enable=invalid-name class SQLAlchemyAuthProvider(object): """SQLAlchemy backed UserProvider class which provides identity information for logged in user. """ # SQLAlchemy model used to query the database for identity information __model__ = None __id_key__ = 'id' __roles_key__ = 'roles' def __init__(self, db_session): self.session = db_session def identify(self, identity): """Identify a user via _id to provide information for role based authentication. """ ident = {} user = self.get_user(identity.id) if user: ident.update({ self.__id_key__: identity.id, self.__roles_key__: self.get_roles(user) }) return ident ## # Override these methods if they don't return user data correctly based on # your models. ## def get_user(self, _id): """Return user object from database.""" if _id is None: return None return self.session.query(self.__model__).get(_id) def get_roles(self, user): """Return a list of `roles` as strings.""" return getattr(user, self.__roles_key__, []) class Auth(object): """Auth extension.""" _extension_name = 'carafe.auth' def __init__(self, app=None, provider=None): self.principal = Principal(use_sessions=False) self.require = PermissionFactory() self.app = app if self.app: # pragma: no cover self.init_app(app, provider) def init_app(self, app, provider=None): """Initialize app.""" app.config.setdefault('CARAFE_AUTH_ENABLED', True) app.config.setdefault('CARAFE_AUTH_SESSION_ID_KEY', 'user_id') app.config.setdefault('CARAFE_AUTH_IDENTITY_ID_KEY', 'id') app.config.setdefault('CARAFE_AUTH_IDENTITY_ROLES_KEY', 'roles') if not app.config['CARAFE_AUTH_ENABLED']: # pragma: no cover return if not hasattr(app, 'extensions'): # pragma: no cover app.extensions = {} app.extensions[self._extension_name] = {'provider': provider} # NOTE: Instead of having principal use it's session loader, we'll use # ours. self.principal.init_app(app) self.principal.identity_loader(self.session_identity_loader) identity_loaded.connect_via(app)(self.on_identity_loaded) @property def session_id_key(self): """Property access to config's CARAFE_AUTH_SESSION_ID_KEY.""" return current_app.config['CARAFE_AUTH_SESSION_ID_KEY'] @property def identity_id_key(self): """Property access to config's CARAFE_AUTH_IDENTITY_ID_KEY.""" return current_app.config['CARAFE_AUTH_IDENTITY_ID_KEY'] @property def identity_roles_key(self): """Property access to config's CARAFE_AUTH_IDENTITY_ROLES_KEY.""" return current_app.config['CARAFE_AUTH_IDENTITY_ROLES_KEY'] @property def user_id(self): """Property access to logged in user id.""" return session.get(self.session_id_key) @property def provider(self): """Property access to auth provider instance.""" return current_app.extensions[self._extension_name]['provider'] def session_identity_loader(self): """Fetch user id from session using config's auth id key""" if self.session_id_key in session: identity = Identity(session[self.session_id_key]) else: identity = None return identity def on_identity_loaded(self, app, identity): # pylint: disable=unused-argument """Called if session_identity_loader() returns an identity (i.e. not None). """ # Whatever is returned is used for our identity. Potentially, provider # may return a different user than original identity (e.g. 
app provides
        # a way for admin users to access the site using a different user
        # account.)
        if self.provider:
            ident = self.provider.identify(identity)
        else:
            ident = {self.identity_id_key: None}

        if self.user_id and not ident:
            # The session has a user_id but the returned ident is empty; the
            # user was possibly deleted or inactivated in another process.
            self.logout()

        # provide auth (i.e. the user is not anonymous)
        if ident.get(self.identity_id_key):
            identity.provides.add(login_need)

        # provide roles
        for role in ident.get(self.identity_roles_key, []):
            identity.provides.add(RoleNeed(role))

    def send_identity_changed(self, user_id):
        """Send identity changed event."""
        if user_id is None:
            identity = AnonymousIdentity()
        else:
            identity = Identity(user_id)

        identity_changed.send(
            current_app._get_current_object(), identity=identity)

    def login(self, user_id, propagate=True):
        """Call after user has been authenticated for login."""
        if session.get(self.session_id_key) != user_id:
            session[self.session_id_key] = user_id

        if propagate:
            self.send_identity_changed(user_id)

    def logout(self, propagate=True):
        """Call to log user out."""
        if session.get(self.session_id_key):
            del session[self.session_id_key]

        if propagate:
            self.send_identity_changed(None)


class PermissionFactory(object):
    """General purpose permissions factory which creates RoleNeed permissions
    from attribute access.
    """

    def __init__(self):
        self._permissions = {
            'login': Permission(login_need)
        }

    def __getattr__(self, role):
        """Return the role permission's require method. If the permission
        doesn't exist yet, create it.
        """
        if role not in self._permissions:
            self._permissions[role] = Permission(RoleNeed(role))
        return self._permissions[role].require
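# ---------------------------------------------------------------------------
# Added usage sketch (illustrative only): wiring the extension into an app
# with a stand-in provider. DictProvider is a hypothetical in-memory
# replacement for SQLAlchemyAuthProvider; a real app would subclass
# SQLAlchemyAuthProvider with __model__ set to its user model.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from flask import Flask

    class DictProvider(object):
        """Identify users from a dict instead of a database (sketch)."""
        users = {1: {'roles': ['admin']}}

        def identify(self, identity):
            user = self.users.get(identity.id)
            return {'id': identity.id, 'roles': user['roles']} if user else {}

    app = Flask(__name__)
    app.secret_key = 'change-me'  # sessions require a secret key

    auth = Auth()
    auth.init_app(app, provider=DictProvider())

    @app.route('/admin')
    @auth.require.admin()  # creates Permission(RoleNeed('admin')) on first use
    def admin_view():
        return 'hello, admin'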
import glob import os import shutil import sys import unittest from test.support import (TESTFN, skip_unless_symlink, can_symlink, create_empty_file, change_cwd) class GlobTests(unittest.TestCase): def norm(self, *parts): return os.path.normpath(os.path.join(self.tempdir, *parts)) def joins(self, *tuples): return [os.path.join(self.tempdir, *parts) for parts in tuples] def mktemp(self, *parts): filename = self.norm(*parts) base, file = os.path.split(filename) if not os.path.exists(base): os.makedirs(base) create_empty_file(filename) def setUp(self): self.tempdir = TESTFN + "_dir" self.mktemp('a', 'D') self.mktemp('aab', 'F') self.mktemp('.aa', 'G') self.mktemp('.bb', 'H') self.mktemp('aaa', 'zzzF') self.mktemp('ZZZ') self.mktemp('EF') self.mktemp('a', 'bcd', 'EF') self.mktemp('a', 'bcd', 'efg', 'ha') if can_symlink(): os.symlink(self.norm('broken'), self.norm('sym1')) os.symlink('broken', self.norm('sym2')) os.symlink(os.path.join('a', 'bcd'), self.norm('sym3')) def tearDown(self): shutil.rmtree(self.tempdir) def glob(self, *parts, **kwargs): if len(parts) == 1: pattern = parts[0] else: pattern = os.path.join(*parts) p = os.path.join(self.tempdir, pattern) res = glob.glob(p, **kwargs) self.assertCountEqual(glob.iglob(p, **kwargs), res) bres = [os.fsencode(x) for x in res] self.assertCountEqual(glob.glob(os.fsencode(p), **kwargs), bres) self.assertCountEqual(glob.iglob(os.fsencode(p), **kwargs), bres) return res def assertSequencesEqual_noorder(self, l1, l2): l1 = list(l1) l2 = list(l2) self.assertEqual(set(l1), set(l2)) self.assertEqual(sorted(l1), sorted(l2)) def test_glob_literal(self): eq = self.assertSequencesEqual_noorder eq(self.glob('a'), [self.norm('a')]) eq(self.glob('a', 'D'), [self.norm('a', 'D')]) eq(self.glob('aab'), [self.norm('aab')]) eq(self.glob('zymurgy'), []) res = glob.glob('*') self.assertEqual({type(r) for r in res}, {str}) res = glob.glob(os.path.join(os.curdir, '*')) self.assertEqual({type(r) for r in res}, {str}) res = glob.glob(b'*') self.assertEqual({type(r) for r in res}, {bytes}) res = glob.glob(os.path.join(os.fsencode(os.curdir), b'*')) self.assertEqual({type(r) for r in res}, {bytes}) def test_glob_one_directory(self): eq = self.assertSequencesEqual_noorder eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa'])) eq(self.glob('*a'), map(self.norm, ['a', 'aaa'])) eq(self.glob('.*'), map(self.norm, ['.aa', '.bb'])) eq(self.glob('?aa'), map(self.norm, ['aaa'])) eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab'])) eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab'])) eq(self.glob('*q'), []) def test_glob_nested_directory(self): eq = self.assertSequencesEqual_noorder if os.path.normcase("abCD") == "abCD": # case-sensitive filesystem eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')]) else: # case insensitive filesystem eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'), self.norm('a', 'bcd', 'efg')]) eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')]) def test_glob_directory_names(self): eq = self.assertSequencesEqual_noorder eq(self.glob('*', 'D'), [self.norm('a', 'D')]) eq(self.glob('*', '*a'), []) eq(self.glob('a', '*', '*', '*a'), [self.norm('a', 'bcd', 'efg', 'ha')]) eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'), self.norm('aab', 'F')]) def test_glob_directory_with_trailing_slash(self): # Patterns ending with a slash shouldn't match non-dirs res = glob.glob(self.norm('Z*Z') + os.sep) self.assertEqual(res, []) res = glob.glob(self.norm('ZZZ') + os.sep) self.assertEqual(res, []) # When there is a wildcard pattern which 
ends with os.sep, glob() # doesn't blow up. res = glob.glob(self.norm('aa*') + os.sep) self.assertEqual(len(res), 2) # either of these results is reasonable self.assertIn(set(res), [ {self.norm('aaa'), self.norm('aab')}, {self.norm('aaa') + os.sep, self.norm('aab') + os.sep}, ]) def test_glob_bytes_directory_with_trailing_slash(self): # Same as test_glob_directory_with_trailing_slash, but with a # bytes argument. res = glob.glob(os.fsencode(self.norm('Z*Z') + os.sep)) self.assertEqual(res, []) res = glob.glob(os.fsencode(self.norm('ZZZ') + os.sep)) self.assertEqual(res, []) res = glob.glob(os.fsencode(self.norm('aa*') + os.sep)) self.assertEqual(len(res), 2) # either of these results is reasonable self.assertIn(set(res), [ {os.fsencode(self.norm('aaa')), os.fsencode(self.norm('aab'))}, {os.fsencode(self.norm('aaa') + os.sep), os.fsencode(self.norm('aab') + os.sep)}, ]) @skip_unless_symlink def test_glob_symlinks(self): eq = self.assertSequencesEqual_noorder eq(self.glob('sym3'), [self.norm('sym3')]) eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'), self.norm('sym3', 'efg')]) self.assertIn(self.glob('sym3' + os.sep), [[self.norm('sym3')], [self.norm('sym3') + os.sep]]) eq(self.glob('*', '*F'), [self.norm('aaa', 'zzzF'), self.norm('aab', 'F'), self.norm('sym3', 'EF')]) @skip_unless_symlink def test_glob_broken_symlinks(self): eq = self.assertSequencesEqual_noorder eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'), self.norm('sym3')]) eq(self.glob('sym1'), [self.norm('sym1')]) eq(self.glob('sym2'), [self.norm('sym2')]) @unittest.skipUnless(sys.platform == "win32", "Win32 specific test") def test_glob_magic_in_drive(self): eq = self.assertSequencesEqual_noorder eq(glob.glob('*:'), []) eq(glob.glob(b'*:'), []) eq(glob.glob('?:'), []) eq(glob.glob(b'?:'), []) eq(glob.glob('\\\\?\\c:\\'), ['\\\\?\\c:\\']) eq(glob.glob(b'\\\\?\\c:\\'), [b'\\\\?\\c:\\']) eq(glob.glob('\\\\*\\*\\'), []) eq(glob.glob(b'\\\\*\\*\\'), []) def check_escape(self, arg, expected): self.assertEqual(glob.escape(arg), expected) self.assertEqual(glob.escape(os.fsencode(arg)), os.fsencode(expected)) def test_escape(self): check = self.check_escape check('abc', 'abc') check('[', '[[]') check('?', '[?]') check('*', '[*]') check('[[_/*?*/_]]', '[[][[]_/[*][?][*]/_]]') check('/[[_/*?*/_]]/', '/[[][[]_/[*][?][*]/_]]/') @unittest.skipUnless(sys.platform == "win32", "Win32 specific test") def test_escape_windows(self): check = self.check_escape check('?:?', '?:[?]') check('*:*', '*:[*]') check(r'\\?\c:\?', r'\\?\c:\[?]') check(r'\\*\*\*', r'\\*\*\[*]') check('//?/c:/?', '//?/c:/[?]') check('//*/*/*', '//*/*/[*]') def rglob(self, *parts, **kwargs): return self.glob(*parts, recursive=True, **kwargs) def test_recursive_glob(self): eq = self.assertSequencesEqual_noorder full = [('EF',), ('ZZZ',), ('a',), ('a', 'D'), ('a', 'bcd'), ('a', 'bcd', 'EF'), ('a', 'bcd', 'efg'), ('a', 'bcd', 'efg', 'ha'), ('aaa',), ('aaa', 'zzzF'), ('aab',), ('aab', 'F'), ] if can_symlink(): full += [('sym1',), ('sym2',), ('sym3',), ('sym3', 'EF'), ('sym3', 'efg'), ('sym3', 'efg', 'ha'), ] eq(self.rglob('**'), self.joins(('',), *full)) eq(self.rglob(os.curdir, '**'), self.joins((os.curdir, ''), *((os.curdir,) + i for i in full))) dirs = [('a', ''), ('a', 'bcd', ''), ('a', 'bcd', 'efg', ''), ('aaa', ''), ('aab', '')] if can_symlink(): dirs += [('sym3', ''), ('sym3', 'efg', '')] eq(self.rglob('**', ''), self.joins(('',), *dirs)) eq(self.rglob('a', '**'), self.joins( ('a', ''), ('a', 'D'), ('a', 'bcd'), ('a', 'bcd', 'EF'), ('a', 'bcd', 'efg'), ('a', 
'bcd', 'efg', 'ha'))) eq(self.rglob('a**'), self.joins(('a',), ('aaa',), ('aab',))) expect = [('a', 'bcd', 'EF'), ('EF',)] if can_symlink(): expect += [('sym3', 'EF')] eq(self.rglob('**', 'EF'), self.joins(*expect)) expect = [('a', 'bcd', 'EF'), ('aaa', 'zzzF'), ('aab', 'F'), ('EF',)] if can_symlink(): expect += [('sym3', 'EF')] eq(self.rglob('**', '*F'), self.joins(*expect)) eq(self.rglob('**', '*F', ''), []) eq(self.rglob('**', 'bcd', '*'), self.joins( ('a', 'bcd', 'EF'), ('a', 'bcd', 'efg'))) eq(self.rglob('a', '**', 'bcd'), self.joins(('a', 'bcd'))) with change_cwd(self.tempdir): join = os.path.join eq(glob.glob('**', recursive=True), [join(*i) for i in full]) eq(glob.glob(join('**', ''), recursive=True), [join(*i) for i in dirs]) eq(glob.glob(join('**', '*'), recursive=True), [join(*i) for i in full]) eq(glob.glob(join(os.curdir, '**'), recursive=True), [join(os.curdir, '')] + [join(os.curdir, *i) for i in full]) eq(glob.glob(join(os.curdir, '**', ''), recursive=True), [join(os.curdir, '')] + [join(os.curdir, *i) for i in dirs]) eq(glob.glob(join(os.curdir, '**', '*'), recursive=True), [join(os.curdir, *i) for i in full]) eq(glob.glob(join('**','zz*F'), recursive=True), [join('aaa', 'zzzF')]) eq(glob.glob('**zz*F', recursive=True), []) expect = [join('a', 'bcd', 'EF'), 'EF'] if can_symlink(): expect += [join('sym3', 'EF')] eq(glob.glob(join('**', 'EF'), recursive=True), expect) def test_glob_many_open_files(self): depth = 30 base = os.path.join(self.tempdir, 'deep') p = os.path.join(base, *(['d']*depth)) os.makedirs(p) pattern = os.path.join(base, *(['*']*depth)) iters = [glob.iglob(pattern, recursive=True) for j in range(100)] for it in iters: self.assertEqual(next(it), p) pattern = os.path.join(base, '**', 'd') iters = [glob.iglob(pattern, recursive=True) for j in range(100)] p = base for i in range(depth): p = os.path.join(p, 'd') for it in iters: self.assertEqual(next(it), p) @skip_unless_symlink class SymlinkLoopGlobTests(unittest.TestCase): def test_selflink(self): tempdir = TESTFN + "_dir" os.makedirs(tempdir) self.addCleanup(shutil.rmtree, tempdir) with change_cwd(tempdir): os.makedirs('dir') create_empty_file(os.path.join('dir', 'file')) os.symlink(os.curdir, os.path.join('dir', 'link')) results = glob.glob('**', recursive=True) self.assertEqual(len(results), len(set(results))) results = set(results) depth = 0 while results: path = os.path.join(*(['dir'] + ['link'] * depth)) self.assertIn(path, results) results.remove(path) if not results: break path = os.path.join(path, 'file') self.assertIn(path, results) results.remove(path) depth += 1 results = glob.glob(os.path.join('**', 'file'), recursive=True) self.assertEqual(len(results), len(set(results))) results = set(results) depth = 0 while results: path = os.path.join(*(['dir'] + ['link'] * depth + ['file'])) self.assertIn(path, results) results.remove(path) depth += 1 results = glob.glob(os.path.join('**', ''), recursive=True) self.assertEqual(len(results), len(set(results))) results = set(results) depth = 0 while results: path = os.path.join(*(['dir'] + ['link'] * depth + [''])) self.assertIn(path, results) results.remove(path) depth += 1 if __name__ == "__main__": unittest.main()
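# ---------------------------------------------------------------------------
# Added quick reference (not part of the test suite): the escaping rules and
# recursive matching exercised above, as a standalone helper. The 'src'
# pattern is illustrative.
# ---------------------------------------------------------------------------
def demo_escape_and_recursive_glob():
    # '[' becomes '[[]'; '?' and '*' are wrapped in brackets; ']' is left alone.
    assert glob.escape('report[draft]?.txt') == 'report[[]draft][?].txt'
    # '**' with recursive=True matches files at any directory depth.
    return glob.glob(os.path.join('src', '**', '*.py'), recursive=True)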
#! /usr/bin/env python # # Matasano Crypto Pals # Set 1 # import binascii import base64 from freqs import * from Crypto.Cipher import AES def hex2base64(a): '''Convert a string in hex to a string in base64.''' data = binascii.unhexlify(a) return base64.b64encode(data) def fixedXOR(b1, b2): int1 = int(b1.encode('hex'), base=16) int2 = int(b2.encode('hex'), base=16) ret = hex(int1 ^ int2)[2:].rstrip('L') if len(ret) % 2 == 1: ret = '0' + ret return ret.decode('hex') def score_string(s): '''Return a score associated with how likely the string is given English character frequencies.''' rare_char_score = -4 nonascii_score = -100 score = 0 for char in s: if char in en_log_freqs: score += en_log_freqs[char] elif 31 < ord(char) < 127: score += rare_char_score else: score += nonascii_score return score def encrypt_single_key_xor(instring, key): '''Encrypt a string against a repeating single character.''' repeated_char = len(instring) * key return fixedXOR(instring, repeated_char) def decrypt_single_key_xor(instring, return_key=False): '''Decode a string that has been encrypted against a single character.''' maxscore = None best_string = None best_key = None for i in xrange(32, 128): s = encrypt_single_key_xor(instring, chr(i)) score = score_string(s) if score > maxscore: maxscore = score best_string = s best_key = chr(i) if return_key: return (best_string, best_key) else: return best_string def detect_single_char_xor(filename): '''Find the line in the file that has been encrypted with single key XOR.''' maxscore = None best_line = None with open(filename) as infile: lines = infile.readlines() for raw_line in lines: line = raw_line.strip().decode('hex') decoded_line = decrypt_single_key_xor(line) score = score_string(decoded_line) if score > maxscore: best_line = decoded_line maxscore = score return best_line def repeating_key_xor(s, key): '''Encrypt a string s with the repeating key.''' key_len = len(key) enc_blocks = [] for i in range(key_len): decrypt_block = encrypt_single_key_xor(s[i::key_len], key[i]) enc_blocks.append(decrypt_block) enc_block = [elem for sublist in zip(*enc_blocks) for elem in sublist] enc_block = '' for i in xrange(len(enc_blocks[0])): for block in enc_blocks: try: enc_block += block[i] except IndexError: pass return enc_block.encode('hex') def hamming_distance(b1, b2): '''Compute the Hamming distance between s1 and s2.''' int1 = int(b1.encode('hex'), base=16) int2 = int(b2.encode('hex'), base=16) xored = int1 ^ int2 hamming_distance = 0 while xored: if xored % 2 == 1: hamming_distance += 1 xored >>= 1 return hamming_distance def find_keysize(data): '''Find the keysize used to encrypt the data.''' nblocks = 12 max_keysize = 40 normalized_hamdists = [] min_keysize = 1 for keysize in range(min_keysize, min(max_keysize+1, len(data)/nblocks)): hamdists = [] for i in range(nblocks): block1 = data[keysize*i:keysize*(i+1)] block2 = data[keysize*(i+1):keysize*(i+2)] hamdists.append(hamming_distance(block1, block2) / float(keysize)) mean_hamdist = sum(hamdists) / len(hamdists) normalized_hamdists.append(mean_hamdist) return normalized_hamdists.index(min(normalized_hamdists)) + min_keysize def break_rep_key_xor(data, return_key=False): '''Break repeating key xor.''' keysize = find_keysize(data) decrypt_blocks = [] key = '' for i in range(keysize): block = data[i::keysize] decrypt_block, decrypt_key = decrypt_single_key_xor(block, return_key=True) decrypt_blocks.append(decrypt_block) key += decrypt_key decrypted_message = '' for i in range(len(decrypt_blocks[0])): for decrypt_block in 
decrypt_blocks: try: decrypted_message += decrypt_block[i] except IndexError: pass if return_key: return decrypted_message, key else: return decrypted_message if __name__ == '__main__': # Challenge 1 STRING1_1 = '49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d' RESULT1_1 = 'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t' assert hex2base64(STRING1_1) == RESULT1_1 print "Challenge 1 test passed" print # Challenge 2 STRING1_2A = '1c0111001f010100061a024b53535009181c' STRING1_2B = '686974207468652062756c6c277320657965' RESULT1_2 = '746865206b696420646f6e277420706c6179' assert fixedXOR(STRING1_2A.decode('hex'), STRING1_2B.decode('hex')).encode('hex') == RESULT1_2 print "Challenge 2 test passed" print # Challenge 3 STRING1_3 = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736' print "Challenge 3 solution:" print decrypt_single_key_xor(STRING1_3.decode('hex')) print # Challenge 4 print "Challenge 4 solution:" print detect_single_char_xor('set1-4.txt') # Challenge 5 STRING1_5A = 'Burning \'em, if you ain\'t quick and nimble\n' STRING1_5B = 'I go crazy when I hear a cymbal' STRING1_5 = STRING1_5A + STRING1_5B STRING1_5KEY = 'ICE' RESULT1_5A = '0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272' RESULT1_5B = 'a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f' RESULT1_5 = RESULT1_5A + RESULT1_5B assert repeating_key_xor(STRING1_5, STRING1_5KEY) == RESULT1_5 print "Challenge 5 test passed" print # Challenge 6 B64_1_6 = '' with open('set1-6.txt') as infile: for line in infile: B64_1_6 += line.strip() DATA1_6 = base64.b64decode(B64_1_6) print "Challenge 6 solution:" print break_rep_key_xor(DATA1_6) print # Challenge 7 KEY1_7 = 'YELLOW SUBMARINE' DATA_64_1_7 = '' with open('set1-7.txt') as infile: for line in infile: DATA_64_1_7 += line.strip() DATA1_7 = base64.b64decode(DATA_64_1_7) CIPHER = AES.new(KEY1_7, AES.MODE_ECB) print "Challenge 7 solution:" print CIPHER.decrypt(DATA1_7)
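# ---------------------------------------------------------------------------
# Added sanity check (not part of the original script): the Cryptopals set 1
# text supplies a known Hamming-distance test vector, which is handy when
# modifying hamming_distance().
# ---------------------------------------------------------------------------
def test_hamming_distance():
    assert hamming_distance('this is a test', 'wokka wokka!!!') == 37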
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import annotations import argparse import os from abc import ABC, abstractmethod from datetime import datetime import json from typing import Any, Dict, List, Union import pandas as pd from parlai.core.opt import Opt import parlai.utils.logging as logging # Defining the class only if Mephisto is installed, since it relies on Mephisto try: from mephisto.abstractions.databases.local_database import LocalMephistoDB from mephisto.data_model.unit import Unit from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser except ImportError: pass class AbstractResultsCompiler(ABC): """ Abstract class for compiling results of crowdsourcing runs. Currently only provides utility attributes/methods for analyzing turn annotations. """ @classmethod def setup_args(cls): parser = argparse.ArgumentParser('Compile crowdsourcing results') parser.add_argument( '--output-folder', type=str, help='Folder to save output files to' ) parser.add_argument( '--results-format', type=str, choices=['csv', 'json'], default='csv', help='Output format for results data', ) parser.add_argument( '--task-name', type=str, help='Name of the Mephisto task to open' ) parser.add_argument( '--database-path', type=str, default=None, help='Path to local Mephisto database. Leave empty for default location.', ) return parser def __init__(self, opt: Opt): self.task_name = opt['task_name'] self.output_folder = opt['output_folder'] self.results_format = opt.get('results_format', 'json') self.database_path = opt['database_path'] # We lazily load these later, or inject their mock version during testing. self._mephisto_db = None self._mephisto_data_browser = None def get_mephisto_data_browser(self) -> MephistoDataBrowser: if not self._mephisto_data_browser: db = self.get_mephisto_db() self._mephisto_data_browser = MephistoDataBrowser(db=db) return self._mephisto_data_browser def get_mephisto_db(self) -> LocalMephistoDB: if not self._mephisto_db: self._mephisto_db = LocalMephistoDB(self.database_path) return self._mephisto_db def get_results_path_base(self) -> str: """ Return the save path for the results file, not including the file extension. """ now = datetime.now() return os.path.join( self.output_folder, f'{self.__class__.__name__}__{now.strftime("%Y%m%d_%H%M%S")}', ) def get_worker_name(self, worker_id: str) -> str: """ Gets the global id of a worker from their Mephisto worker_id. The worker_id is the unique id that the crowdsourcing platforms (eg, Amazon Mechanical Turk) assign to a single human worker in their system. """ db = self.get_mephisto_db() return db.get_worker(worker_id)["worker_name"] def get_task_units(self) -> List[Unit]: """ Retrieves the list of work units from the Mephisto task. """ data_browser = self.get_mephisto_data_browser() return data_browser.get_units_for_task_name(self.task_name) def get_data_from_unit(self, unit: Unit) -> Dict[str, Any]: """ Retrieves task data for a single unit. """ try: data_browser = self.get_mephisto_data_browser() return data_browser.get_data_from_unit(unit) except (IndexError, AssertionError) as error: logging.error(error) logging.warning( f'Skipping unit {unit.db_id}. No message found for this unit.' ) def get_task_data(self) -> List[Dict[str, Any]]: """ Retrieves task data for a list of Mephisto task units. 
""" task_data = [] for unit in self.get_task_units(): unit_data = self.get_data_from_unit(unit) if unit_data and self.is_unit_acceptable(unit_data): task_data.append(unit_data) return task_data def is_unit_acceptable(self, unit_data: Dict[str, Any]) -> bool: """ Helps filtering units that are compiled. Override for use. Returning False means that the unit data will be discarded. """ if not unit_data: # Add your task-specific qualificaiton logic that justifies # discarding this unit, based on it data content. return False return True @abstractmethod def compile_results(self) -> Union[pd.DataFrame, Dict[str, Any]]: """ Method for returning the final results as a dataframe or a json. For Dict output each key is a unique identifier (eg Assignment ID) for a unit of crowdsourcing work. The data for that unit is stored in the value as dictionary. Each row of the dataframe consists of one utterance of one conversation, or crowdsourcing interaction. NOTE: Preference for new projects is Dict output (see the TODO below). TODO: Only support Dict. Deprecate ` pd.DataFrame` when no other code is relying on it. """ def _validate_compiled_result_type(self, results): assert isinstance(results, dict) or isinstance(results, pd.DataFrame), ( 'The output of result compiler needs to be a dictionary or a pandas dataframe. ' f'Found ({type(results)})' ) def compile_and_save_results(self): """ Compile results and save them. Results will be saved in the format given by --results-format. """ compiled_results = self.compile_results() self._validate_compiled_result_type(compiled_results) results_path_base = self.get_results_path_base() results_path = f'{results_path_base}.{self.results_format}' os.makedirs(self.output_folder, exist_ok=True) if self.results_format == 'csv': if not isinstance(compiled_results, pd.DataFrame): logging.warning( "The requested data output format was 'csv' while the data was compiled as a 'dict'. " 'Transforming dictionary data into pd.DataFrame using pandas.' ) compiled_results = pd.DataFrame.from_dict( compiled_results, orient='index' ) compiled_results.to_csv(results_path, index=False) elif self.results_format == 'json': if isinstance(compiled_results, pd.DataFrame): logging.warning( "The requested data output format was 'json' while the data was compiled as a 'dataframe'. " 'Transforming dataframe into json using pandas.' ) # Reset the index to make each row have a unique index value compiled_results.reset_index().to_json(results_path) else: with open(results_path, 'w') as fout: fout.write(json.dumps(compiled_results)) else: raise ValueError( f'Results save format of "{self.results_format}" currently unsupported!' ) logging.info(f'Wrote results file to {results_path}.') class AbstractTurnAnnotationResultsCompiler(AbstractResultsCompiler): """ Results compiler subclass to provide utility code for turn annotations. Currently incompatible with Mephisto's DataBrowser: all subclasses load results files directly from disk. TODO: make all subclasses compatible with DataBrowser """ @classmethod def setup_args(cls): parser = super().setup_args() parser.add_argument( '--results-folders', type=str, help='Comma-separated list of result folders' ) parser.add_argument( '--problem-buckets', type=str, help='Comma-separated list of buckets used for annotation. 
Set to an empty string to not analyze problem buckets.', default='bucket_0,bucket_1,bucket_2,bucket_3,bucket_4,none_all_good', ) return parser def __init__(self, opt: Opt): super().__init__(opt) # Handle inputs if 'results_folders' in opt: self.results_folders = opt['results_folders'].split(',') else: self.results_folders = None if opt['problem_buckets'].lower() not in ['', 'none']: self.use_problem_buckets = True self.problem_buckets = opt['problem_buckets'].split(',') else: self.use_problem_buckets = False self.problem_buckets = []
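# ---------------------------------------------------------------------------
# Added subclass sketch (illustrative only): the minimal pieces needed to run
# a compiler end to end. The result keys and payload fields are hypothetical;
# real payloads depend on what the Mephisto task saved.
# ---------------------------------------------------------------------------
class ExampleResultsCompiler(AbstractResultsCompiler):
    """Toy compiler that keys compiled results by an illustrative unit field."""

    def compile_results(self) -> Dict[str, Any]:
        results = {}
        for idx, unit_data in enumerate(self.get_task_data()):
            # 'assignment_id' is an assumed field; fall back to the index.
            results[unit_data.get('assignment_id', str(idx))] = unit_data
        return results


if __name__ == '__main__':
    args_ = ExampleResultsCompiler.setup_args().parse_args()
    ExampleResultsCompiler(vars(args_)).compile_and_save_results()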
#!/usr/bin/env python import argparse import cfp_common import collections import datetime import math import nltk import re import Queue import random import scipherd import sys import time import zmq #### # from http://bytes.com/topic/python/answers/828486-ascii-binary-conversion def a2b(a, mask): ord_a = ord(a) if ord_a >= 256: raise Exception("Unsupported character: %s" % a) ai = ord_a ^ mask return ''.join('01'[(ai >> x) & 1] for x in xrange(7, -1, -1)) #### class Bitstring: def __init__(self): self.reset("") def reset(self, bitstring): self.bitstring = bitstring self.index = 0 class LastTime: def __init__(self): self.last_time = time.time() def next_time(self): self.last_time += random.randint(7, 60)*(24*60*60) return self.last_time EncodeState = collections.namedtuple('EncodeState', ['input_text', 'bitstring', 'common', 'header_grammar', 'body_grammar', 'list_bits', 'space_before', 'space_after', 'last_or_nots', 'last_time']) def choose(nonterm, in_list, prods, state): if nonterm in state.last_or_nots: # if True, use the last one, otherwise, use anything but. # this choice uses no bits if state.last_or_nots[nonterm]: return prods[len(prods)-1] else: return random.choice(prods[:-1]) elif state.bitstring.index >= len(state.bitstring.bitstring): # We're past the end of the message, so just pick randomly return random.choice(prods) elif len(prods) < 3: return prods[0] bits = int(math.log(len(prods)-1, 2)) prevPow2 = math.pow(2, bits) # For lists, only pick the end of the list once we've used all # the bits, or we're out of bits. Unless we're the last list # left, then keep going until we consume all bits end_list = False if in_list and in_list in state.list_bits: bits_left = state.list_bits[in_list] if (bits_left <= 0 and len(state.list_bits) > 1 and nonterm in state.common.list_recursive_terms()): end_list = True del state.list_bits[in_list] else: state.list_bits[in_list] = bits_left - bits #print ("Consuming %s bits for list %s (%s left)" % # (bits, in_list, state.list_bits[in_list])) # otherwise, use the first 'bits' bits to pick the index index = int(prevPow2) if not end_list: strindex = state.bitstring.bitstring[state.bitstring.index:bits+ state.bitstring.index] index = int(strindex, 2) #print ("(%s) Using bits %s (%s -> %s)" % # (len(state.bitstring.bitstring)-state.bitstring.index, strindex, nonterm, prods[index])) state.bitstring.index += bits state.bitstring.index = min(state.bitstring.index, len(state.bitstring.bitstring)) # now interpret as an int prod = prods[index] if len(state.list_bits) == 0 and nonterm == nltk.Nonterminal("CFP_BODY"): # set the list bits state.list_bits.update( state.common.calc_list_bits(len(state.input_text)*8, prod)) return prod def expand(grammar, nonterm, state, in_list = None): # pick random production, recurse prods = grammar.productions(nonterm) if len(prods) == 0: return prods p = choose(nonterm, in_list, prods, state) return p.rhs() def expand_all(grammar, nonterm, state): result = "" queue = Queue.LifoQueue() queue.put_nowait(nonterm) # do this iteratively; recursively blows past python's recursive limit in_list = None len_at_start_of_list = 0 while not queue.empty(): head = queue.get_nowait() # Keep track of being in a list until all the bits for the list # have been used up if head in state.list_bits: in_list = head len_at_start_of_list = queue.qsize() # done with the list once we consume the next item in the queue if in_list and queue.qsize() < len_at_start_of_list: in_list = None terms = expand(grammar, head, state, in_list) if len(terms) 
== 0: if isinstance(head, basestring): result = " ".join([result, head]) else: result = " ".join([result, str(head)]) else : # put them into the lifo queue backwards, so we'll get the # first one out for nt in reversed(terms): if nt in state.common.append_newlines(): queue.put_nowait(nltk.Nonterminal("\n")) queue.put_nowait(nt) return result # Must be determinstically reversible by the decoder, so use the # following rules: # 1) Erase spaces at the beginning and end of every line # 2) If the first letter of the line is not capitalized, make it so # 3) remove one space before all punctuation (except '(') # 4) remove one space after '(' def pretty_print(line, state): ppline = line.lstrip().rstrip() if len(ppline) > 0 and ppline[0].islower(): ppline = ppline[0].upper() + ppline[1:] ppline = state.space_before.sub(r'\1', ppline) ppline = state.space_after.sub(r'\1', ppline) return ppline def pretty_print_all(text, state): return "\n".join([pretty_print(line, state) for line in text.splitlines()]) # first thing: determine the header (subject line): # # The conference name is 3-5 upper-case characters. For 3 letters, # at least one must be a vowel; for 4-5 letters, at least 2 must be. # This allows for roughly 2.5M conference names, or 21 bits to work # with. # # bits 0-7: a random XOR mask for the body of the data # bits 8-15: a version number for the grammar itself (masked) # bits 16-20: the least significant 5 bits of the input text # # Once this is figured out, generate the header by using a masked # bitstring out of the rest of the byte-length of the input text # (15 bytes, capping the whole thing at 1M) def do_encode(state, website = None): mask = random.randint(0,255) version = state.common.version() if version < 0 or version > 255: print "Bad grammar version: %d" % version sys.exit(-1) ls_len = len(state.input_text) & 0x1f name_index = (mask << 13) | ((version ^ mask) << 5) | ls_len f = open(cfp_common.CfpCommon.conf_names_filename(), 'r') conf_name = cfp_common.CfpCommon.conf_name_from_index(f, name_index) f.close() #print "3) %s" % time.time() ms_len = len(state.input_text) >> 5 masked_len = "{0:015b}".format(ms_len ^ (mask | (mask & 0x7f) << 8)) #sys.stderr.write("header: %s\n" % masked_len) state.bitstring.reset(masked_len) header = expand_all(state.header_grammar, state.header_grammar.start(), state) header = pretty_print_all(header.replace("CFP_CONF_ABBREV", conf_name), state) #print "4) %s" % time.time() body_string = "".join([a2b(c, mask) for c in state.input_text]) state.bitstring.reset(body_string) body = expand_all(state.body_grammar, state.body_grammar.start(), state) if website: body = body.replace("WEBSITE_LINK", website) #print "5) %s" % time.time() # Replace dates: date_re = re.compile("(SUBSTITUTE_DATE)") def sub_datetime(matchobj): # pick a time about one month away from the last one next_time = state.last_time.next_time() return (datetime.date.fromtimestamp(next_time).strftime("%B %d, %Y"). 
lstrip("0").replace(" 0", " ")) body = date_re.sub(sub_datetime, body) #print "6) %s" % time.time() body = pretty_print_all(body.replace("CFP_CONF_ABBREV", conf_name), state) return (header, body) def main(): parser = argparse.ArgumentParser() parser.add_argument('--seed', metavar='S', type=int, help='the random number generator seed') parser.add_argument('--socket', metavar='SOCKET', type=str, help='the local socket to bind to') parser.add_argument('--infile', metavar='FILE', type=str, help='read from this file instead of stdin') parser.add_argument('--outfile', metavar='FILE', type=str, help='write to this file instead of stdout') parser.add_argument('--website', metavar='W', type=str, help='a website link to include, if any ' '(must start with "http://")') args = parser.parse_args() if args.socket: ok = scipherd.call_daemon(args.socket, True, args.infile, args.outfile) if ok: sys.exit(0) else: sys.exit(-1) if args.seed: seed = args.seed else: seed = random.randint(0, 2**32) random.seed(seed) sys.stderr.write("Random seed: %d\n" % seed) input_text = "" for line in sys.stdin: input_text += line.decode('utf-8') if len(input_text) > 2**20: print "Input text must be smaller than 1MB." sys.exit(-1) common = cfp_common.CfpCommon.get_latest_common() space_before = re.compile('\s([%s])' % common.chars_to_remove_a_space_before()) space_after = re.compile('([%s])\s' % common.chars_to_remove_a_space_after()) last_or_nots = common.choose_last_or_nots() if args.website: if args.website.find("http://") != 0: sys.stderr.write("Bad website: %s\n" % args.website) sys.exit(-1) last_or_nots[nltk.Nonterminal("SUBMIT_CLOSING")] = True # load grammars #print "1) %s" % time.time() header_grammar = nltk.data.load("file:%s" % common.header_cfg_filename(), 'cfg') body_grammar = nltk.data.load("file:%s" % common.body_cfg_filename(), 'cfg') #print "2) %s" % time.time() state = EncodeState(input_text, Bitstring(), common, header_grammar, body_grammar, {}, space_before, space_after, last_or_nots, LastTime()) (header, body) = do_encode(state, args.website) print header print "" print body if __name__ == "__main__": main()
import numpy as np from numpy.testing import assert_allclose import pytest from .. import hmm from . import assert_log_likelihood_increasing, make_covar_matrix, normalized class GaussianHMMTestMixin: covariance_type = None # set by subclasses @pytest.fixture(autouse=True) def setup(self): self.prng = prng = np.random.RandomState(10) self.n_components = n_components = 3 self.n_features = n_features = 3 self.startprob = prng.rand(n_components) self.startprob = self.startprob / self.startprob.sum() self.transmat = prng.rand(n_components, n_components) self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis], (1, n_components)) self.means = prng.randint(-20, 20, (n_components, n_features)) self.covars = make_covar_matrix( self.covariance_type, n_components, n_features, random_state=prng) @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_bad_covariance_type(self, implementation): with pytest.raises(ValueError): h = hmm.GaussianHMM(20, implementation=implementation, covariance_type='badcovariance_type') h.means_ = self.means h.covars_ = [] h.startprob_ = self.startprob h.transmat_ = self.transmat h._check() @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_score_samples_and_decode(self, implementation): h = hmm.GaussianHMM(self.n_components, self.covariance_type, init_params="st", implementation=implementation) h.means_ = self.means h.covars_ = self.covars # Make sure the means are far apart so posteriors.argmax() # picks the actual component used to generate the observations. h.means_ = 20 * h.means_ gaussidx = np.repeat(np.arange(self.n_components), 5) n_samples = len(gaussidx) X = (self.prng.randn(n_samples, self.n_features) + h.means_[gaussidx]) h._init(X) ll, posteriors = h.score_samples(X) assert posteriors.shape == (n_samples, self.n_components) assert_allclose(posteriors.sum(axis=1), np.ones(n_samples)) viterbi_ll, stateseq = h.decode(X) assert_allclose(stateseq, gaussidx) @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_sample(self, implementation, n=1000): h = hmm.GaussianHMM(self.n_components, self.covariance_type, implementation=implementation) h.startprob_ = self.startprob h.transmat_ = self.transmat # Make sure the means are far apart so posteriors.argmax() # picks the actual component used to generate the observations. h.means_ = 20 * self.means h.covars_ = np.maximum(self.covars, 0.1) X, state_sequence = h.sample(n, random_state=self.prng) assert X.shape == (n, self.n_features) assert len(state_sequence) == n @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit(self, implementation, params='stmc', n_iter=5, **kwargs): h = hmm.GaussianHMM(self.n_components, self.covariance_type, implementation=implementation) h.startprob_ = self.startprob h.transmat_ = normalized( self.transmat + np.diag(self.prng.rand(self.n_components)), 1) h.means_ = 20 * self.means h.covars_ = self.covars lengths = [10] * 10 X, _state_sequence = h.sample(sum(lengths), random_state=self.prng) # Mess up the parameters and see if we can re-learn them. 
# TODO: change the params and uncomment the check h.fit(X, lengths=lengths) # assert_log_likelihood_increasing(h, X, lengths, n_iter) @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_ignored_init_warns(self, implementation, caplog): h = hmm.GaussianHMM(self.n_components, self.covariance_type, implementation=implementation) h.startprob_ = self.startprob h.fit(np.random.randn(100, self.n_components)) assert len(caplog.records) == 1, caplog assert "will be overwritten" in caplog.records[0].getMessage() @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_too_little_data(self, implementation, caplog): h = hmm.GaussianHMM( self.n_components, self.covariance_type, init_params="", implementation=implementation) h.startprob_ = self.startprob h.transmat_ = self.transmat h.means_ = 20 * self.means h.covars_ = np.maximum(self.covars, 0.1) h._init(np.random.randn(5, self.n_components)) assert len(caplog.records) == 1 assert "degenerate solution" in caplog.records[0].getMessage() @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_sequences_of_different_length(self, implementation): lengths = [3, 4, 5] X = self.prng.rand(sum(lengths), self.n_features) h = hmm.GaussianHMM(self.n_components, self.covariance_type, implementation=implementation) # This shouldn't raise # ValueError: setting an array element with a sequence. h.fit(X, lengths=lengths) @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_with_length_one_signal(self, implementation): lengths = [10, 8, 1] X = self.prng.rand(sum(lengths), self.n_features) h = hmm.GaussianHMM(self.n_components, self.covariance_type, implementation=implementation) # This shouldn't raise # ValueError: zero-size array to reduction operation maximum which # has no identity h.fit(X, lengths=lengths) @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_zero_variance(self, implementation): # Example from issue #2 on GitHub. 
X = np.asarray([ [7.15000000e+02, 5.85000000e+02, 0.00000000e+00, 0.00000000e+00], [7.15000000e+02, 5.20000000e+02, 1.04705811e+00, -6.03696289e+01], [7.15000000e+02, 4.55000000e+02, 7.20886230e-01, -5.27055664e+01], [7.15000000e+02, 3.90000000e+02, -4.57946777e-01, -7.80605469e+01], [7.15000000e+02, 3.25000000e+02, -6.43127441e+00, -5.59954834e+01], [7.15000000e+02, 2.60000000e+02, -2.90063477e+00, -7.80220947e+01], [7.15000000e+02, 1.95000000e+02, 8.45532227e+00, -7.03294373e+01], [7.15000000e+02, 1.30000000e+02, 4.09387207e+00, -5.83621216e+01], [7.15000000e+02, 6.50000000e+01, -1.21667480e+00, -4.48131409e+01] ]) h = hmm.GaussianHMM(3, self.covariance_type, implementation=implementation) h.fit(X) @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_with_priors(self, implementation, params='stmc', n_iter=5): startprob_prior = 10 * self.startprob + 2.0 transmat_prior = 10 * self.transmat + 2.0 means_prior = self.means means_weight = 2.0 covars_weight = 2.0 if self.covariance_type in ('full', 'tied'): covars_weight += self.n_features covars_prior = self.covars h = hmm.GaussianHMM(self.n_components, self.covariance_type, implementation=implementation) h.startprob_ = self.startprob h.startprob_prior = startprob_prior h.transmat_ = normalized( self.transmat + np.diag(self.prng.rand(self.n_components)), 1) h.transmat_prior = transmat_prior h.means_ = 20 * self.means h.means_prior = means_prior h.means_weight = means_weight h.covars_ = self.covars h.covars_prior = covars_prior h.covars_weight = covars_weight lengths = [200] * 10 X, _state_sequence = h.sample(sum(lengths), random_state=self.prng) # Re-initialize the parameters and check that we can converge to # the original parameter values. h_learn = hmm.GaussianHMM(self.n_components, self.covariance_type, params=params, implementation=implementation) h_learn.n_iter = 0 h_learn.fit(X, lengths=lengths) assert_log_likelihood_increasing(h_learn, X, lengths, n_iter) # Make sure we've converged to the right parameters. 
# a) means assert_allclose(sorted(h.means_.tolist()), sorted(h_learn.means_.tolist()), 0.01) # b) covars are hard to estimate precisely from a relatively small # sample, thus the large threshold assert_allclose( *np.broadcast_arrays(sorted(h._covars_.tolist()), sorted(h_learn._covars_.tolist())), 10) class TestGaussianHMMWithSphericalCovars(GaussianHMMTestMixin): covariance_type = 'spherical' @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_startprob_and_transmat(self, implementation): self.test_fit(implementation, 'st') @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_underflow_from_scaling(self, implementation): # Setup an ill-conditioned dataset data1 = self.prng.normal(0, 1, 100).tolist() data2 = self.prng.normal(5, 1, 100).tolist() data3 = self.prng.normal(0, 1, 100).tolist() data4 = self.prng.normal(5, 1, 100).tolist() data = np.concatenate([data1, data2, data3, data4]) # Insert an outlier data[40] = 10000 data2d = data[:, None] lengths = [len(data2d)] h = hmm.GaussianHMM(2, n_iter=100, verbose=True, covariance_type=self.covariance_type, implementation=implementation, init_params="") h.startprob_ = [0.0, 1] h.transmat_ = [[0.4, 0.6], [0.6, 0.4]] h.means_ = [[0], [5]] h.covars_ = [[1], [1]] if implementation == "scaling": with pytest.raises(ValueError): h.fit(data2d, lengths) else: h.fit(data2d, lengths) class TestGaussianHMMWithDiagonalCovars(GaussianHMMTestMixin): covariance_type = 'diag' @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_covar_is_writeable(self, implementation): h = hmm.GaussianHMM(n_components=1, covariance_type="diag", init_params="c", implementation=implementation) X = np.random.normal(size=(1000, 5)) h._init(X) # np.diag returns a read-only view of the array in NumPy 1.9.X. # Make sure this doesn't prevent us from fitting an HMM with # diagonal covariance matrix. See PR#44 on GitHub for details # and discussion. assert h._covars_.flags["WRITEABLE"] @pytest.mark.parametrize("implementation", ["scaling", "log"]) def test_fit_left_right(self, implementation): transmat = np.zeros((self.n_components, self.n_components)) # Left-to-right: each state is connected to itself and its # direct successor. for i in range(self.n_components): if i == self.n_components - 1: transmat[i, i] = 1.0 else: transmat[i, i] = transmat[i, i + 1] = 0.5 # Always start in first state startprob = np.zeros(self.n_components) startprob[0] = 1.0 lengths = [10, 8, 1] X = self.prng.rand(sum(lengths), self.n_features) h = hmm.GaussianHMM(self.n_components, covariance_type="diag", params="mct", init_params="cm", implementation=implementation) h.startprob_ = startprob.copy() h.transmat_ = transmat.copy() h.fit(X) assert (h.startprob_[startprob == 0.0] == 0.0).all() assert (h.transmat_[transmat == 0.0] == 0.0).all() posteriors = h.predict_proba(X) assert not np.isnan(posteriors).any() assert_allclose(posteriors.sum(axis=1), 1.) score, state_sequence = h.decode(X, algorithm="viterbi") assert np.isfinite(score) class TestGaussianHMMWithTiedCovars(GaussianHMMTestMixin): covariance_type = 'tied' class TestGaussianHMMWithFullCovars(GaussianHMMTestMixin): covariance_type = 'full'
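# ---------------------------------------------------------------------------
# Added usage sketch (not part of the test suite): the minimal
# fit/score/predict flow these tests exercise, on synthetic data with
# arbitrary parameter values.
# ---------------------------------------------------------------------------
def demo_basic_usage():
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.normal(0, 1, (100, 2)),
                        rng.normal(5, 1, (100, 2))])
    model = hmm.GaussianHMM(n_components=2, covariance_type="diag", n_iter=50)
    model.fit(X, lengths=[len(X)])  # EM over a single observation sequence
    return model.score(X), model.predict(X)  # log-likelihood, Viterbi states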
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Network to play no-press Diplomacy. Comments referring to tensor shapes use the following abbreviations: B := batch size T := learners unroll length. PLAYERS := num players REP_SIZE := generic representation size for an entity. NUM_AREAS := observation_utils.NUM_AREAS NUM_PROVINCES := observation_utils.NUM_PROVINCES MAX_ACTION_INDEX := action_utils.MAX_ACTION_INDEX MAX_ORDERS := action_utils.MAX_ORDERS """ import collections import functools from typing import Any, Dict, NamedTuple, Optional, Sequence, Tuple import haiku as hk import jax import jax.numpy as jnp import numpy as np import tree from diplomacy.environment import action_utils from diplomacy.environment import observation_transformation from diplomacy.environment import observation_utils as utils from diplomacy.environment import province_order from diplomacy.environment import tree_utils def normalize_adjacency(adjacency: np.ndarray) -> np.ndarray: """Computes the symmetric normalized Laplacian of an adjacency matrix. Symmetric normalized Laplacians are the representation of choice for graphs in GraphConvNets (see https://arxiv.org/pdf/1609.02907.pdf). Args: adjacency: map adjacency matrix without self-connections. Returns: Symmetric normalized Laplacian matrix of adjacency. """ adjacency += np.eye(*adjacency.shape) d = np.diag(np.power(adjacency.sum(axis=1), -0.5)) return d.dot(adjacency).dot(d) class EncoderCore(hk.Module): """Graph Network with non-shared weights across nodes. The input to this network is organized by area and the topology is described by the symmetric normalized Laplacian of an adjacency matrix. """ def __init__(self, adjacency: jnp.ndarray, *, filter_size: int = 32, batch_norm_config: Optional[Dict[str, Any]] = None, name: str = "encoder_core"): """Constructor. Args: adjacency: [NUM_AREAS, NUM_AREAS] symmetric normalized Laplacian of the adjacency matrix. filter_size: output size of per-node linear layer. batch_norm_config: config dict for hk.BatchNorm. name: a name for the module. """ super().__init__(name=name) self._adjacency = adjacency self._filter_size = filter_size bnc = dict(decay_rate=0.9, eps=1e-5, create_scale=True, create_offset=True) bnc.update(batch_norm_config or {}) self._bn = hk.BatchNorm(**bnc) def __call__(self, tensors: jnp.ndarray, *, is_training: bool = False) -> jnp.ndarray: """One round of message passing. Output nodes are represented as the concatenation of the sum of incoming messages and the message sent. Args: tensors: [B, NUM_AREAS, REP_SIZE] is_training: Whether this is during training. 
Returns: [B, NUM_AREAS, 2 * self._filter_size] """ w = hk.get_parameter( "w", shape=tensors.shape[-2:] + (self._filter_size,), init=hk.initializers.VarianceScaling()) messages = jnp.einsum("bni,nij->bnj", tensors, w) tensors = jnp.matmul(self._adjacency, messages) tensors = jnp.concatenate([tensors, messages], axis=-1) tensors = self._bn(tensors, is_training=is_training) return jax.nn.relu(tensors) class BoardEncoder(hk.Module): """Encode board state. Constructs a representation of the board state, organized per-area. The output depends on the season in the game, the specific power (player) we are considering as well as the number of builds for this player. Both season and player are embedded before being included in the representation. We first construct a "shared representation", which does not depend on the specific player, and then include player in the later layers. """ def __init__(self, adjacency: jnp.ndarray, *, shared_filter_size: int = 32, player_filter_size: int = 32, num_shared_cores: int = 8, num_player_cores: int = 8, num_players: int = 7, num_seasons: int = utils.NUM_SEASONS, player_embedding_size: int = 16, season_embedding_size: int = 16, min_init_embedding: float = -1.0, max_init_embedding: float = 1.0, batch_norm_config: Optional[Dict[str, Any]] = None, name: str = "board_encoder"): """Constructor. Args: adjacency: [NUM_AREAS, NUM_AREAS] symmetric normalized Laplacian of the adjacency matrix. shared_filter_size: filter_size of each EncoderCore for shared layers. player_filter_size: filter_size of each EncoderCore for player-specific layers. num_shared_cores: number of shared layers, or rounds of message passing. num_player_cores: number of player-specific layers, or rounds of message passing. num_players: number of players. num_seasons: number of seasons. player_embedding_size: size of player embedding. season_embedding_size: size of season embedding. min_init_embedding: min value for hk.initializers.RandomUniform for player and season embedding. max_init_embedding: max value for hk.initializers.RandomUnifor for player and season embedding. batch_norm_config: config dict for hk.BatchNorm. name: a name for this module. """ super().__init__(name=name) self._num_players = num_players self._season_embedding = hk.Embed( num_seasons, season_embedding_size, w_init=hk.initializers.RandomUniform(min_init_embedding, max_init_embedding)) self._player_embedding = hk.Embed( num_players, player_embedding_size, w_init=hk.initializers.RandomUniform(min_init_embedding, max_init_embedding)) make_encoder = functools.partial( EncoderCore, adjacency, batch_norm_config=batch_norm_config) self._shared_encode = make_encoder(filter_size=shared_filter_size) self._shared_core = [ make_encoder(filter_size=shared_filter_size) for _ in range(num_shared_cores) ] self._player_encode = make_encoder(filter_size=player_filter_size) self._player_core = [ make_encoder(filter_size=player_filter_size) for _ in range(num_player_cores) ] bnc = dict(decay_rate=0.9, eps=1e-5, create_scale=True, create_offset=True) bnc.update(batch_norm_config or {}) self._bn = hk.BatchNorm(**bnc) def __call__(self, state_representation: jnp.ndarray, season: jnp.ndarray, build_numbers: jnp.ndarray, is_training: bool = False) -> jnp.ndarray: """Encoder board state. Args: state_representation: [B, NUM_AREAS, REP_SIZE]. season: [B, 1]. build_numbers: [B, 1]. is_training: Whether this is during training. Returns: [B, NUM_AREAS, 2 * self._player_filter_size]. 
""" season_context = jnp.tile( self._season_embedding(season)[:, None], (1, utils.NUM_AREAS, 1)) build_numbers = jnp.tile(build_numbers[:, None].astype(jnp.float32), (1, utils.NUM_AREAS, 1)) state_representation = jnp.concatenate( [state_representation, season_context, build_numbers], axis=-1) representation = self._shared_encode( state_representation, is_training=is_training) for layer in self._shared_core: representation += layer(representation, is_training=is_training) player_context = jnp.tile( self._player_embedding.embeddings[None, :, None, :], (season.shape[0], 1, utils.NUM_AREAS, 1)) representation = jnp.tile(representation[:, None], (1, self._num_players, 1, 1)) representation = jnp.concatenate([representation, player_context], axis=3) representation = hk.BatchApply(self._player_encode)( representation, is_training=is_training) for layer in self._player_core: representation += hk.BatchApply(layer)( representation, is_training=is_training) return self._bn(representation, is_training=is_training) class RecurrentOrderNetworkInput(NamedTuple): average_area_representation: jnp.ndarray legal_actions_mask: jnp.ndarray teacher_forcing: jnp.ndarray previous_teacher_forcing_action: jnp.ndarray temperature: jnp.ndarray def previous_action_from_teacher_or_sample( teacher_forcing: jnp.ndarray, previous_teacher_forcing_action: jnp.ndarray, previous_sampled_action_index: jnp.ndarray): # Get previous action, from input (for teacher forcing) or state. return jnp.where( teacher_forcing, previous_teacher_forcing_action, jnp.asarray(action_utils.shrink_actions( action_utils.POSSIBLE_ACTIONS))[previous_sampled_action_index]) def one_hot_provinces_for_all_actions(): return jax.nn.one_hot( jnp.asarray(action_utils.ordered_province(action_utils.POSSIBLE_ACTIONS)), utils.NUM_PROVINCES) def blocked_provinces_and_actions( previous_action: jnp.ndarray, previous_blocked_provinces: jnp.ndarray): """Calculate which provinces and actions are illegal.""" # Compute which provinces are blocked by past decisions. updated_blocked_provinces = jnp.maximum( previous_blocked_provinces, ordered_provinces_one_hot(previous_action)) blocked_actions = jnp.squeeze( jnp.matmul(one_hot_provinces_for_all_actions(), updated_blocked_provinces[..., None]), axis=-1) blocked_actions *= jnp.logical_not( jnp.asarray(is_waive(action_utils.POSSIBLE_ACTIONS))) return updated_blocked_provinces, blocked_actions def sample_from_logits( logits: jnp.ndarray, legal_action_mask: jnp.ndarray, temperature: jnp.ndarray,): """Sample from logits respecting a legal actions mask.""" deterministic_logits = jnp.where( jax.nn.one_hot( jnp.argmax(logits, axis=-1), num_classes=action_utils.MAX_ACTION_INDEX, dtype=jnp.bool_), 0, jnp.finfo(jnp.float32).min) stochastic_logits = jnp.where(legal_action_mask, logits / temperature, jnp.finfo(jnp.float32).min) logits_for_sampling = jnp.where( jnp.equal(temperature, 0.0), deterministic_logits, stochastic_logits) # Sample an action for the current province and update the state so that # following orders can be conditioned on this decision. key = hk.next_rng_key() return jax.random.categorical( key, logits_for_sampling, axis=-1) class RelationalOrderDecoderState(NamedTuple): prev_orders: jnp.ndarray blocked_provinces: jnp.ndarray sampled_action_index: jnp.ndarray class RelationalOrderDecoder(hk.RNNCore): """RelationalOrderDecoder. Relational Order Decoders (ROD)s output order logits for a unit, based on the current board representation, and the orders selected for other units so far. 
""" def __init__(self, adjacency: jnp.ndarray, *, filter_size: int = 32, num_cores: int = 4, batch_norm_config: Optional[Dict[str, Any]] = None, name: str = "relational_order_decoder"): """Constructor. Args: adjacency: [NUM_PROVINCES, NUM_PROVINCES] symmetric normalized Laplacian of the per-province adjacency matrix. filter_size: filter_size for relational cores num_cores: number of relational cores batch_norm_config: config dict for hk.BatchNorm name: module's name. """ super().__init__(name=name) self._filter_size = filter_size self._encode = EncoderCore( adjacency, filter_size=self._filter_size, batch_norm_config=batch_norm_config) self._cores = [] for _ in range(num_cores): self._cores.append( EncoderCore( adjacency, filter_size=self._filter_size, batch_norm_config=batch_norm_config)) self._projection_size = 2 * self._filter_size # (nodes, messages) bnc = dict(decay_rate=0.9, eps=1e-5, create_scale=True, create_offset=True) bnc.update(batch_norm_config or {}) self._bn = hk.BatchNorm(**bnc) def _scatter_to_province(self, vector: jnp.ndarray, scatter: jnp.ndarray) -> jnp.ndarray: """Scatters vector to its province location in inputs. Args: vector: [B*PLAYERS, REP_SIZE] scatter: [B*PLAYER, NUM_PROVINCES] -- one-hot encoding. Returns: [B*PLAYERS, NUM_AREAS, REP_SIZE] where vectors has been added in the location prescribed by scatter. """ return vector[:, None, :] * scatter[..., None] def _gather_province(self, inputs: jnp.ndarray, gather: jnp.ndarray) -> jnp.ndarray: """Gathers specific province location from inputs. Args: inputs: [B*PLAYERS, NUM_PROVINCES, REP_SIZE] gather: [B*PLAYERS, NUM_PROVINCES] -- one-hot encoding Returns: [B*PLAYERS, REP_SIZE] gathered from inputs. """ return jnp.sum(inputs * gather[..., None], axis=1) def _relational_core(self, previous_orders: jnp.ndarray, board_representation, is_training: bool = False): """Apply relational core to current province and previous decisions.""" inputs = jnp.concatenate([previous_orders, board_representation], axis=-1) representation = self._encode(inputs, is_training=is_training) for core in self._cores: representation += core(representation, is_training=is_training) return self._bn(representation, is_training=is_training) def __call__( self, inputs: RecurrentOrderNetworkInput, prev_state: RelationalOrderDecoderState, *, is_training: bool = False, ) -> Tuple[jnp.ndarray, RelationalOrderDecoderState]: """Issue an order based on board representation and previous decisions. Args: inputs: RecurrentOrderNetworkInput( average_area_representation <-- [B*PLAYERS, REP_SIZE], legal_actions_mask <-- [B*PLAYERS, MAX_ACTION_INDEX], teacher_forcing <-- [B*PLAYERS], previous_teacher_forcing_action <-- [B*PLAYERS], temperature <-- [B*PLAYERS, 1] ) prev_state: RelationalOrderDecoderState( prev_orders <-- [B*PLAYERS, NUM_PROVINCES, 2 * self._filter_size], blocked_provinces <-- [B*PLAYERS, NUM_PROVINCES], sampled_action_index <-- [B*PLAYER] ) is_training: Whether this is during training. 
Returns: logits with shape [B*PLAYERS, MAX_ACTION_INDEX], updated RelationalOrderDecoderState shapes as above """ projection = hk.get_parameter( "projection", shape=(action_utils.MAX_ACTION_INDEX, self._projection_size), init=hk.initializers.VarianceScaling()) previous_action = previous_action_from_teacher_or_sample( inputs.teacher_forcing, inputs.previous_teacher_forcing_action, prev_state.sampled_action_index,) updated_blocked_provinces, blocked_actions = blocked_provinces_and_actions( previous_action, prev_state.blocked_provinces) # Construct representation of previous order. previous_order_representation = (previous_action[:, None] > 0) * projection[ previous_action >> (action_utils.ACTION_INDEX_START - 32)] legal_actions_provinces = ( jnp.matmul(inputs.legal_actions_mask, one_hot_provinces_for_all_actions()) > 0) # Place the representation of the province currently under consideration in # the appropriate slot in the graph. scattered_board_representation = jnp.array(0.0) + self._scatter_to_province( inputs.average_area_representation, legal_actions_provinces) # Place previous order in the appropriate slot in the graph. scattered_previous_orders = ( prev_state.prev_orders + self._scatter_to_province( previous_order_representation, jax.nn.one_hot( ordered_provinces(previous_action), utils.NUM_PROVINCES))) # Construct order logits conditional on province representation and previous # orders. board_representation = self._relational_core( scattered_previous_orders, scattered_board_representation, is_training=is_training) province_representation = self._gather_province( board_representation, legal_actions_provinces) order_logits = jnp.matmul(province_representation, projection.T) # Eliminate illegal actions is_legal_action = inputs.legal_actions_mask * (blocked_actions == 0) order_logits = jnp.where(is_legal_action, order_logits, jnp.finfo(jnp.float32).min) action_index = sample_from_logits(order_logits, is_legal_action, inputs.temperature) return order_logits, RelationalOrderDecoderState( prev_orders=scattered_previous_orders, blocked_provinces=updated_blocked_provinces, sampled_action_index=action_index ) def initial_state( self, batch_size: int, dtype: np.dtype = jnp.float32) -> RelationalOrderDecoderState: return RelationalOrderDecoderState( prev_orders=jnp.zeros( shape=(batch_size, utils.NUM_PROVINCES, 2 * self._filter_size), dtype=dtype), blocked_provinces=jnp.zeros( shape=(batch_size, utils.NUM_PROVINCES), dtype=dtype), sampled_action_index=jnp.zeros( shape=(batch_size,), dtype=jnp.int32), ) def ordered_provinces(actions: jnp.ndarray): return jnp.bitwise_and( jnp.right_shift(actions, action_utils.ACTION_ORDERED_PROVINCE_START), (1 << action_utils.ACTION_PROVINCE_BITS) - 1) def is_waive(actions: jnp.ndarray): return jnp.equal(jnp.bitwise_and( jnp.right_shift(actions, action_utils.ACTION_ORDER_START), (1 << action_utils.ACTION_ORDER_BITS) - 1), action_utils.WAIVE) def loss_from_logits(logits, actions, discounts): """Returns cross-entropy loss, unless actions are None; then it's entropy.""" if actions is not None: action_indices = actions >> (action_utils.ACTION_INDEX_START - 32) loss = jnp.take_along_axis( -jax.nn.log_softmax(logits), action_indices[..., None], axis=-1).squeeze(-1) # Only look at loss for actual actions. loss = jnp.where(actions > 0, loss, 0) else: loss = (jax.nn.softmax(logits) * -jax.nn.log_softmax(logits)).sum(-1) loss = loss.sum(3) # Only look at adequate players. 
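  # `discounts` doubles as a mask in the line below: entries equal to 0 remove
  # the corresponding loss terms entirely before the final mean is taken.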
  loss *= discounts
  return loss.mean()


def ordered_provinces_one_hot(actions, dtype=jnp.float32):
  provinces = jax.nn.one_hot(
      action_utils.ordered_province(actions), utils.NUM_PROVINCES, dtype=dtype)
  provinces *= ((actions > 0) & ~action_utils.is_waive(actions)).astype(
      dtype)[..., None]
  return provinces


def reorder_actions(actions, areas, season):
  """Reorder actions to match area ordering."""
  area_provinces = jax.nn.one_hot(
      [utils.province_id_and_area_index(a)[0] for a in range(utils.NUM_AREAS)],
      utils.NUM_PROVINCES,
      dtype=jnp.float32)
  provinces = jnp.tensordot(
      areas.astype(jnp.float32), area_provinces, (-1, 0)).astype(jnp.int32)
  action_provinces = ordered_provinces_one_hot(actions, jnp.int32)
  ordered_actions = jnp.sum(
      jnp.sum(actions[..., None] * action_provinces, -2, keepdims=True) *
      provinces, -1)
  n_actions_found = jnp.sum(
      jnp.sum(action_provinces, -2, keepdims=True) * provinces, -1)
  # `actions` has `-1`s for missing actions.
  ordered_actions += n_actions_found - 1
  is_build = jnp.equal(season[..., None, None], utils.Season.BUILDS.value)
  tile_multiples = (1, 1) + actions.shape[2:]
  skip_reorder = jnp.tile(is_build, tile_multiples)
  reordered_actions = jnp.where(skip_reorder, actions, ordered_actions)
  return reordered_actions


class Network(hk.Module):
  """Policy and Value Networks for Diplomacy.

  This network processes the board state to produce actions and values for a
  full turn of Diplomacy. In Diplomacy, at each turn, all players submit
  orders for all the units they control. This Network outputs orders for each
  unit, one by one. Orders for later units depend on decisions made
  previously. We organize this as follows:

  shared_inference: computations shared by all units (e.g. encode board).
  initial_inference: set up initial state to implement inter-unit dependence.
  step_inference: compute order for one unit.
  inference: full turn inference (organizes other methods in obvious ways).
  """

  @classmethod
  def initial_inference_params_and_state(
      cls, constructor_kwargs, rng, num_players):

    def _inference(observations):
      network = cls(**constructor_kwargs)  # pytype: disable=not-instantiable
      return network.inference(observations)

    inference_fn = hk.transform_with_state(_inference)
    params, net_state = inference_fn.init(
        rng,
        tree_utils.tree_expand_dims(
            cls.get_observation_transformer(
                constructor_kwargs).zero_observation(num_players)))
    return params, net_state

  @classmethod
  def get_observation_transformer(cls, class_constructor_kwargs, rng_key=None):
    del class_constructor_kwargs  # Unused.
    return observation_transformation.GeneralObservationTransformer(
        rng_key=rng_key)

  @classmethod
  def zero_observation(cls, class_constructor_kwargs, num_players):
    return cls.get_observation_transformer(
        class_constructor_kwargs).zero_observation(num_players)

  def __init__(
      self,
      *,
      rnn_ctor,
      rnn_kwargs,
      name: str = "delta",
      num_players: int = 7,
      area_mdf: province_order.MapMDF = province_order.MapMDF.BICOASTAL_MAP,
      province_mdf: province_order.MapMDF = province_order.MapMDF.STANDARD_MAP,
      is_training: bool = False,
      shared_filter_size: int = 32,
      player_filter_size: int = 32,
      num_shared_cores: int = 8,
      num_player_cores: int = 8,
      value_mlp_hidden_layer_sizes: Sequence[int] = (256,),
      actions_since_last_moves_embedding_size: int = 10,
      batch_norm_config: Optional[Dict[str, Any]] = None,
  ):
    """Constructor.

    Args:
      rnn_ctor: Constructor for the RNN. The RNN will be constructed as
        `rnn_ctor(batch_norm_config=batch_norm_config, **rnn_kwargs)`.
      rnn_kwargs: kwargs for the RNN.
      name: a name for this module.
      num_players: number of players in the game, usually 7 (standard
        Diplomacy) or 2 (1v1 Diplomacy).
      area_mdf: path to mdf file containing a description of the board
        organized by area (e.g. Spain, Spain North Coast and Spain South
        Coast).
      province_mdf: path to mdf file containing a description of the board
        organized by province (e.g. Spain).
      is_training: whether this is a training instance.
      shared_filter_size: filter size in BoardEncoder, shared (across players)
        layers.
      player_filter_size: filter size in BoardEncoder, player-specific layers.
      num_shared_cores: depth of BoardEncoder, shared (across players) layers.
      num_player_cores: depth of BoardEncoder, player-specific layers.
      value_mlp_hidden_layer_sizes: sizes for the value head. An output layer
        of size num_players is appended by this module.
      actions_since_last_moves_embedding_size: embedding size for last moves
        actions.
      batch_norm_config: kwargs for batch norm, e.g. the cross_replica_axis.
    """
    super().__init__()
    self._area_adjacency = normalize_adjacency(
        province_order.build_adjacency(
            province_order.get_mdf_content(area_mdf)))
    self._province_adjacency = normalize_adjacency(
        province_order.build_adjacency(
            province_order.get_mdf_content(province_mdf)))
    self._is_training = is_training
    self._moves_actions_encoder = hk.Embed(
        action_utils.MAX_ACTION_INDEX + 1,
        actions_since_last_moves_embedding_size)
    self._board_encoder = BoardEncoder(
        self._area_adjacency,
        shared_filter_size=shared_filter_size,
        player_filter_size=player_filter_size,
        num_shared_cores=num_shared_cores,
        num_player_cores=num_player_cores,
        batch_norm_config=batch_norm_config)
    self._last_moves_encoder = BoardEncoder(
        self._area_adjacency,
        shared_filter_size=shared_filter_size,
        player_filter_size=player_filter_size,
        num_shared_cores=num_shared_cores,
        num_player_cores=num_player_cores,
        batch_norm_config=batch_norm_config)
    self._rnn = rnn_ctor(batch_norm_config=batch_norm_config, **rnn_kwargs)
    self._value_mlp = hk.nets.MLP(
        output_sizes=list(value_mlp_hidden_layer_sizes) + [num_players])

  def loss_info(
      self,
      step_types: jnp.ndarray,  # [B, T+1].
      rewards: jnp.ndarray,  # [B, T+1].
      discounts: jnp.ndarray,  # [B, T+1].
      observations: Tuple[  # Batch dimensions [B, T+1].
          Dict[str, jnp.ndarray], Dict[str, jnp.ndarray], jnp.ndarray],
      step_outputs: Dict[str, Any]  # Batch dimensions [B, T].
  ) -> Dict[str, jnp.ndarray]:
    """Losses to update the network's policy given a batch of experience.

    `(step_types[i], rewards[i], observations[i])` is the environment's
    response to `actions[i - 1]`. In other words, the reward accumulated over
    this sequence is `sum(rewards[1:])`, obtained by taking actions `actions`.
    The observation at the end of the sequence (resulting from
    `step_outputs.actions[-1]`) is `observations[-1]`.

    Args:
      step_types: tensor of step types.
      rewards: tensor of rewards.
      discounts: tensor of discounts.
      observations: observations, in the format given by observation_transform.
        This is a tuple (initial_observation, step_observations, num_actions).
        Within a turn, step_observations are stacked for each step.
      step_outputs: tensor of network outputs produced by inference.

    Returns:
      dict of logging outputs, including per-batch per-timestep losses.
""" del step_types, rewards # unused observations = tree.map_structure(lambda xs: xs[:, :-1], observations) discounts = discounts[:, :-1] returns = step_outputs["returns"] initial_observation, step_observations, sequence_lengths = observations # Losses are always built with temperature 1.0 step_observations["temperature"] = jnp.ones_like( step_observations["temperature"]) # Reorder the actions to match with the legal_actions ordering. actions = reorder_actions( step_outputs["actions"], season=initial_observation["season"], areas=step_observations["areas"]) # Get all but the last order for teacher forcing. last_action = actions[..., :-1] # Pad the previous action with -1 for the first order, as that is # never forced. last_action = jnp.concatenate( [-jnp.ones_like(last_action[:, :, :, :1]), last_action], axis=3) # Set teacher forcing actions. step_observations["last_action"] = last_action (initial_outputs, step_outputs) = hk.BatchApply( functools.partial(self.inference, all_teacher_forcing=True))( (initial_observation, step_observations, sequence_lengths)) policy_logits = step_outputs["logits"] value_logits = initial_outputs["value_logits"] policy_loss = loss_from_logits(policy_logits, actions, discounts) policy_entropy = loss_from_logits(policy_logits, None, discounts) value_loss = -(jax.nn.log_softmax(value_logits) * returns).sum(-1) * jnp.max(discounts, -1) returns_entropy = -( jnp.sum(returns * jnp.log(returns + 1e-7), -1) * jnp.max(discounts, -1)) loss = policy_loss + value_loss # Get accuracy. labels = ( jnp.asarray(action_utils.shrink_actions( action_utils.POSSIBLE_ACTIONS)) == actions[..., None]) greedy_prediction = jnp.argmax(policy_logits, axis=-1) gathered_labels = jnp.take_along_axis( labels.astype(jnp.float32), greedy_prediction[..., None], -1).squeeze(-1) accuracy = jnp.count_nonzero( gathered_labels, axis=(2, 3)).astype(jnp.float32) accuracy_weights = jnp.count_nonzero( labels, axis=(2, 3, 4)).astype(jnp.float32) accuracy = jnp.sum(accuracy) / jnp.sum(accuracy_weights) # Get full-move accuracy. nonempty = jnp.any(labels, (-1, -2)) whole_correct = jnp.equal( jnp.count_nonzero(gathered_labels, -1), jnp.count_nonzero(labels, (-1, -2))) whole_accuracy = jnp.count_nonzero(whole_correct & nonempty, 2).astype(jnp.float32) whole_accuracy_weights = jnp.count_nonzero( nonempty, axis=2).astype(jnp.float32) whole_accuracy = jnp.sum(whole_accuracy) / jnp.sum(whole_accuracy_weights) # For comparison, calculate the loss from a uniform random agent. 
    ur_policy_logits = jnp.finfo(jnp.float32).min * (
        1 - step_observations["legal_actions_mask"])
    ur_policy_logits = jnp.broadcast_to(ur_policy_logits, policy_logits.shape)
    ur_policy_loss = loss_from_logits(ur_policy_logits, actions, discounts)
    ur_value_loss = -(jax.nn.log_softmax(jnp.zeros_like(value_logits)) *
                      returns).sum(-1) * jnp.max(discounts, -1)

    loss_dict = {
        "policy_loss": policy_loss,
        "policy_entropy": policy_entropy,
        "value_loss": value_loss,
        "total_loss": loss,
        "returns_entropy": returns_entropy,
        "uniform_random_policy_loss": ur_policy_loss,
        "uniform_random_value_loss": ur_value_loss,
        "uniform_random_total_loss": ur_value_loss + ur_policy_loss,
        "accuracy": accuracy,
        "accuracy_weight": jnp.sum(accuracy_weights),
        "whole_accuracy": whole_accuracy,
        "whole_accuracy_weight": jnp.sum(whole_accuracy_weights),
    }
    return tree.map_structure(jnp.mean, loss_dict)

  def loss(self, step_types, rewards, discounts, observations,
           step_outputs) -> jnp.ndarray:
    """Imitation learning loss."""
    losses = self.loss_info(step_types, rewards, discounts, observations,
                            step_outputs)
    return losses["total_loss"]

  def shared_rep(
      self, initial_observation: Dict[str, jnp.ndarray]
  ) -> Tuple[Dict[str, jnp.ndarray], jnp.ndarray]:
    """Processing shared by all units that require an order.

    Encodes the board state, season and previous moves, and computes the
    value head.

    Args:
      initial_observation: see initial_observation_spec.

    Returns:
      value information and shared board representation.
    """
    # Stitch together board current situation and past moves.
    season = initial_observation["season"]
    build_numbers = initial_observation["build_numbers"]
    board = initial_observation["board_state"]
    last_moves = initial_observation["last_moves_phase_board_state"]
    moves_actions = jnp.sum(
        self._moves_actions_encoder(
            1 + initial_observation["actions_since_last_moves_phase"]),
        axis=-2)
    last_moves = jnp.concatenate([last_moves, moves_actions], axis=-1)

    # Compute board representation.
    # [B, PLAYERS, NUM_AREAS, REP_SIZE]
    board_representation = self._board_encoder(
        board, season, build_numbers, is_training=self._is_training)
    last_moves_representation = self._last_moves_encoder(
        last_moves, season, build_numbers, is_training=self._is_training)
    area_representation = jnp.concatenate(
        [board_representation, last_moves_representation], axis=-1)

    # Compute value head.
    value_logits = self._value_mlp(jnp.mean(area_representation, axis=(1, 2)))
    return (collections.OrderedDict(
        value_logits=value_logits,
        values=jax.nn.softmax(value_logits)), area_representation)

  def initial_inference(
      self, shared_rep: jnp.ndarray,
      player: jnp.ndarray) -> Tuple[Dict[str, jnp.ndarray], Any]:
    """Set up initial state to implement inter-unit dependence."""
    batch_size = shared_rep.shape[0]
    return (jax.vmap(functools.partial(jnp.take, axis=0))(
        shared_rep, player.squeeze(1)),
            self._rnn.initial_state(batch_size=batch_size))

  def step_inference(
      self,
      step_observation: Dict[str, jnp.ndarray],
      inference_internal_state: Any,
      all_teacher_forcing: bool = False
  ) -> Tuple[Dict[str, jnp.ndarray], Tuple[jnp.ndarray, Any]]:
    """Computes logits for 1 unit that requires an order.

    Args:
      step_observation: see step_observation_spec.
      inference_internal_state: Board representation for each player and
        RelationalOrderDecoder previous state.
      all_teacher_forcing: Whether to leave sampled actions out of the
        inference state (for a learning speed boost).
Returns: action information for this unit, updated inference_internal_state """ area_representation, rnn_state = inference_internal_state average_area_representation = jnp.matmul( step_observation["areas"][:, None].astype(np.float32), area_representation).squeeze(1) / utils.NUM_AREAS inputs = RecurrentOrderNetworkInput( average_area_representation=average_area_representation, legal_actions_mask=step_observation["legal_actions_mask"], teacher_forcing=step_observation["last_action"] != 0, previous_teacher_forcing_action=step_observation["last_action"], temperature=step_observation["temperature"], ) logits, updated_rnn_state = self._rnn( inputs, rnn_state, is_training=self._is_training) policy = jax.nn.softmax(logits) legal_action_mask = logits > jnp.finfo(jnp.float32).min actions = jnp.take_along_axis( jnp.asarray(action_utils.shrink_actions( action_utils.POSSIBLE_ACTIONS))[None], updated_rnn_state.sampled_action_index[..., None], axis=1).squeeze(1) if all_teacher_forcing: updated_rnn_state = updated_rnn_state._replace( sampled_action_index=jnp.zeros_like( updated_rnn_state.sampled_action_index)) next_inference_internal_state = (area_representation, updated_rnn_state) return collections.OrderedDict( actions=actions, legal_action_mask=legal_action_mask, policy=policy, logits=logits), next_inference_internal_state def inference( self, observation: Tuple[Dict[str, jnp.ndarray], Dict[str, jnp.ndarray], jnp.ndarray], num_copies_each_observation: Optional[Tuple[int]] = None, all_teacher_forcing: bool = False, ) -> Tuple[Dict[str, jnp.ndarray], Dict[str, jnp.ndarray]]: """Computes value estimates and actions for the full turn. Args: observation: see observation_spec. num_copies_each_observation: How many times to copy each observation. This allows us to produce multiple samples for the same state without recalculating the deterministic part of the Network. all_teacher_forcing: Whether to leave sampled actions out of the inference state (for a learning speed boost). Returns: Value and action information for the full turn. """ initial_observation, step_observations, seq_lengths = observation num_players = int(seq_lengths.shape[1]) initial_outputs, shared_rep = self.shared_rep(initial_observation) initial_inference_states_list = [] batch_dim = jnp.shape(seq_lengths)[0] for player in range(num_players): player_tensor = jnp.full((1, 1), player) player_tensor = jnp.tile(player_tensor, (batch_dim, 1)) initial_inference_states_list.append( self.initial_inference(shared_rep, player_tensor)) initial_inference_states = tree.map_structure( lambda *x: jnp.stack(x, axis=1), *initial_inference_states_list) rnn_inputs = (step_observations, seq_lengths, initial_inference_states) # Replicate the inputs to the RNN according to num_copies_each_observation. if num_copies_each_observation is not None: num_copies = np.array(num_copies_each_observation) rnn_inputs = tree.map_structure( lambda x: jnp.repeat(x, num_copies, axis=0), rnn_inputs) def _apply_rnn_one_player( player_step_observations, # [B, 17, ...] 
player_sequence_length, # [B] player_initial_state): # [B] player_step_observations = tree.map_structure(jnp.asarray, player_step_observations) def apply_one_step(state, i): output, next_state = self.step_inference( tree.map_structure(lambda x: x[:, i], player_step_observations), state, all_teacher_forcing=all_teacher_forcing) def update(x, y, i=i): return jnp.where( i >= player_sequence_length[np.s_[:,] + (None,) * (x.ndim - 1)], x, y) state = tree.map_structure(update, state, next_state) zero_output = tree.map_structure(jnp.zeros_like, output) return state, tree.map_structure(update, zero_output, output) _, outputs = hk.scan(apply_one_step, player_initial_state, jnp.arange(action_utils.MAX_ORDERS)) return tree.map_structure(lambda x: x.swapaxes(0, 1), outputs) outputs = hk.BatchApply(_apply_rnn_one_player)(*rnn_inputs) return initial_outputs, outputs
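# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original module): the masked-sampling
# pattern used by `sample_from_logits` above, in plain JAX and without the
# Haiku RNG plumbing. The helper name and the toy inputs are hypothetical.

import jax
import jax.numpy as jnp


def masked_sample(key, logits, legal_mask, temperature):
  """Sample an index from `logits`, restricted to entries where `legal_mask` is True."""
  neg_inf = jnp.finfo(jnp.float32).min
  masked = jnp.where(legal_mask, logits, neg_inf)
  # Temperature 0 selects the (legal) argmax deterministically ...
  greedy = jnp.where(
      jax.nn.one_hot(jnp.argmax(masked, -1), logits.shape[-1],
                     dtype=jnp.bool_),
      0.0, neg_inf)
  # ... otherwise sample from the temperature-scaled, masked logits.
  stochastic = jnp.where(legal_mask, masked / temperature, neg_inf)
  return jax.random.categorical(
      key, jnp.where(jnp.equal(temperature, 0.0), greedy, stochastic),
      axis=-1)


if __name__ == "__main__":
  key = jax.random.PRNGKey(0)
  logits = jnp.array([1.0, 3.0, 2.0, 0.5])
  legal = jnp.array([True, False, True, True])
  print(masked_sample(key, logits, legal, jnp.asarray(1.0)))  # one of 0, 2, 3
  print(masked_sample(key, logits, legal, jnp.asarray(0.0)))  # always 2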
"""Search task tests""" # pylint: disable=redefined-outer-name,unused-argument from django.conf import settings from praw.exceptions import PRAWException from prawcore.exceptions import PrawcoreException, NotFound import pytest from channels.factories.models import CommentFactory, PostFactory from channels.models import Post from channels.constants import LINK_TYPE_LINK, LINK_TYPE_SELF from course_catalog.constants import PlatformType from course_catalog.factories import ( CourseFactory, ProgramFactory, VideoFactory, UserListFactory, ContentFileFactory, LearningResourceRunFactory, PodcastFactory, PodcastEpisodeFactory, ) from open_discussions.factories import UserFactory from open_discussions.test_utils import assert_not_raises from search.api import ( gen_course_id, gen_program_id, gen_video_id, gen_user_list_id, gen_profile_id, gen_content_file_id, gen_podcast_id, gen_podcast_episode_id, ) from search.constants import ( COURSE_TYPE, POST_TYPE, PROGRAM_TYPE, COMMENT_TYPE, VALID_OBJECT_TYPES, VIDEO_TYPE, USER_LIST_TYPE, PROFILE_TYPE, PODCAST_TYPE, PODCAST_EPISODE_TYPE, RESOURCE_FILE_TYPE, ) from search.exceptions import ReindexException, RetryException from search.serializers import ( ESCourseSerializer, ESProgramSerializer, ESVideoSerializer, ESUserListSerializer, ESProfileSerializer, ESContentFileSerializer, ESPodcastSerializer, ESPodcastEpisodeSerializer, ) from search import tasks from search.tasks import ( create_document, create_post_document, update_link_post_with_preview, update_document_with_partial, finish_recreate_index, increment_document_integer_field, update_field_values_by_query, index_posts, start_recreate_index, wrap_retry_exception, index_comments, index_courses, index_videos, delete_document, upsert_course, upsert_program, upsert_video, upsert_user_list, upsert_profile, upsert_content_file, index_course_content_files, index_run_content_files, delete_run_content_files, upsert_podcast, upsert_podcast_episode, start_update_index, ) pytestmark = pytest.mark.django_db @pytest.fixture() def wrap_retry_mock(mocker): """ Patches the wrap_retry_exception context manager and asserts that it was called by any test that uses it """ wrap_mock = mocker.patch("search.tasks.wrap_retry_exception") yield wrap_mock.assert_called_once_with(PrawcoreException, PRAWException) @pytest.fixture() def mocked_api(mocker): """Mock object that patches the channels API""" return mocker.patch("search.tasks.api") def test_create_document_task(mocked_api): """Test that the create_document task calls the indexing API function with the right args""" indexing_api_args = ("doc_id", {"test": "data"}) create_document(*indexing_api_args) assert mocked_api.create_document.call_count == 1 assert mocked_api.create_document.call_args[0] == indexing_api_args def test_update_document_with_partial_task(mocked_api): """Test that the create_document task calls the indexing API function with the right args""" indexing_api_args = ("doc_id", {"test": "data"}, COMMENT_TYPE) update_document_with_partial(*indexing_api_args) assert mocked_api.update_document_with_partial.call_count == 1 assert mocked_api.update_document_with_partial.call_args[0] == indexing_api_args def test_upsert_course_task(mocked_api): """Test that upsert_course will serialize the course data and upsert it to the ES index""" course = CourseFactory.create() upsert_course(course.id) data = ESCourseSerializer(course).data mocked_api.upsert_document.assert_called_once_with( gen_course_id(course.platform, course.course_id), data, COURSE_TYPE, 
        retry_on_conflict=settings.INDEXING_ERROR_RETRIES,
    )


def test_upsert_program_task(mocked_api):
    """Test that upsert_program will serialize the program data and upsert it to the ES index"""
    program = ProgramFactory.create()
    upsert_program(program.id)
    data = ESProgramSerializer(program).data
    mocked_api.upsert_document.assert_called_once_with(
        gen_program_id(program),
        data,
        PROGRAM_TYPE,
        retry_on_conflict=settings.INDEXING_ERROR_RETRIES,
    )


def test_upsert_video_task(mocked_api):
    """Test that upsert_video will serialize the video data and upsert it to the ES index"""
    video = VideoFactory.create()
    upsert_video(video.id)
    video_data = ESVideoSerializer(video).data
    mocked_api.upsert_document.assert_called_once_with(
        gen_video_id(video),
        video_data,
        VIDEO_TYPE,
        retry_on_conflict=settings.INDEXING_ERROR_RETRIES,
    )


def test_upsert_user_list_task(mocked_api):
    """Test that upsert_user_list will serialize the UserList data and upsert it to the ES index"""
    user_list = UserListFactory.create()
    upsert_user_list(user_list.id)
    data = ESUserListSerializer(user_list).data
    mocked_api.upsert_document.assert_called_once_with(
        gen_user_list_id(user_list),
        data,
        USER_LIST_TYPE,
        retry_on_conflict=settings.INDEXING_ERROR_RETRIES,
    )


def test_upsert_podcast_task(mocked_api):
    """Test that upsert_podcast will serialize the podcast data and upsert it to the ES index"""
    podcast = PodcastFactory.create()
    upsert_podcast(podcast.id)
    podcast_data = ESPodcastSerializer(podcast).data
    mocked_api.upsert_document.assert_called_once_with(
        gen_podcast_id(podcast),
        podcast_data,
        PODCAST_TYPE,
        retry_on_conflict=settings.INDEXING_ERROR_RETRIES,
    )


def test_upsert_podcast_episode_task(mocked_api):
    """Test that upsert_podcast_episode will serialize the podcast episode data and upsert it to the ES index"""
    podcast_episode = PodcastEpisodeFactory.create()
    upsert_podcast_episode(podcast_episode.id)
    podcast_episode_data = ESPodcastEpisodeSerializer(podcast_episode).data
    mocked_api.upsert_document.assert_called_once_with(
        gen_podcast_episode_id(podcast_episode),
        podcast_episode_data,
        PODCAST_EPISODE_TYPE,
        retry_on_conflict=settings.INDEXING_ERROR_RETRIES,
    )


def test_increment_document_integer_field_task(mocked_api):
    """
    Test that the increment_document_integer_field task calls the indexing
    API function with the right args
    """
    indexing_api_args = ("doc_id", {"test": "data"}, 1, POST_TYPE)
    increment_document_integer_field(*indexing_api_args)
    assert mocked_api.increment_document_integer_field.call_count == 1
    assert mocked_api.increment_document_integer_field.call_args[0] == indexing_api_args


def test_update_field_values_by_query(mocked_api):
    """
    Test that the update_field_values_by_query task calls the indexing API
    function with the right args
    """
    indexing_api_args = ({"query": {}}, {"field1": "value1"}, [POST_TYPE])
    update_field_values_by_query(*indexing_api_args)
    assert mocked_api.update_field_values_by_query.call_count == 1
    assert mocked_api.update_field_values_by_query.call_args[0] == indexing_api_args


@pytest.mark.parametrize(
    "post_type,post_url,exp_update_link_post",
    [[LINK_TYPE_LINK, "example.com", True], [LINK_TYPE_SELF, None, False]],
)
def test_create_post_document(mocker, post_type, post_url, exp_update_link_post):
    """
    Test that the create_post_document task calls the API method to create a post document,
    and for link posts, also calls the API method to fetch preview data and update the post
    """
    create_document_mock = mocker.patch("search.tasks.create_document")
    update_link_post_mock = mocker.patch("search.tasks.update_link_post_with_preview")
    indexing_api_args = (
        "doc_id",
        {"post_id": "a", "post_type": post_type, "post_link_url": post_url},
    )
    create_post_document(*indexing_api_args)
    assert create_document_mock.si.call_count == 1
    assert create_document_mock.si.call_args[0] == indexing_api_args
    assert update_link_post_mock.si.called is exp_update_link_post


@pytest.mark.parametrize(
    "resp_content,resp_description,exp_preview_text",
    [
        ["<a> content", None, "content"],
        ["", "description", "description"],
        [None, "description", "description"],
    ],
)
def test_update_link_post_with_preview(
    mocker, mocked_api, resp_content, resp_description, exp_preview_text
):
    """
    Test that update_link_post_with_preview fetches embedly content and updates
    the given post in the database and in ES
    """
    get_embedly_content_mock = mocker.patch("search.tasks.get_embedly_content")
    get_embedly_content_mock.return_value.json.return_value = {
        "content": resp_content,
        "description": resp_description,
    }
    post_data = {"post_id": "a", "post_link_url": "example.com"}
    post = PostFactory.create(post_id=post_data["post_id"])
    update_link_post_with_preview("abc", post_data)
    assert get_embedly_content_mock.call_args[0][0] == post_data["post_link_url"]
    assert Post.objects.get(id=post.id).preview_text == exp_preview_text
    assert mocked_api.update_post.call_count == 1


@pytest.mark.parametrize("error", [KeyError, NotFound])
def test_wrap_retry_exception(error):
    """wrap_retry_exception should not raise anything if no exception is raised"""
    with assert_not_raises():
        with wrap_retry_exception(error):
            # Should not raise an exception
            pass


@pytest.mark.parametrize("matching", [True, False])
def test_wrap_retry_exception_matching(matching):
    """A matching exception should raise a RetryException"""

    class SubError(KeyError):
        """Use a subclass to assert isinstance use"""

    def raise_thing():
        """raise the exception"""
        if matching:
            raise SubError()
        else:
            raise TabError()

    matching_exception = RetryException if matching else TabError
    with pytest.raises(matching_exception):
        with wrap_retry_exception(KeyError):
            raise_thing()


@pytest.mark.parametrize("with_error", [True, False])
@pytest.mark.parametrize("update_only", [True, False])
def test_index_posts(
    mocker, wrap_retry_mock, with_error, update_only
):  # pylint: disable=unused-argument
    """index_post should call the api function of the same name"""
    index_post_mock = mocker.patch("search.indexing_api.index_posts")
    if with_error:
        index_post_mock.side_effect = TabError
    post_ids = [1, 2, 3]
    result = index_posts.delay(post_ids, update_only).get()
    assert result == ("index_posts threw an error" if with_error else None)
    index_post_mock.assert_called_once_with(post_ids, update_only)


@pytest.mark.parametrize("with_error", [True, False])
@pytest.mark.parametrize("update_only", [True, False])
def test_index_comments(
    mocker, wrap_retry_mock, with_error, update_only
):  # pylint: disable=unused-argument
    """index_comments should call the api function of the same name"""
    index_comments_mock = mocker.patch("search.indexing_api.index_comments")
    if with_error:
        index_comments_mock.side_effect = TabError
    post_ids = [1, 2, 3]
    result = index_comments.delay(post_ids, update_only).get()
    assert result == ("index_comments threw an error" if with_error else None)
    index_comments_mock.assert_called_once_with(post_ids, update_only)


@pytest.mark.parametrize("with_error", [True, False])
@pytest.mark.parametrize("update_only", [True, False])
def test_index_videos(
    mocker, with_error, update_only
):  # pylint:
disable=unused-argument """index_videos should call the api function of the same name""" index_videos_mock = mocker.patch("search.indexing_api.index_videos") if with_error: index_videos_mock.side_effect = TabError result = index_videos.delay([1, 2, 3], update_only).get() assert result == ("index_videos threw an error" if with_error else None) index_videos_mock.assert_called_once_with([1, 2, 3], update_only) @pytest.mark.parametrize( "indexes", [ ["post", "comment", "profile"], ["course", "program"], ["userlist"], ["video"], ["podcast", "podcastepisode"], ], ) def test_start_recreate_index( mocker, mocked_celery, user, indexes ): # pylint:disable=too-many-locals,too-many-statements """ recreate_index should recreate the elasticsearch index and reindex all data with it """ settings.INDEXING_API_USERNAME = user.username settings.ELASTICSEARCH_INDEXING_CHUNK_SIZE = 2 mock_blocklist = mocker.patch("search.tasks.load_course_blocklist", return_value=[]) UserFactory.create_batch( 4, is_active=False ) # these should not show up in the indexing comments = sorted(CommentFactory.create_batch(4), key=lambda comment: comment.id) posts = sorted([comment.post for comment in comments], key=lambda post: post.id) users = sorted([item.author for item in posts + comments], key=lambda user: user.id) platforms = [ PlatformType.ocw, PlatformType.mitx, PlatformType.xpro, PlatformType.micromasters, PlatformType.bootcamps, PlatformType.oll, PlatformType.youtube, ] courses = sorted( [CourseFactory.create(platform=platform.value) for platform in platforms], key=lambda course: course.id, ) videos = sorted(VideoFactory.create_batch(4), key=lambda video: video.id) podcasts = sorted(PodcastFactory.create_batch(4), key=lambda podcast: podcast.id) podcast_episodes = sorted( PodcastEpisodeFactory.create_batch(4), key=lambda podcast_episode: podcast_episode.id, ) index_posts_mock = mocker.patch("search.tasks.index_posts", autospec=True) index_comments_mock = mocker.patch("search.tasks.index_comments", autospec=True) index_profiles_mock = mocker.patch("search.tasks.index_profiles", autospec=True) index_courses_mock = mocker.patch("search.tasks.index_courses", autospec=True) index_videos_mock = mocker.patch("search.tasks.index_videos", autospec=True) index_podcasts_mock = mocker.patch("search.tasks.index_podcasts", autospec=True) index_podcast_episodes_mock = mocker.patch( "search.tasks.index_podcast_episodes", autospec=True ) index_course_content_mock = mocker.patch( "search.tasks.index_course_content_files", autospec=True ) backing_index = "backing" create_backing_index_mock = mocker.patch( "search.indexing_api.create_backing_index", autospec=True, return_value=backing_index, ) finish_recreate_index_mock = mocker.patch( "search.tasks.finish_recreate_index", autospec=True ) with pytest.raises(mocked_celery.replace_exception_class): start_recreate_index.delay(indexes) finish_recreate_index_dict = {} for doctype in VALID_OBJECT_TYPES: if doctype in indexes: finish_recreate_index_dict[doctype] = backing_index create_backing_index_mock.assert_any_call(doctype) finish_recreate_index_mock.s.assert_called_once_with(finish_recreate_index_dict) assert mocked_celery.group.call_count == 1 # Celery's 'group' function takes a generator as an argument. In order to make assertions about the items # in that generator, 'list' is being called to force iteration through all of those items. 
list(mocked_celery.group.call_args[0][0]) if POST_TYPE in indexes: assert index_posts_mock.si.call_count == 2 index_posts_mock.si.assert_any_call([posts[0].id, posts[1].id]) index_posts_mock.si.assert_any_call([posts[2].id, posts[3].id]) if COMMENT_TYPE in indexes: assert index_comments_mock.si.call_count == 2 index_comments_mock.si.assert_any_call([comments[0].id, comments[1].id]) index_comments_mock.si.assert_any_call([comments[2].id, comments[3].id]) if PROFILE_TYPE in indexes: assert index_profiles_mock.si.call_count == 4 for offset in range(4): index_profiles_mock.si.assert_any_call( [users[offset * 2].profile.id, users[offset * 2 + 1].profile.id] ) if COURSE_TYPE in indexes: mock_blocklist.assert_called_once() assert index_courses_mock.si.call_count == 4 index_courses_mock.si.assert_any_call([courses[0].id, courses[1].id]) index_courses_mock.si.assert_any_call([courses[2].id, courses[3].id]) index_courses_mock.si.assert_any_call([courses[4].id, courses[5].id]) index_courses_mock.si.assert_any_call([courses[6].id]) # chunk size is 2 and there is only one course each for ocw and xpro assert index_course_content_mock.si.call_count == 1 index_course_content_mock.si.assert_any_call( [ *[ course.id for course in courses if course.platform == PlatformType.ocw.value ], *[ course.id for course in courses if course.platform == PlatformType.xpro.value ], ] ) if VIDEO_TYPE in indexes: assert index_videos_mock.si.call_count == 2 index_videos_mock.si.assert_any_call([videos[0].id, videos[1].id]) index_videos_mock.si.assert_any_call([videos[2].id, videos[3].id]) if PODCAST_TYPE in indexes: assert index_podcasts_mock.si.call_count == 4 index_podcasts_mock.si.assert_any_call([podcasts[0].id, podcasts[1].id]) index_podcasts_mock.si.assert_any_call([podcasts[2].id, podcasts[3].id]) if PODCAST_EPISODE_TYPE in indexes: assert index_podcast_episodes_mock.si.call_count == 2 index_podcast_episodes_mock.si.assert_any_call( [podcast_episodes[0].id, podcast_episodes[1].id] ) index_podcast_episodes_mock.si.assert_any_call( [podcast_episodes[2].id, podcast_episodes[3].id] ) assert mocked_celery.replace.call_count == 1 assert mocked_celery.replace.call_args[0][1] == mocked_celery.chain.return_value @pytest.mark.parametrize("with_error", [True, False]) def test_finish_recreate_index(mocker, with_error): """ finish_recreate_index should attach the backing index to the default alias """ backing_indices = {"post": "backing", "comment": "backing", "profile": "backing"} results = ["error"] if with_error else [] switch_indices_mock = mocker.patch( "search.indexing_api.switch_indices", autospec=True ) if with_error: with pytest.raises(ReindexException): finish_recreate_index.delay(results, backing_indices) assert switch_indices_mock.call_count == 0 else: finish_recreate_index.delay(results, backing_indices) switch_indices_mock.assert_any_call("backing", POST_TYPE) switch_indices_mock.assert_any_call("backing", COMMENT_TYPE) @pytest.mark.parametrize("with_error", [True, False]) @pytest.mark.parametrize("update_only", [True, False]) def test_index_courses(mocker, with_error, update_only): """index_courses should call the api function of the same name""" index_courses_mock = mocker.patch("search.indexing_api.index_courses") if with_error: index_courses_mock.side_effect = TabError result = index_courses.delay([1, 2, 3], update_only).get() assert result == ("index_courses threw an error" if with_error else None) index_courses_mock.assert_called_once_with([1, 2, 3], update_only) def test_delete_document(mocker): 
"""delete_document should call the api function of the same name""" delete_document_mock = mocker.patch("search.indexing_api.delete_document") delete_document.delay(1, "course").get() delete_document_mock.assert_called_once_with(1, "course") @pytest.mark.parametrize("is_indexing_user", [True, False]) def test_upsert_profile_task(mocked_api, user, settings, is_indexing_user): """Test that upsert_profile will serialize the profile data and upsert it to the ES index""" if is_indexing_user: user.username = settings.INDEXING_API_USERNAME user.save() upsert_profile(user.profile.id) if is_indexing_user: mocked_api.upsert_document.assert_not_called() else: data = ESProfileSerializer().serialize(user.profile) mocked_api.upsert_document.assert_called_once_with( gen_profile_id(user.username), data, PROFILE_TYPE, retry_on_conflict=settings.INDEXING_ERROR_RETRIES, ) def test_upsert_content_file_task(mocked_api): """Test that upsert_content_file will serialize the content file data and upsert it to the ES index""" content_file = ContentFileFactory.create( run=LearningResourceRunFactory.create(platform=PlatformType.ocw.value) ) course = content_file.run.content_object upsert_content_file(content_file.id) data = ESContentFileSerializer(content_file).data mocked_api.upsert_document.assert_called_once_with( gen_content_file_id(content_file.key), data, COURSE_TYPE, retry_on_conflict=settings.INDEXING_ERROR_RETRIES, routing=gen_course_id(course.platform, course.course_id), ) @pytest.mark.parametrize("with_error", [True, False]) @pytest.mark.parametrize("update_only", [True, False]) def test_index_course_content_files(mocker, with_error, update_only): """index_course_content_files should call the api function of the same name""" index_content_files_mock = mocker.patch( "search.indexing_api.index_course_content_files" ) if with_error: index_content_files_mock.side_effect = TabError result = index_course_content_files.delay([1, 2, 3], update_only).get() assert result == ( "index_course_content_files threw an error" if with_error else None ) index_content_files_mock.assert_called_once_with([1, 2, 3], update_only) @pytest.mark.parametrize("with_error", [True, False]) @pytest.mark.parametrize("update_only", [True, False]) def test_index_run_content_files(mocker, with_error, update_only): """index_run_content_files should call the api function of the same name""" index_run_content_files_mock = mocker.patch( "search.indexing_api.index_run_content_files" ) if with_error: index_run_content_files_mock.side_effect = TabError result = index_run_content_files.delay(1, update_only).get() assert result == ("index_run_content_files threw an error" if with_error else None) index_run_content_files_mock.assert_called_once_with(1, update_only) @pytest.mark.parametrize("with_error", [True, False]) def test_delete_run_content_files(mocker, with_error): """delete_run_content_files should call the api function of the same name""" delete_run_content_files_mock = mocker.patch( "search.indexing_api.delete_run_content_files" ) if with_error: delete_run_content_files_mock.side_effect = TabError result = delete_run_content_files.delay(1).get() assert result == ("delete_run_content_files threw an error" if with_error else None) delete_run_content_files_mock.assert_called_once_with(1) @pytest.mark.parametrize("with_error", [True, False]) @pytest.mark.parametrize( "tasks_func_name, indexing_func_name", [ ("bulk_delete_profiles", "delete_profiles"), ("bulk_delete_courses", "delete_courses"), ("bulk_delete_programs", "delete_programs"), 
("bulk_delete_user_lists", "delete_user_lists"), ("bulk_delete_videos", "delete_videos"), ("bulk_delete_podcasts", "delete_podcasts"), ("bulk_delete_podcast_episodes", "delete_podcast_episodes"), ], ) def test_bulk_deletion_tasks(mocker, with_error, tasks_func_name, indexing_func_name): """bulk deletion tasks should call corresponding indexing api function""" indexing_api_task_mock = mocker.patch(f"search.indexing_api.{indexing_func_name}") task = getattr(tasks, tasks_func_name) if with_error: indexing_api_task_mock.side_effect = TabError result = task.delay([1]).get() assert result == (f"{tasks_func_name} threw an error" if with_error else None) indexing_api_task_mock.assert_called_once_with([1]) @pytest.mark.parametrize( "indexes, platform", [ ( [ "post", "comment", "profile", "program", "video", "podcast", "podcastepisode", ], None, ), (["course", "resourcefile"], None), (["course", "resourcefile"], PlatformType.ocw.value), (["course", "resourcefile"], PlatformType.mitx.value), (["course", "resourcefile"], PlatformType.xpro.value), ], ) def test_start_update_index( mocker, mocked_celery, user, indexes, platform ): # pylint:disable=too-many-locals,too-many-statements,too-many-branches """ recreate_index should recreate the elasticsearch index and reindex all data with it """ settings.INDEXING_API_USERNAME = user.username settings.ELASTICSEARCH_INDEXING_CHUNK_SIZE = 2 mock_blocklist = mocker.patch("search.tasks.load_course_blocklist", return_value=[]) inactive_users = UserFactory.create_batch(4, is_active=False) comments = sorted(CommentFactory.create_batch(4), key=lambda comment: comment.id) posts = sorted([comment.post for comment in comments], key=lambda post: post.id) users = sorted([item.author for item in posts + comments], key=lambda user: user.id) platforms = [ PlatformType.ocw, PlatformType.mitx, PlatformType.xpro, PlatformType.micromasters, PlatformType.bootcamps, PlatformType.oll, PlatformType.youtube, ] courses = sorted( [CourseFactory.create(platform=platform.value) for platform in platforms], key=lambda course: course.id, ) unpublished_courses = sorted( [ CourseFactory.create(platform=platform.value, published=False) for platform in platforms ], key=lambda course: course.id, ) videos = sorted(VideoFactory.create_batch(4), key=lambda video: video.id) unpublished_video = VideoFactory.create(published=False) podcasts = sorted(PodcastFactory.create_batch(4), key=lambda podcast: podcast.id) unpublished_podcast = PodcastFactory.create(published=False) podcast_episodes = sorted( PodcastEpisodeFactory.create_batch(4), key=lambda podcast_episode: podcast_episode.id, ) unpublished_podcast_episode = PodcastEpisodeFactory.create(published=False) index_posts_mock = mocker.patch("search.tasks.index_posts", autospec=True) index_comments_mock = mocker.patch("search.tasks.index_comments", autospec=True) index_profiles_mock = mocker.patch("search.tasks.index_profiles", autospec=True) delete_profiles_mock = mocker.patch( "search.tasks.bulk_delete_profiles", autospec=True ) index_courses_mock = mocker.patch("search.tasks.index_courses", autospec=True) delete_courses_mock = mocker.patch( "search.tasks.bulk_delete_courses", autospec=True ) index_videos_mock = mocker.patch("search.tasks.index_videos", autospec=True) delete_videos_mock = mocker.patch("search.tasks.bulk_delete_videos", autospec=True) index_podcasts_mock = mocker.patch("search.tasks.index_podcasts", autospec=True) delete_podcasts_mock = mocker.patch( "search.tasks.bulk_delete_podcasts", autospec=True ) index_podcast_episodes_mock = 
mocker.patch( "search.tasks.index_podcast_episodes", autospec=True ) delete_podcast_episodes_mock = mocker.patch( "search.tasks.bulk_delete_podcast_episodes", autospec=True ) index_course_content_mock = mocker.patch( "search.tasks.index_course_content_files", autospec=True ) with pytest.raises(mocked_celery.replace_exception_class): start_update_index.delay(indexes, platform) assert mocked_celery.group.call_count == 1 # Celery's 'group' function takes a generator as an argument. In order to make assertions about the items # in that generator, 'list' is being called to force iteration through all of those items. list(mocked_celery.group.call_args[0][0]) if POST_TYPE in indexes: assert index_posts_mock.si.call_count == 2 index_posts_mock.si.assert_any_call([posts[0].id, posts[1].id], True) index_posts_mock.si.assert_any_call([posts[2].id, posts[3].id], True) if COMMENT_TYPE in indexes: assert index_comments_mock.si.call_count == 2 index_comments_mock.si.assert_any_call([comments[0].id, comments[1].id], True) index_comments_mock.si.assert_any_call([comments[2].id, comments[3].id], True) if PROFILE_TYPE in indexes: assert index_profiles_mock.si.call_count == 4 for offset in range(4): index_profiles_mock.si.assert_any_call( [users[offset * 2].profile.id, users[offset * 2 + 1].profile.id], True ) assert delete_profiles_mock.si.call_count == 2 for offset in range(2): delete_profiles_mock.si.assert_any_call( [ inactive_users[offset * 2].profile.id, inactive_users[offset * 2 + 1].profile.id, ] ) if COURSE_TYPE in indexes: mock_blocklist.assert_called_once() if platform: assert index_courses_mock.si.call_count == 1 course = next(course for course in courses if course.platform == platform) index_courses_mock.si.assert_any_call([course.id], True) assert delete_courses_mock.si.call_count == 1 unpublished_course = next( course for course in unpublished_courses if course.platform == platform ) delete_courses_mock.si.assert_any_call([unpublished_course.id]) else: assert index_courses_mock.si.call_count == 4 index_courses_mock.si.assert_any_call([courses[0].id, courses[1].id], True) index_courses_mock.si.assert_any_call([courses[2].id, courses[3].id], True) index_courses_mock.si.assert_any_call([courses[4].id, courses[5].id], True) index_courses_mock.si.assert_any_call([courses[6].id], True) assert delete_courses_mock.si.call_count == 4 delete_courses_mock.si.assert_any_call( [unpublished_courses[0].id, unpublished_courses[1].id] ) delete_courses_mock.si.assert_any_call( [unpublished_courses[2].id, unpublished_courses[3].id] ) delete_courses_mock.si.assert_any_call( [unpublished_courses[4].id, unpublished_courses[5].id] ) delete_courses_mock.si.assert_any_call([unpublished_courses[6].id]) if RESOURCE_FILE_TYPE in indexes: if platform in (PlatformType.ocw.value, PlatformType.xpro.value): assert index_course_content_mock.si.call_count == 1 course = next(course for course in courses if course.platform == platform) index_course_content_mock.si.assert_any_call([course.id], True) elif platform: assert index_course_content_mock.si.call_count == 0 else: assert index_course_content_mock.si.call_count == 1 index_course_content_mock.si.assert_any_call( [ *[ course.id for course in courses if course.platform == PlatformType.ocw.value ], *[ course.id for course in courses if course.platform == PlatformType.xpro.value ], ], True, ) if VIDEO_TYPE in indexes: assert index_videos_mock.si.call_count == 2 index_videos_mock.si.assert_any_call([videos[0].id, videos[1].id], True) 
index_videos_mock.si.assert_any_call([videos[2].id, videos[3].id], True) assert delete_videos_mock.si.call_count == 1 delete_videos_mock.si.assert_any_call([unpublished_video.id]) if PODCAST_TYPE in indexes: assert index_podcasts_mock.si.call_count == 5 index_podcasts_mock.si.assert_any_call([podcasts[0].id, podcasts[1].id], True) index_podcasts_mock.si.assert_any_call([podcasts[2].id, podcasts[3].id], True) assert delete_podcasts_mock.si.call_count == 1 delete_podcasts_mock.si.assert_any_call([unpublished_podcast.id]) if PODCAST_EPISODE_TYPE in indexes: assert index_podcast_episodes_mock.si.call_count == 2 index_podcast_episodes_mock.si.assert_any_call( [podcast_episodes[0].id, podcast_episodes[1].id], True ) index_podcast_episodes_mock.si.assert_any_call( [podcast_episodes[2].id, podcast_episodes[3].id], True ) assert delete_podcast_episodes_mock.si.call_count == 1 delete_podcast_episodes_mock.si.assert_any_call( [unpublished_podcast_episode.id] ) assert mocked_celery.replace.call_count == 1 assert mocked_celery.replace.call_args[0][1] == mocked_celery.group.return_value
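# ---------------------------------------------------------------------------
# Editorial sketch (hypothetical helper, not this repo's implementation): the
# batching behaviour asserted above -- ids split into lists of
# settings.ELASTICSEARCH_INDEXING_CHUNK_SIZE before being fanned out to celery
# subtasks -- boils down to a generator like this.


def chunks(ids, chunk_size=2):
    """Yield successive `chunk_size`-sized lists from `ids`."""
    ids = list(ids)
    for i in range(0, len(ids), chunk_size):
        yield ids[i:i + chunk_size]


# e.g. list(chunks([1, 2, 3, 4])) == [[1, 2], [3, 4]] -- exactly the two
# batches the mocked subtasks above are asserted to receive.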
"""Tests related to file-writing functionality.""" import shutil import os from os.path import join, exists, islink, dirname import pytest from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from bids.layout.writing import build_path, _PATTERN_FIND from bids.tests import get_test_data_path from bids import BIDSLayout from bids.layout.models import BIDSFile, Entity, Tag, Base @pytest.fixture def writable_file(tmpdir): engine = create_engine('sqlite://') Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() testfile = 'sub-03_ses-2_task-rest_acq-fullbrain_run-2_bold.nii.gz' fn = tmpdir.mkdir("tmp").join(testfile) fn.write('###') bf = BIDSFile(os.path.join(str(fn))) tag_dict = { 'task': 'rest', 'run': 2, 'subject': '3' } ents = {name: Entity(name) for name in tag_dict.keys()} tags = [Tag(bf, ents[k], value=v) for k, v in tag_dict.items()] session.add_all(list(ents.values()) + tags + [bf]) session.commit() return bf @pytest.fixture(scope='module') def tmp_bids(tmpdir_factory): tmp_bids = tmpdir_factory.mktemp("tmp_bids") yield tmp_bids shutil.rmtree(str(tmp_bids)) # Ugly hack try: shutil.rmtree(join(get_test_data_path(), '7t_trt', 'sub-Bob')) except: pass @pytest.fixture(scope='module') def layout(tmp_bids): orig_dir = join(get_test_data_path(), '7t_trt') # return BIDSLayout(data_dir, absolute_paths=False) new_dir = join(str(tmp_bids), 'bids') os.symlink(orig_dir, new_dir) return BIDSLayout(new_dir) class TestWritableFile: def test_parse_pattern_re(self): """Unit tests on the strict entity pattern finder regex.""" assert _PATTERN_FIND.findall('{extension<nii|nii.gz|json>|nii.gz}') == [ ('{extension<nii|nii.gz|json>|nii.gz}', 'extension', 'nii|nii.gz|json', 'nii.gz') ] assert _PATTERN_FIND.findall('{extension<.nii|.nii.gz|.json>|.nii.gz}') == [ ('{extension<.nii|.nii.gz|.json>|.nii.gz}', 'extension', '.nii|.nii.gz|.json', '.nii.gz') ] assert _PATTERN_FIND.findall('{extension<json|jsld>|json}') == [ ('{extension<json|jsld>|json}', 'extension', 'json|jsld', 'json') ] assert _PATTERN_FIND.findall('{task<func|rest>}/r-{run}.nii.gz') == [ ('{task<func|rest>}', 'task', 'func|rest', ''), ('{run}', 'run', '', '') ] pattern = """\ sub-{subject}[/ses-{session}]/anat/sub-{subject}[_ses-{session}][_acq-{acquisition}][_ce-{ceagent}][_rec-{reconstruction}]\ [_space-{space}]_{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio>}.\ {extension<nii|nii.gz|json>|nii.gz}""" assert sorted(_PATTERN_FIND.findall(pattern)) == [ ('{acquisition}', 'acquisition', '', ''), ('{ceagent}', 'ceagent', '', ''), ('{extension<nii|nii.gz|json>|nii.gz}', 'extension', 'nii|nii.gz|json', 'nii.gz'), ('{reconstruction}', 'reconstruction', '', ''), ('{session}', 'session', '', ''), ('{session}', 'session', '', ''), ('{space}', 'space', '', ''), ('{subject}', 'subject', '', ''), ('{subject}', 'subject', '', ''), ( '{suffix<T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|' 'PD|PDT2|inplaneT[12]|angio>}', 'suffix', 'T1w|T2w|T1rho|T1map|T2map|T2star|FLAIR|FLASH|PDmap|PD|PDT2|inplaneT[12]|angio', '' ) ] def test_build_path(self, writable_file): # Single simple pattern with pytest.raises(TypeError): build_path(writable_file.entities) pat = join(writable_file.dirname, '{task}/sub-{subject}/run-{run}.nii.gz') target = join(writable_file.dirname, 'rest/sub-3/run-2.nii.gz') assert build_path(writable_file.entities, pat) == target # Multiple simple patterns pats = ['{session}/{task}/r-{run}.nii.gz', 't-{task}/{subject}-{run}.nii.gz', 
'{subject}/{task}.nii.gz'] pats = [join(writable_file.dirname, p) for p in pats] target = join(writable_file.dirname, 't-rest/3-2.nii.gz') assert build_path(writable_file.entities, pats) == target # Pattern with optional entity pats = ['[{session}/]{task}/r-{run}.nii.gz', 't-{task}/{subject}-{run}.nii.gz'] pats = [join(writable_file.dirname, p) for p in pats] target = join(writable_file.dirname, 'rest/r-2.nii.gz') assert build_path(writable_file.entities, pats) == target # Pattern with conditional values pats = ['{task<func|acq>}/r-{run}.nii.gz', 't-{task}/{subject}-{run}.nii.gz'] pats = [join(writable_file.dirname, p) for p in pats] target = join(writable_file.dirname, 't-rest/3-2.nii.gz') assert build_path(writable_file.entities, pats) == target # Pattern with valid conditional values pats = ['{task<func|rest>}/r-{run}.nii.gz', 't-{task}/{subject}-{run}.nii.gz'] pats = [join(writable_file.dirname, p) for p in pats] target = join(writable_file.dirname, 'rest/r-2.nii.gz') assert build_path(writable_file.entities, pats) == target # Pattern with optional entity with conditional values pats = ['[{task<func|acq>}/]r-{run}.nii.gz', 't-{task}/{subject}-{run}.nii.gz'] pats = [join(writable_file.dirname, p) for p in pats] target = join(writable_file.dirname, 'r-2.nii.gz') assert build_path(writable_file.entities, pats) == target # Pattern with default value pats = ['ses-{session|A}/r-{run}.nii.gz'] assert build_path({'run': 3}, pats) == 'ses-A/r-3.nii.gz' # Pattern with both valid and default values pats = ['ses-{session<A|B|C|D>|D}/r-{run}.nii.gz'] assert build_path({'run': 3}, pats) == 'ses-D/r-3.nii.gz' pats = ['ses-{session<A|B|C|D>|D}/r-{run}.nii.gz'] assert build_path({'session': 'B', 'run': 3}, pats) == 'ses-B/r-3.nii.gz' # Test extensions with dot and warning is issued pats = ['ses-{session<A|B|C>|D}/r-{run}.{extension}'] with pytest.warns(UserWarning) as record: assert build_path({'session': 'B', 'run': 3, 'extension': '.nii'}, pats) == 'ses-B/r-3.nii' assert "defines an invalid default value" in record[0].message.args[0] # Test expansion of optional characters pats = ['ses-{session<[ABCD]>|D}/r-{run}.{extension}'] assert build_path({'session': 'B', 'run': 3, 'extension': '.nii'}, pats) == 'ses-B/r-3.nii' # Test default-only patterns are correctly overriden by setting entity entities = { 'subject': '01', 'extension': 'bvec', 'suffix': 'T1rho', } pats = ( "sub-{subject}[/ses-{session}]/{datatype|dwi}/sub-{subject}[_ses-{session}]" "[_acq-{acquisition}]_{suffix|dwi}.{extension<bval|bvec|json|nii.gz|nii>|nii.gz}" ) assert build_path(entities, pats) == 'sub-01/dwi/sub-01_T1rho.bvec' assert build_path(entities, pats, strict=True) == 'sub-01/dwi/sub-01_T1rho.bvec' # Test multiple paths pats = ['ses-{session<A|B|C>|D}/r-{run}.{extension<json|nii|nii.gz>|nii.gz}'] assert sorted( build_path({ 'session': ['A', 'B'], 'run': [1, 2], 'extension': ['.nii.gz', 'json'] }, pats)) == [ 'ses-A/r-1.json', 'ses-A/r-1.nii.gz', 'ses-A/r-2.json', 'ses-A/r-2.nii.gz', 'ses-B/r-1.json', 'ses-B/r-1.nii.gz', 'ses-B/r-2.json', 'ses-B/r-2.nii.gz', ] def test_strict_build_path(self): # Test with strict matching--should fail pats = ['[{session}/]{task}/r-{run}.nii.gz', 't-{task}/{subject}-{run}.nii.gz'] entities = {'subject': 1, 'task': "A", 'run': 2} assert build_path(entities, pats, True) entities = {'subject': 1, 'task': "A", 'age': 22} assert not build_path(entities, pats, True) def test_build_file(self, writable_file, tmp_bids, caplog): # Simple write out new_dir = join(writable_file.dirname, 'rest') pat = 
join(writable_file.dirname, '{task}/sub-{subject}/run-{run}.nii.gz') target = join(writable_file.dirname, 'rest/sub-3/run-2.nii.gz') writable_file.copy(pat) assert exists(target) # Conflict handling with pytest.raises(ValueError): writable_file.copy(pat) with pytest.raises(ValueError): writable_file.copy(pat, conflicts='fail') with pytest.warns(UserWarning) as record: writable_file.copy(pat, conflicts='skip') log_message = record[0].message.args[0] assert log_message == 'A file at path {} already exists, ' \ 'skipping writing file.'.format(target) writable_file.copy(pat, conflicts='append') append_target = join(writable_file.dirname, 'rest/sub-3/run-2_1.nii.gz') assert exists(append_target) writable_file.copy(pat, conflicts='overwrite') assert exists(target) shutil.rmtree(new_dir) # Symbolic linking writable_file.copy(pat, symbolic_link=True) assert islink(target) shutil.rmtree(new_dir) # Using different root root = str(tmp_bids.mkdir('tmp2')) pat = join(root, '{task}/sub-{subject}/run-{run}.nii.gz') target = join(root, 'rest/sub-3/run-2.nii.gz') writable_file.copy(pat, root=root) assert exists(target) # Copy into directory functionality pat = join(writable_file.dirname, '{task}/') writable_file.copy(pat) target = join(writable_file.dirname, 'rest', writable_file.filename) assert exists(target) shutil.rmtree(new_dir) class TestWritableLayout: def test_write_files(self, tmp_bids, layout): tmpdir = str(tmp_bids) pat = join(str(tmpdir), 'sub-{subject<02>}' '/ses-{session}' '/r-{run}' '/suffix-{suffix}' '/acq-{acquisition}' '/task-{task}.nii.gz') layout.copy_files(path_patterns=pat) example_file = join(str(tmpdir), 'sub-02' '/ses-2' '/r-1' '/suffix-bold' '/acq-fullbrain' '/task-rest.nii.gz') example_file2 = join(str(tmpdir), 'sub-01' '/ses-2' '/r-1' '/suffix-bold' '/acq-fullbrain' '/task-rest.nii.gz') assert exists(example_file) assert not exists(example_file2) pat = join(str(tmpdir), 'sub-{subject<01>}' '/ses-{session}' '/r-{run}' '/suffix-{suffix}' '/task-{task}.nii.gz') example_file = join(str(tmpdir), 'sub-01' '/ses-2' '/r-1' '/suffix-bold' '/task-rest.nii.gz') # Should fail without the 'overwrite' because there are multiple # files that produce the same path. 
with pytest.raises(ValueError): layout.copy_files(path_patterns=pat) try: os.remove(example_file) except OSError: pass layout.copy_files(path_patterns=pat, conflicts='overwrite') assert exists(example_file) def test_write_to_file(self, tmp_bids, layout): contents = 'test' entities = {'subject': 'Bob', 'session': '01'} pat = join('sub-{subject}/ses-{session}/desc.txt') layout.write_to_file(entities, path_patterns=pat, contents=contents, validate=False) target = join(str(tmp_bids), 'bids', 'sub-Bob/ses-01/desc.txt') assert exists(target) with open(target) as f: written = f.read() assert written == contents assert target not in layout.files def test_write_to_file_defaults(self, tmp_bids, layout): contents = 'test' entities = {'subject': 'Bob', 'session': '01', 'run': '1', 'suffix': 'bold', 'task': 'test', 'acquisition': 'test', 'bval': 0} layout.write_to_file(entities, contents=contents) target = join(str(tmp_bids), 'bids', 'sub-Bob', 'ses-01', 'func', 'sub-Bob_ses-01_task-test_acq-test_run-1_bold.nii.gz') assert exists(target) with open(target) as f: written = f.read() assert written == contents def test_build_file_from_layout(self, tmpdir, layout): entities = {'subject': 'Bob', 'session': '01', 'run': '1'} pat = join(str(tmpdir), 'sub-{subject}' '/ses-{session}' '/r-{run}.nii.gz') path = layout.build_path(entities, path_patterns=pat, validate=False) assert path == join(str(tmpdir), 'sub-Bob/ses-01/r-1.nii.gz') data_dir = join(dirname(__file__), 'data', '7t_trt') filename = 'sub-04_ses-1_task-rest_acq-fullbrain_run-1_physio.tsv.gz' file = join('sub-04', 'ses-1', 'func', filename) path = layout.build_path(file, path_patterns=pat, validate=False) assert path.endswith('sub-04/ses-1/r-1.nii.gz')
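

# A minimal standalone sketch of the build_path API exercised above. The
# entities and pattern here are illustrative, not taken from the test suite;
# only the build_path call itself is the real pybids function.
if __name__ == '__main__':
    example_entities = {'subject': '01', 'session': '02', 'task': 'rest',
                        'run': 1}
    example_pattern = ('sub-{subject}[/ses-{session}]/func/'
                       'sub-{subject}_task-{task}_run-{run}.nii.gz')
    # Optional blocks in [...] are dropped when their entity is missing, and
    # {name<a|b>|default} constrains allowed values with a fallback default.
    print(build_path(example_entities, example_pattern))
    # -> sub-01/ses-02/func/sub-01_task-rest_run-1.nii.gz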
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for sync_replicas_optimizer.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import portpicker from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent from tensorflow.python.training import server_lib from tensorflow.python.training import training def create_local_cluster(num_workers, num_ps, protocol="grpc"): """Create local GRPC servers and return them.""" worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)] ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)] cluster_dict = { "worker": ["localhost:%s" % port for port in worker_ports], "ps": ["localhost:%s" % port for port in ps_ports] } cs = server_lib.ClusterSpec(cluster_dict) workers = [ server_lib.Server( cs, job_name="worker", protocol=protocol, task_index=ix, start=True) for ix in range(num_workers) ] ps_servers = [ server_lib.Server( cs, job_name="ps", protocol=protocol, task_index=ix, start=True) for ix in range(num_ps) ] return workers, ps_servers # Creates the workers and return their sessions, graphs, train_ops. def get_workers(num_workers, replicas_to_aggregate, workers): sessions = [] graphs = [] train_ops = [] for worker_id in range(num_workers): graph = ops.Graph() is_chief = (worker_id == 0) with graph.as_default(): with ops.device("/job:ps/task:0"): global_step = variables.Variable(0, name="global_step", trainable=False) var_0 = variables.Variable(0.0, name="v0") with ops.device("/job:ps/task:1"): var_1 = variables.Variable(1.0, name="v1") var_sparse = variables.Variable([[3.0], [4.0]], name="v_sparse") with ops.device("/job:worker/task:" + str(worker_id)): grads_0 = constant_op.constant(0.1 + worker_id * 0.2) grads_1 = constant_op.constant(0.9 + worker_id * 0.2) # This is to test against sparse gradients. 
grads_sparse = ops.IndexedSlices( constant_op.constant( [0.1 + worker_id * 0.2], shape=[1, 1]), constant_op.constant([1]), constant_op.constant([2, 1])) sgd_opt = gradient_descent.GradientDescentOptimizer(2.0) sync_rep_opt = training.SyncReplicasOptimizer( sgd_opt, replicas_to_aggregate=replicas_to_aggregate, total_num_replicas=num_workers) train_op = [ sync_rep_opt.apply_gradients( zip([grads_0, grads_1, grads_sparse], [var_0, var_1, var_sparse]), global_step=global_step) ] sync_replicas_hook = sync_rep_opt.make_session_run_hook( is_chief, num_tokens=num_workers) # Creates MonitoredSession session = training.MonitoredTrainingSession( master=workers[worker_id].target, is_chief=is_chief, hooks=[sync_replicas_hook]) sessions.append(session) graphs.append(graph) train_ops.append(train_op) return sessions, graphs, train_ops class SyncReplicasOptimizerTest(test.TestCase): def _run(self, train_op, sess): sess.run(train_op) def test2Workers(self): num_workers = 2 replicas_to_aggregate = 2 num_ps = 2 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps) # Creates and returns all the workers. sessions, graphs, train_ops = get_workers(num_workers, replicas_to_aggregate, workers) # Chief should have already initialized all the variables. var_0_g_0 = graphs[0].get_tensor_by_name("v0:0") var_1_g_0 = graphs[0].get_tensor_by_name("v1:0") local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0") self.assertAllEqual(0.0, sessions[0].run(var_0_g_0)) self.assertAllEqual(1.0, sessions[0].run(var_1_g_0)) self.assertAllEqual(0, sessions[0].run(local_step_0)) # Will just use session 1 to verify all the variables later. var_0_g_1 = graphs[1].get_tensor_by_name("v0:0") var_1_g_1 = graphs[1].get_tensor_by_name("v1:0") var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0") local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0") global_step = graphs[1].get_tensor_by_name("global_step:0") # The steps should also be initialized. self.assertAllEqual(0, sessions[1].run(global_step)) self.assertAllEqual(0, sessions[1].run(local_step_1)) self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1)) # We have initial tokens in the queue so we can call this one by one. After # the first step, this will no longer work as there will be no more extra # tokens in the queue. sessions[0].run(train_ops[0]) sessions[1].run(train_ops[1]) # The global step should have been updated and the variables should now have # the new values after the average of the gradients are applied. while sessions[1].run(global_step) != 1: time.sleep(0.01) self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]], sessions[1].run(var_sparse_g_1)) # The local step for both workers should still be 0 because the initial # tokens in the token queue are 0s. This means that the following # computation of the gradients will be wasted as local_step is smaller than # the current global step. However, this only happens once when the system # just starts and this is necessary to make the system robust for the case # when chief gets restarted by errors/preemption/... self.assertAllEqual(0, sessions[0].run(local_step_0)) self.assertAllEqual(0, sessions[1].run(local_step_1)) sessions[0].run(train_ops[0]) sessions[1].run(train_ops[1]) # Although the global step should still be 1 as explained above, the local # step should now be updated to 1. 
The variables are still the same. self.assertAllEqual(1, sessions[1].run(global_step)) self.assertAllEqual(1, sessions[0].run(local_step_0)) self.assertAllEqual(1, sessions[1].run(local_step_1)) self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) # At this step, the token queue is empty. So the 2 workers need to work # together to proceed. threads = [] threads.append( self.checkedThread( target=self._run, args=(train_ops[0], sessions[0]))) threads.append( self.checkedThread( target=self._run, args=(train_ops[1], sessions[1]))) # The two workers starts to execute the train op. for thread in threads: thread.start() for thread in threads: thread.join() # The global step should now be 2 and the gradients should have been # applied twice. self.assertAllEqual(2, sessions[1].run(global_step)) self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) # 3 workers and one of them is backup. def test3Workers1Backup(self): num_workers = 3 replicas_to_aggregate = 2 num_ps = 2 workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps) # Creates and returns all the workers. sessions, graphs, train_ops = get_workers(num_workers, replicas_to_aggregate, workers) # Chief should have already initialized all the variables. var_0_g_1 = graphs[1].get_tensor_by_name("v0:0") var_1_g_1 = graphs[1].get_tensor_by_name("v1:0") local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0") global_step = graphs[1].get_tensor_by_name("global_step:0") # The steps should also be initilized. self.assertAllEqual(0, sessions[1].run(global_step)) self.assertAllEqual(0, sessions[1].run(local_step_1)) # We have initial tokens in the queue so we can call this one by one. After # the token queue becomes empty, they should be called concurrently. # Here worker 0 and worker 2 finished first. sessions[0].run(train_ops[0]) sessions[2].run(train_ops[2]) # The global step should have been updated since we only need to collect 2 # gradients. The variables should now have the new values after the average # of the gradients from worker 0/2 are applied. while sessions[1].run(global_step) != 1: time.sleep(0.01) self.assertAllEqual(1, sessions[1].run(global_step)) self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1)) # Worker 1 finished later and its gradients will now be dropped as it is # stale. sessions[1].run(train_ops[1]) # As shown in the previous test, the local_step for all workers should be # still 0 so their next computation will also be dropped. sessions[0].run(train_ops[0]) sessions[1].run(train_ops[1]) sessions[2].run(train_ops[2]) # Although the global step should still be 1 as explained above, the local # step should now be updated to 1. Just check worker 1 as an example. self.assertAllEqual(1, sessions[1].run(global_step)) self.assertAllEqual(1, sessions[1].run(local_step_1)) thread_0 = self.checkedThread( target=self._run, args=(train_ops[0], sessions[0])) thread_1 = self.checkedThread( target=self._run, args=(train_ops[1], sessions[1])) # Lets worker 0 execute first. # It will wait as we need 2 workers to finish this step and the global step # should be still 1. thread_0.start() self.assertAllEqual(1, sessions[1].run(global_step)) # Starts worker 1. 
thread_1.start() thread_1.join() thread_0.join() # The global step should now be 2 and the gradients should have been # applied again. self.assertAllEqual(2, sessions[1].run(global_step)) self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1)) self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1)) if __name__ == "__main__": test.main()
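
# Worked arithmetic behind the assertions above (an illustrative note, not
# part of the original test): with SGD learning rate 2.0 and worker gradients
# g_i = 0.1 + 0.2 * i for v0 (and 0.9 + 0.2 * i for v1), one synchronized
# step applies the average gradient:
#
#     v0 -> 0.0 - 2.0 * (0.1 + 0.3) / 2 = -0.4
#     v1 -> 1.0 - 2.0 * (0.9 + 1.1) / 2 = -1.0
#
# which is exactly what test2Workers checks after the first global step.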
""" xbee.py By Paul Malmsten, 2010 Inspired by code written by Amit Synderman and Marco Sangalli pmalmsten@gmail.com XBee superclass module This class defines data and methods common to all XBee modules. This class should be subclassed in order to provide series-specific functionality. """ import struct, threading, time from xbee.frame import APIFrame from xbee.python2to3 import byteToInt, intToByte class ThreadQuitException(Exception): pass class CommandFrameException(KeyError): pass class XBeeBase(threading.Thread): """ Abstract base class providing command generation and response parsing methods for XBee modules. Constructor arguments: ser: The file-like serial port to use. shorthand: boolean flag which determines whether shorthand command calls (i.e. xbee.at(...) instead of xbee.send("at",...) are allowed. callback: function which should be called with frame data whenever a frame arrives from the serial port. When this is not None, a background thread to monitor the port and call the given function is automatically started. escaped: boolean flag which determines whether the library should operate in escaped mode. In this mode, certain data bytes in the output and input streams will be escaped and unescaped in accordance with the XBee API. This setting must match the appropriate api_mode setting of an XBee device; see your XBee device's documentation for more information. """ def __init__(self, ser, shorthand=True, callback=None, escaped=False): super(XBeeBase, self).__init__() self.serial = ser self.shorthand = shorthand self._callback = None self._thread_continue = False self._escaped = escaped if callback: self._callback = callback self._thread_continue = True self.start() def halt(self): """ halt: None -> None If this instance has a separate thread running, it will be halted. This method will wait until the thread has cleaned up before returning. """ if self._callback: self._thread_continue = False self.join() def _write(self, data): """ _write: binary data -> None Packages the given binary data in an API frame and writes the result to the serial port """ frame = APIFrame(data, self._escaped).output() self.serial.write(frame) def run(self): """ run: None -> None This method overrides threading.Thread.run() and is automatically called when an instance is created with threading enabled. """ while True: try: self._callback(self.wait_read_frame()) except ThreadQuitException: break def _wait_for_frame(self): """ _wait_for_frame: None -> binary data _wait_for_frame will read from the serial port until a valid API frame arrives. It will then return the binary data contained within the frame. If this method is called as a separate thread and self.thread_continue is set to False, the thread will exit by raising a ThreadQuitException. 
""" frame = APIFrame(escaped=self._escaped) while True: if self._callback and not self._thread_continue: raise ThreadQuitException if self.serial.inWaiting() == 0: time.sleep(.01) continue byte = self.serial.read() if byte != APIFrame.START_BYTE: continue # Save all following bytes, if they are not empty if len(byte) == 1: frame.fill(byte) while(frame.remaining_bytes() > 0): try: byte = self.serial.read() except serial.SerialException: #TODO: This is a hack to ignore serial exception and should be solved later pass if len(byte) == 1: frame.fill(byte) try: # Try to parse and return result frame.parse() # Ignore empty frames if len(frame.data) == 0: frame = APIFrame() continue return frame except ValueError: # Bad frame, so restart frame = APIFrame(escaped=self._escaped) def _build_command(self, cmd, **kwargs): """ _build_command: string (binary data) ... -> binary data _build_command will construct a command packet according to the specified command's specification in api_commands. It will expect named arguments for all fields other than those with a default value or a length of 'None'. Each field will be written out in the order they are defined in the command definition. """ try: cmd_spec = self.api_commands[cmd] except AttributeError: raise NotImplementedError("API command specifications could not be found; use a derived class which defines 'api_commands'.") packet = b'' for field in cmd_spec: try: # Read this field's name from the function arguments dict data = kwargs[field['name']] except KeyError: # Data wasn't given # Only a problem if the field has a specific length if field['len'] is not None: # Was a default value specified? default_value = field['default'] if default_value: # If so, use it data = default_value else: # Otherwise, fail raise KeyError( "The expected field %s of length %d was not provided" % (field['name'], field['len'])) else: # No specific length, ignore it data = None # Ensure that the proper number of elements will be written if field['len'] and len(data) != field['len']: raise ValueError( "The data provided for '%s' was not %d bytes long"\ % (field['name'], field['len'])) # Add the data to the packet, if it has been specified # Otherwise, the parameter was of variable length, and not # given if data: packet += data return packet def _split_response(self, data): """ _split_response: binary data -> {'id':str, 'param':binary data, ...} _split_response takes a data packet received from an XBee device and converts it into a dictionary. This dictionary provides names for each segment of binary data as specified in the api_responses spec. """ # Fetch the first byte, identify the packet # If the spec doesn't exist, raise exception packet_id = data[0:1] try: packet = self.api_responses[packet_id] except AttributeError: raise NotImplementedError("API response specifications could not be found; use a derived class which defines 'api_responses'.") except KeyError: # Check to see if this ID can be found among transmittible packets for cmd_name, cmd in list(self.api_commands.items()): if cmd[0]['default'] == data[0:1]: raise CommandFrameException("Incoming frame with id %s looks like a command frame of type '%s' (these should not be received). Are you sure your devices are in API mode?" 
% (data[0], cmd_name)) raise KeyError( "Unrecognized response packet with id byte {0}".format(data[0])) # Current byte index in the data stream index = 1 # Result info info = {'id':packet['name']} packet_spec = packet['structure'] # Parse the packet in the order specified for field in packet_spec: if field['len'] == 'null_terminated': field_data = b'' while data[index:index+1] != b'\x00': field_data += data[index:index+1] index += 1 index += 1 info[field['name']] = field_data elif field['len'] is not None: # Store the number of bytes specified # Are we trying to read beyond the last data element? if index + field['len'] > len(data): raise ValueError( "Response packet was shorter than expected") field_data = data[index:index + field['len']] info[field['name']] = field_data index += field['len'] # If the data field has no length specified, store any # leftover bytes and quit else: field_data = data[index:] # Were there any remaining bytes? if field_data: # If so, store them info[field['name']] = field_data index += len(field_data) break # If there are more bytes than expected, raise an exception if index < len(data): raise ValueError( "Response packet was longer than expected; expected: %d, got: %d bytes" % (index, len(data))) # Apply parsing rules if any exist if 'parsing' in packet: for parse_rule in packet['parsing']: # Only apply a rule if it is relevant (raw data is available) if parse_rule[0] in info: # Apply the parse function to the indicated field and # replace the raw data with the result info[parse_rule[0]] = parse_rule[1](self, info) return info def _parse_samples_header(self, io_bytes): """ _parse_samples_header: binary data in XBee IO data format -> (int, [int ...], [int ...], int, int) _parse_samples_header will read the first three bytes of the binary data given and will return the number of samples which follow, a list of enabled digital inputs, a list of enabled analog inputs, the dio_mask, and the size of the header in bytes """ header_size = 3 # number of samples (always 1?) is the first byte sample_count = byteToInt(io_bytes[0]) # part of byte 1 and byte 2 are the DIO mask ( 9 bits ) dio_mask = (byteToInt(io_bytes[1]) << 8 | byteToInt(io_bytes[2])) & 0x01FF # upper 7 bits of byte 1 is the AIO mask aio_mask = (byteToInt(io_bytes[1]) & 0xFE) >> 1 # sorted lists of enabled channels; value is position of bit in mask dio_chans = [] aio_chans = [] for i in range(0,9): if dio_mask & (1 << i): dio_chans.append(i) dio_chans.sort() for i in range(0,7): if aio_mask & (1 << i): aio_chans.append(i) aio_chans.sort() return (sample_count, dio_chans, aio_chans, dio_mask, header_size) def _parse_samples(self, io_bytes): """ _parse_samples: binary data in XBee IO data format -> [ {"dio-0":True, "dio-1":False, "adc-0":100"}, ...] _parse_samples reads binary data from an XBee device in the IO data format specified by the API. It will then return a dictionary indicating the status of each enabled IO port. 
""" sample_count, dio_chans, aio_chans, dio_mask, header_size = \ self._parse_samples_header(io_bytes) samples = [] # split the sample data into a list, so it can be pop()'d sample_bytes = [byteToInt(c) for c in io_bytes[header_size:]] # repeat for every sample provided for sample_ind in range(0, sample_count): tmp_samples = {} if dio_chans: # we have digital data digital_data_set = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0)) digital_values = dio_mask & digital_data_set for i in dio_chans: tmp_samples['dio-{0}'.format(i)] = True if (digital_values >> i) & 1 else False for i in aio_chans: analog_sample = (sample_bytes.pop(0) << 8 | sample_bytes.pop(0)) tmp_samples['adc-{0}'.format(i)] = analog_sample samples.append(tmp_samples) return samples def send(self, cmd, **kwargs): """ send: string param=binary data ... -> None When send is called with the proper arguments, an API command will be written to the serial port for this XBee device containing the proper instructions and data. This method must be called with named arguments in accordance with the api_command specification. Arguments matching all field names other than those in reserved_names (like 'id' and 'order') should be given, unless they are of variable length (of 'None' in the specification. Those are optional). """ # Pass through the keyword arguments self._write(self._build_command(cmd, **kwargs)) def wait_read_frame(self): """ wait_read_frame: None -> frame info dictionary wait_read_frame calls XBee._wait_for_frame() and waits until a valid frame appears on the serial port. Once it receives a frame, wait_read_frame attempts to parse the data contained within it and returns the resulting dictionary """ frame = self._wait_for_frame() return self._split_response(frame.data) def __getattr__(self, name): """ If a method by the name of a valid api command is called, the arguments will be automatically sent to an appropriate send() call """ # If api_commands is not defined, raise NotImplementedError\ # If its not defined, _getattr__ will be called with its name if name == 'api_commands': raise NotImplementedError("API command specifications could not be found; use a derived class which defines 'api_commands'.") # Is shorthand enabled, and is the called name a command? if self.shorthand and name in self.api_commands: # If so, simply return a function which passes its arguments # to an appropriate send() call return lambda **kwargs: self.send(name, **kwargs) else: raise AttributeError("XBee has no attribute '%s'" % name)
''' Action Bar ========== .. versionadded:: 1.8.0 .. image:: images/actionbar.png :align: right The ActionBar widget is like Android's ActionBar, where items are stacked horizontally. The :class:`ActionBar` will contain one :class:`ActionView` and many :class:`ContextualActionView`\s. An :class:`ActionView` will contain an :class:`ActionPrevious` having title, app_icon and previous_icon properties. An :class:`ActionView` will contain subclasses of :class:`ActionItem`\s. Some predefined ones inlcude an :class:`ActionButton`, an :class:`ActionToggleButton`, an :class:`ActionCheck`, an :class:`ActionSeparator` and an :class:`ActionGroup`. An :class:`ActionGroup` is used to display :class:`ActionItem`\s in a group. An :class:`ActionView` will always display an :class:`ActionGroup` after other :class:`ActionItem`\s. An :class:`ActionView` will contain an :class:`ActionOverflow`. A :class:`ContextualActionView` is a subclass of an :class:`ActionView`. ''' __all__ = ('ActionBarException', 'ActionItem', 'ActionButton', 'ActionToggleButton', 'ActionCheck', 'ActionSeparator', 'ActionDropDown', 'ActionGroup', 'ActionOverflow', 'ActionView', 'ContextualActionView', 'ActionPrevious', 'ActionBar') from kivy.uix.boxlayout import BoxLayout from kivy.uix.dropdown import DropDown from kivy.uix.widget import Widget from kivy.uix.button import Button from kivy.uix.togglebutton import ToggleButton from kivy.uix.checkbox import CheckBox from kivy.config import Config from kivy.properties import ObjectProperty, NumericProperty, \ BooleanProperty, StringProperty, ListProperty, OptionProperty from kivy.uix.spinner import Spinner from kivy.lang import Builder from functools import partial window_icon = '' if Config: window_icon = Config.get('kivy', 'window_icon') class ActionBarException(Exception): '''ActionBarException class ''' pass class ActionItem(object): '''ActionItem class, an abstract class for all ActionBar widgets. To create a custom widget for an ActionBar, inherit from this class. See module documentation for more information. ''' minimum_width = NumericProperty('90sp') '''Minimum Width required by an ActionItem. :data:`minimum_width` is a :class:`~kivy.properties.NumericProperty` and defaults to '90sp'. ''' important = BooleanProperty(False) '''Determines if an ActionItem is important or not. :data:`important` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' inside_group = BooleanProperty(False) '''(internal) Determines if an ActionItem is displayed inside an ActionGroup or not. :data:`inside_group` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' background_normal = StringProperty( 'atlas://data/images/defaulttheme/action_item') '''Background image of the ActionItem used for the default graphical representation when the ActionItem is not pressed. :data:`background_normal` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/action_item'. ''' background_down = StringProperty( 'atlas://data/images/defaulttheme/action_item_down') '''Background image of the ActionItem used for default graphical representation when an ActionItem is pressed. :data:`background_down` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/action_item_down'. ''' mipmap = BooleanProperty(True) '''Defines whether the image/icon dispayed on top of the button uses a mipmap or not. :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults to `True`. 
''' class ActionButton(Button, ActionItem): '''ActionButton class, see module documentation for more information. The text color, width and size_hint_x are set manually via the Kv language file. It covers a lot of cases: with/without an icon, with/without a group and takes care of the padding between elements. You don't have much control over these properties, so if you want to customize it's appearance, we suggest you create you own button representation. You can do this by creating a class that subclasses an existing widget and an :class:`ActionItem`:: class MyOwnActionButton(Button, ActionItem): pass You can then create your own style using the Kv language. ''' icon = StringProperty(None, allownone=True) '''Source image to use when the Button is part of the ActionBar. If the Button is in a group, the text will be preferred. ''' class ActionPrevious(ActionButton): '''ActionPrevious class, see module documentation for more information. ''' with_previous = BooleanProperty(True) '''Specifies whether clicking on ActionPrevious will load the previous screen or not. If True, the previous_icon will be shown otherwise it will not. :data:`with_previous` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' app_icon = StringProperty(window_icon) '''Application icon for the ActionView. :data:`app_icon` is a :class:`~kivy.properties.StringProperty` and defaults to the window icon if set, otherwise 'data/logo/kivy-icon-32.png'. ''' previous_image = StringProperty( 'atlas://data/images/defaulttheme/previous_normal') '''Image for the 'previous' ActionButtons default graphical representation. :data:`previous_image` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/previous_normal'. ''' title = StringProperty('') '''Title for ActionView. :data:`title` is a :class:`~kivy.properties.StringProperty` and defaults to ''. ''' def __init__(self, **kwargs): super(ActionPrevious, self).__init__(**kwargs) if not self.app_icon: self.app_icon = 'data/logo/kivy-icon-32.png' class ActionToggleButton(ActionItem, ToggleButton): '''ActionToggleButton class, see module documentation for more information. ''' icon = StringProperty(None, allownone=True) '''Source image to use when the Button is part of the ActionBar. If the Button is in a group, the text will be preferred. ''' class ActionCheck(ActionItem, CheckBox): '''ActionCheck class, see module documentation for more information. ''' pass class ActionSeparator(ActionItem, Widget): '''ActionSeparator class, see module documentation for more information. ''' background_image = StringProperty( 'atlas://data/images/defaulttheme/separator') '''Background image for the separators default graphical representation. :data:`background_image` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/separator'. ''' class ActionDropDown(DropDown): '''ActionDropDown class, see module documentation for more information. ''' pass class ActionGroup(ActionItem, Spinner): '''ActionGroup class, see module documentation for more information. ''' use_separator = BooleanProperty(False) '''Specifies whether to use a separator after/before this group or not. :data:`use_separator` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' separator_image = StringProperty( 'atlas://data/images/defaulttheme/separator') '''Background Image for an ActionSeparator in an ActionView. 
:data:`separator_image` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/separator'. ''' separator_width = NumericProperty(0) '''Width of the ActionSeparator in an ActionView. :data:`separator_width` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' mode = OptionProperty('normal', options=('normal', 'spinner')) '''Sets the current mode of an ActionGroup. If mode is 'normal', the ActionGroups children will be displayed normally if there is enough space, otherwise they will be displayed in a spinner. If mode is 'spinner', then the children will always be displayed in a spinner. :data:`mode` is a :class:`~kivy.properties.OptionProperty` and defaults to 'normal'. ''' def __init__(self, **kwargs): self.list_action_item = [] self._list_overflow_items = [] super(ActionGroup, self).__init__(**kwargs) self.dropdown_cls = ActionDropDown def add_widget(self, item): if isinstance(item, ActionSeparator): super(ActionGroup, self).add_widget(item) return if not isinstance(item, ActionItem): raise ActionBarException('ActionGroup only accepts ActionItem') self.list_action_item.append(item) def show_group(self): self.clear_widgets() for item in self._list_overflow_items + self.list_action_item: item.inside_group = True self._dropdown.add_widget(item) def _build_dropdown(self, *largs): if self._dropdown: self._dropdown.unbind(on_dismiss=self._toggle_dropdown) self._dropdown.dismiss() self._dropdown = None self._dropdown = self.dropdown_cls() self._dropdown.bind(on_dismiss=self._toggle_dropdown) def _update_dropdown(self, *largs): pass def _toggle_dropdown(self, *largs): self.is_open = not self.is_open ddn = self._dropdown ddn.size_hint_x = None if not ddn.container: return children = ddn.container.children ddn.width = max([self.width, children[0].minimum_width]) for item in children: item.size_hint_y = None item.height = max([self.height, '48sp']) def clear_widgets(self): self._dropdown.clear_widgets() class ActionOverflow(ActionGroup): '''ActionOverflow class, see module documentation for more information. ''' overflow_image = StringProperty( 'atlas://data/images/defaulttheme/overflow') '''Image to be used as an Overflow Image. :data:`overflow_image` is an :class:`~kivy.properties.ObjectProperty` and defaults to 'atlas://data/images/defaulttheme/overflow'. ''' def add_widget(self, action_item, index=0): if action_item is None: return if isinstance(action_item, ActionSeparator): return if not isinstance(action_item, ActionItem): raise ActionBarException('ActionView only accepts ActionItem' ' (got {!r}'.format(action_item)) else: if index == 0: index = len(self._list_overflow_items) self._list_overflow_items.insert(index, action_item) def show_default_items(self, parent): # display overflow and it's items if widget's directly added to it if self._list_overflow_items == []: return self.show_group() super(ActionView, parent).add_widget(self) class ActionView(BoxLayout): '''ActionView class, see module documentation for more information. ''' action_previous = ObjectProperty(None) '''Previous button for an ActionView. :data:`action_previous` is an :class:`~kivy.properties.ObjectProperty` and defaults to None. ''' background_color = ListProperty([1, 1, 1, 1]) '''Background color in the format (r, g, b, a). :data:`background_color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 1, 1, 1]. 
''' background_image = StringProperty( 'atlas://data/images/defaulttheme/action_view') '''Background image of an ActionViews default graphical representation. :data:`background_image` is an :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/action_view'. ''' use_separator = BooleanProperty(False) '''Specify whether to use a separator before every ActionGroup or not. :data:`use_separator` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' overflow_group = ObjectProperty(None) '''Widget to be used for the overflow. :data:`overflow_group` is an :class:`~kivy.properties.ObjectProperty` and defaults to an instance of :class:`ActionOverflow`. ''' def __init__(self, **kwargs): self._list_action_items = [] self._list_action_group = [] super(ActionView, self).__init__(**kwargs) self._state = '' if not self.overflow_group: self.overflow_group = ActionOverflow( use_separator=self.use_separator) def on_action_previous(self, instance, value): self._list_action_items.insert(0, value) def add_widget(self, action_item, index=0): if action_item is None: return if not isinstance(action_item, ActionItem): raise ActionBarException('ActionView only accepts ActionItem' ' (got {!r}'.format(action_item)) elif isinstance(action_item, ActionOverflow): self.overflow_group = action_item action_item.use_separator = self.use_separator elif isinstance(action_item, ActionGroup): self._list_action_group.append(action_item) action_item.use_separator = self.use_separator elif isinstance(action_item, ActionPrevious): self.action_previous = action_item else: super(ActionView, self).add_widget(action_item, index) if index == 0: index = len(self._list_action_items) self._list_action_items.insert(index, action_item) def on_use_separator(self, instance, value): for group in self._list_action_group: group.use_separator = value self.overflow_group.use_separator = value def _clear_all(self): self.clear_widgets() for group in self._list_action_group: group.clear_widgets() self.overflow_group.clear_widgets() self.overflow_group.list_action_item = [] def _layout_all(self): # all the items can fit to the view, so expand everything super_add = super(ActionView, self).add_widget self._state = 'all' self._clear_all() super_add(self.action_previous) if len(self._list_action_items) > 1: for child in self._list_action_items[1:]: child.inside_group = False super_add(child) for group in self._list_action_group: if group.mode == 'spinner': super_add(group) group.show_group() else: if group.list_action_item != []: super_add(ActionSeparator()) for child in group.list_action_item: child.inside_group = False super_add(child) self.overflow_group.show_default_items(self) def _layout_group(self): # layout all the items in order to pack them per group super_add = super(ActionView, self).add_widget self._state = 'group' self._clear_all() super_add(self.action_previous) if len(self._list_action_items) > 1: for child in self._list_action_items[1:]: super_add(child) child.inside_group = False for group in self._list_action_group: super_add(group) group.show_group() self.overflow_group.show_default_items(self) def _layout_random(self): # layout the items in order to pack all of them grouped, and display # only the action items having 'important' super_add = super(ActionView, self).add_widget self._state = 'random' self._clear_all() hidden_items = [] hidden_groups = [] total_width = 0 super_add(self.action_previous) width = (self.width - self.overflow_group.minimum_width - 
self.action_previous.minimum_width) if len(self._list_action_items): for child in self._list_action_items[1:]: if child.important: if child.minimum_width + total_width < width: super_add(child) child.inside_group = False total_width += child.minimum_width else: hidden_items.append(child) else: hidden_items.append(child) # if space is left then display ActionItem inside their # ActionGroup if total_width < self.width: for group in self._list_action_group: if group.minimum_width + total_width +\ group.separator_width < width: super_add(group) group.show_group() total_width += group.minimum_width +\ group.separator_width else: hidden_groups.append(group) group_index = len(self.children) - 1 # if space is left then display other ActionItems if total_width < self.width: for child in hidden_items[:]: if child.minimum_width + total_width < width: super_add(child, group_index) total_width += child.minimum_width child.inside_group = False hidden_items.remove(child) # for all the remaining ActionItems and ActionItems with in # ActionGroups, Display them inside overflow_group extend_hidden = hidden_items.extend for group in hidden_groups: extend_hidden(group.list_action_item) overflow_group = self.overflow_group if hidden_items != []: over_add = super(overflow_group.__class__, overflow_group).add_widget for child in hidden_items: over_add(child) overflow_group.show_group() super_add(overflow_group) def on_width(self, width, *args): # determine the layout to use # can we display all of them? total_width = 0 for child in self._list_action_items: total_width += child.minimum_width for group in self._list_action_group: for child in group.list_action_item: total_width += child.minimum_width if total_width <= self.width: if self._state != 'all': self._layout_all() return # can we display them per group? total_width = 0 for child in self._list_action_items: total_width += child.minimum_width for group in self._list_action_group: total_width += group.minimum_width if total_width < self.width: # ok, we can display all the items grouped if self._state != 'group': self._layout_group() return # none of the solutions worked, display them in pack mode self._layout_random() class ContextualActionView(ActionView): '''ContextualActionView class, see the module documentation for more information. ''' pass class ActionBar(BoxLayout): '''ActionBar, see the module documentation for more information. :Events: `on_previous` Fired when action_previous of action_view is pressed. ''' action_view = ObjectProperty(None) '''action_view of ActionBar. :data:`action_view` is an :class:`~kivy.properties.ObjectProperty` and defaults to an instance of ActionView. ''' background_color = ListProperty([1, 1, 1, 1]) '''Background color, in the format (r, g, b, a). :data:`background_color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 1, 1, 1]. ''' background_image = StringProperty( 'atlas://data/images/defaulttheme/action_bar') '''Background image of the ActionBars default graphical representation. :data:`background_image` is an :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/action_bar'. ''' border = ListProperty([2, 2, 2, 2]) ''':data:`border` to be applied to the :data:`background_image`. 
''' __events__ = ('on_previous',) def __init__(self, **kwargs): super(ActionBar, self).__init__(**kwargs) self._stack_cont_action_view = [] self._emit_previous = partial(self.dispatch, 'on_previous') def add_widget(self, view): if isinstance(view, ContextualActionView): self._stack_cont_action_view.append(view) if view.action_previous is not None: view.action_previous.unbind(on_release=self._emit_previous) view.action_previous.bind(on_release=self._emit_previous) self.clear_widgets() super(ActionBar, self).add_widget(view) elif isinstance(view, ActionView): self.action_view = view super(ActionBar, self).add_widget(view) else: raise ActionBarException( 'ActionBar can only add ContextualActionView or ActionView') def on_previous(self, *args): self._pop_contextual_action_view() def _pop_contextual_action_view(self): '''Remove the current ContextualActionView and display either the previous one or the ActionView. ''' self._stack_cont_action_view.pop() self.clear_widgets() if self._stack_cont_action_view == []: super(ActionBar, self).add_widget(self.action_view) else: super(ActionBar, self).add_widget(self._stack_cont_action_view[-1]) if __name__ == "__main__": from kivy.base import runTouchApp from kivy.uix.floatlayout import FloatLayout from kivy.lang import Builder from kivy.factory import Factory # XXX clean the first registration done from '__main__' here. # otherwise kivy.uix.actionbar.ActionPrevious != __main__.ActionPrevious Factory.unregister('ActionPrevious') Builder.load_string(''' <MainWindow>: ActionBar: pos_hint: {'top':1} ActionView: use_separator: True ActionPrevious: title: 'Action Bar' with_previous: False ActionOverflow: ActionButton: text: 'Btn0' icon: 'atlas://data/images/defaulttheme/audio-volume-high' ActionButton: text: 'Btn1' ActionButton: text: 'Btn2' ActionGroup: text: 'Group 2' ActionButton: text: 'Btn3' ActionButton: text: 'Btn4' ActionGroup: text: 'Group1' ActionButton: text: 'Btn5' ActionButton: text: 'Btn6' ActionButton: text: 'Btn7' ''') class MainWindow(FloatLayout): pass float_layout = MainWindow() runTouchApp(float_layout)
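
# The same bar can also be assembled without Kv (an illustrative sketch;
# widget names and texts are arbitrary). ActionView.add_widget dispatches on
# the item type -- ActionPrevious, ActionGroup, ActionOverflow or a plain
# ActionItem -- as implemented above.
#
#     bar = ActionBar(pos_hint={'top': 1})
#     view = ActionView(use_separator=True)
#     view.add_widget(ActionPrevious(title='Action Bar', with_previous=False))
#     view.add_widget(ActionButton(text='Btn0'))
#     group = ActionGroup(text='Group 1')
#     group.add_widget(ActionButton(text='Btn1'))
#     view.add_widget(group)
#     bar.add_widget(view)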
"""Functions shared between different beamformer types.""" # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Roman Goj <roman.goj@gmail.com> # Britta Westner <britta.wstnr@gmail.com> # # License: BSD (3-clause) import numpy as np from scipy import linalg from ..io.constants import FIFF from ..io.proj import make_projector from ..io.pick import (pick_channels_forward, pick_info) from ..minimum_norm.inverse import _get_vertno from ..source_space import label_src_vertno_sel from ..utils import logger, warn, estimate_rank from ..channels.channels import _contains_ch_type def _reg_pinv(x, reg, rcond=1e-15): """Compute a regularized pseudoinverse of a square array. Parameters ---------- x : ndarray, shape (n, n) Square array to invert. reg : float Regularization parameter. rcond : float | 'auto' Cutoff for small singular values. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Use 'auto' to attempt to automatically set a sane value. Defaults to 1e-15. """ covrank, s = estimate_rank(x, tol='auto', norm=False, return_singular=True) # This adds the regularization without using np.eye d = reg * np.trace(x) / len(x) x = x.copy() x.flat[::x.shape[0] + 1] += d if covrank < len(x): if reg == 0: warn('Covariance matrix is rank-deficient and no regularization ' 'is done.') if rcond == 'auto': # Reduce the toleration of the pseudo-inverse to force a solution s = linalg.svd(x, compute_uv=False) tol = s[covrank - 1:covrank + 1].mean() tol = max( tol, len(x) * linalg.norm(x) * np.finfo(float).eps ) rcond = tol / s.max() if rcond == 'auto': rcond = 1e-15 return linalg.pinv(x, rcond=rcond), d def _eig_inv(x, rank): """Compute a pseudoinverse with smallest component set to zero.""" U, s, V = linalg.svd(x) # pseudoinverse is computed by setting eigenvalues not included in # signalspace to zero s_inv = np.zeros(s.shape) s_inv[:rank] = 1. 
/ s[:rank] x_inv = np.dot(V.T, s_inv[:, np.newaxis] * U.T) return x_inv def _setup_picks(info, forward, data_cov=None, noise_cov=None): """Return good channels common to forward model and covariance matrices.""" # get a list of all channel names: fwd_ch_names = forward['info']['ch_names'] # handle channels from forward model and info: ch_names = _compare_ch_names(info['ch_names'], fwd_ch_names, info['bads']) # inform about excluding channels: if (data_cov is not None and set(info['bads']) != set(data_cov['bads']) and (len(set(ch_names).intersection(data_cov['bads'])) > 0)): logger.info('info["bads"] and data_cov["bads"] do not match, ' 'excluding bad channels from both.') if (noise_cov is not None and set(info['bads']) != set(noise_cov['bads']) and (len(set(ch_names).intersection(noise_cov['bads'])) > 0)): logger.info('info["bads"] and noise_cov["bads"] do not match, ' 'excluding bad channels from both.') # handle channels from data cov if data cov is not None # Note: data cov is supposed to be None in tf_lcmv if data_cov is not None: ch_names = _compare_ch_names(ch_names, data_cov.ch_names, data_cov['bads']) # handle channels from noise cov if noise cov available: if noise_cov is not None: ch_names = _compare_ch_names(ch_names, noise_cov.ch_names, noise_cov['bads']) picks = [info['ch_names'].index(k) for k in ch_names if k in info['ch_names']] return picks def _compare_ch_names(names1, names2, bads): """Return channel names of common and good channels.""" ch_names = [ch for ch in names1 if ch not in bads and ch in names2] return ch_names def _check_one_ch_type(info, picks, noise_cov, method): """Check number of sensor types and presence of noise covariance matrix.""" # XXX : ugly hack to avoid picking subset of info with applied comps comps = info['comps'] info['comps'] = [] info_pick = pick_info(info, sel=picks) info['comps'] = comps ch_types =\ [_contains_ch_type(info_pick, tt) for tt in ('mag', 'grad', 'eeg')] if method == 'lcmv' and sum(ch_types) > 1 and noise_cov is None: raise ValueError('Source reconstruction with several sensor types ' 'requires a noise covariance matrix to be ' 'able to apply whitening.') elif method == 'dics' and sum(ch_types) > 1: warn('The use of several sensor types with the DICS beamformer is ' 'not heavily tested yet.') def _pick_channels_spatial_filter(ch_names, filters): """Return data channel indices to be used with spatial filter. Unlike ``pick_channels``, this respects the order of ch_names. """ sel = [] # first check for channel discrepancies between filter and data: for ch_name in filters['ch_names']: if ch_name not in ch_names: raise ValueError('The spatial filter was computed with channel %s ' 'which is not present in the data. You should ' 'compute a new spatial filter restricted to the ' 'good data channels.' 
% ch_name) # then compare list of channels and get selection based on data: sel = [ii for ii, ch_name in enumerate(ch_names) if ch_name in filters['ch_names']] return sel def _check_proj_match(info, filters): """Check whether SSP projections in data and spatial filter match.""" proj_data, _, _ = make_projector(info['projs'], filters['ch_names']) if not np.array_equal(proj_data, filters['proj']): raise ValueError('The SSP projections present in the data ' 'do not match the projections used when ' 'calculating the spatial filter.') def _check_src_type(filters): """Check whether src_type is in filters and set custom warning.""" if 'src_type' not in filters: filters['src_type'] = None warn_text = ('The spatial filter does not contain src_type and a robust ' 'guess of src_type is not possible without src. Consider ' 'recomputing the filter.') return filters, warn_text def _prepare_beamformer_input(info, forward, label, picks, pick_ori, fwd_norm=None): """Input preparation common for all beamformer functions. Check input values, prepare channel list and gain matrix. For documentation of parameters, please refer to _apply_lcmv. """ is_free_ori = forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI if pick_ori in ['normal', 'max-power', 'vector']: if not is_free_ori: raise ValueError( 'Normal or max-power orientation can only be picked ' 'when a forward operator with free orientation is used.') elif pick_ori is not None: raise ValueError('pick_ori must be one of "normal", "max-power", ' '"vector", or None, got %s' % (pick_ori,)) if pick_ori == 'normal' and not forward['surf_ori']: # XXX eventually this could just call convert_forward_solution raise ValueError('Normal orientation can only be picked when a ' 'forward operator oriented in surface coordinates is ' 'used.') if pick_ori == 'normal' and not forward['src'][0]['type'] == 'surf': raise ValueError('Normal orientation can only be picked when a ' 'forward operator with a surface-based source space ' 'is used.') # Restrict forward solution to selected channels info_ch_names = [ch['ch_name'] for ch in info['chs']] ch_names = [info_ch_names[k] for k in picks] fwd_ch_names = forward['sol']['row_names'] # Keep channels in forward present in info: fwd_ch_names = [ch for ch in fwd_ch_names if ch in info_ch_names] forward = pick_channels_forward(forward, fwd_ch_names) picks_forward = [fwd_ch_names.index(ch) for ch in ch_names] # Get gain matrix (forward operator) if label is not None: vertno, src_sel = label_src_vertno_sel(label, forward['src']) if is_free_ori: src_sel = 3 * src_sel src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2] src_sel = src_sel.ravel() G = forward['sol']['data'][:, src_sel] else: vertno = _get_vertno(forward['src']) G = forward['sol']['data'] # Apply SSPs proj, ncomp, _ = make_projector(info['projs'], fwd_ch_names) if info['projs']: G = np.dot(proj, G) # Pick after applying the projections. This makes a copy of G, so further # operations can be safely done in-place. G = G[picks_forward] proj = proj[np.ix_(picks_forward, picks_forward)] # Normalize the leadfield if requested if fwd_norm == 'dipole': # each orientation separately G /= np.linalg.norm(G, axis=0) elif fwd_norm == 'vertex': # all three orientations per loc jointly depth_prior = np.sum(G ** 2, axis=0) if is_free_ori: depth_prior = depth_prior.reshape(-1, 3).sum(axis=1) # Spherical leadfield can be zero at the center depth_prior[depth_prior == 0.] = np.min( depth_prior[depth_prior != 0.]) if is_free_ori: depth_prior = np.repeat(depth_prior, 3) source_weighting = np.sqrt(1. 
/ depth_prior) G *= source_weighting[np.newaxis, :] elif fwd_norm is not None: raise ValueError('Got invalid value for "fwd_norm". Valid ' 'values are: "dipole", "vertex" or None.') return is_free_ori, ch_names, proj, vertno, G def _compute_beamformer(method, G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank, is_free_ori, inversion=None): """Compute a spatial filter (LCMV or DICS).""" # Tikhonov regularization using reg parameter d to control for # trade-off between spatial resolution and noise sensitivity if method == 'lcmv': Cm_inv, d = _reg_pinv(Cm.copy(), reg) elif method == 'dics': Cm_inv, _ = _reg_pinv(Cm, reg, rcond='auto') if weight_norm is not None and inversion is not 'single': # Compute square of Cm_inv used for weight normalization Cm_inv_sq = np.dot(Cm_inv, Cm_inv) if weight_norm == 'nai': # estimate noise level based on covariance matrix, taking the # smallest eigenvalue that is not zero noise, _ = linalg.eigh(Cm) if rank is not None: rank_Cm = rank else: rank_Cm = estimate_rank(Cm, tol='auto', norm=False, return_singular=False) noise = noise[len(noise) - rank_Cm] # use either noise floor or regularization parameter d noise = max(noise, d) # compute spatial filter W = np.dot(G.T, Cm_inv) n_sources = G.shape[1] // n_orient for k in range(n_sources): Wk = W[n_orient * k: n_orient * k + n_orient] Gk = G[:, n_orient * k: n_orient * k + n_orient] if method == 'lcmv' and np.all(Gk == 0.): continue Ck = np.dot(Wk, Gk) if method == 'dics': # Normalize the spatial filters: if Wk.ndim == 2 and len(Wk) > 1: # Free source orientation if inversion == 'single': # Invert for each dipole separately using plain division Wk /= np.diag(Ck)[:, np.newaxis] elif inversion == 'matrix': # Invert for all dipoles simultaneously using matrix # inversion. Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk) else: # Fixed source orientation Wk /= Ck # compute scalar beamformer by finding the source orientation # which maximizes output source power if pick_ori == 'max-power': if weight_norm is not None and inversion is not 'single': # finding optimal orientation for NAI and unit-noise-gain # based on [2]_, Eq. 4.47 tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk)) if reduce_rank: # use pseudo inverse computation setting smallest component # to zero if the leadfield is not full rank tmp_inv = _eig_inv(tmp, tmp.shape[0] - 1) else: # use straight inverse with full rank leadfield try: tmp_inv = linalg.inv(tmp) except np.linalg.linalg.LinAlgError: raise ValueError('Singular matrix detected when ' 'estimating spatial filters. ' 'Consider reducing the rank of the ' 'leadfield by using ' 'reduce_rank=True.') power = np.dot(tmp_inv, np.dot(Wk, Gk)) elif weight_norm is not None and inversion == 'single': # First make the filters unit gain, then apply them to the # CSD matrix to compute power. norm = 1 / np.sqrt(np.sum(Wk ** 2, axis=1)) Wk_norm = Wk / norm[:, np.newaxis] power = Wk_norm.dot(Cm).dot(Wk_norm.T) else: if method == 'dics': # Compute spectral power by applying the spatial filters to # the CSD matrix. power = Wk.dot(Cm).dot(Wk.T) elif method == 'lcmv': # no weight-normalization and max-power is not implemented # yet for lcmv beamformer: raise NotImplementedError('The max-power orientation ' 'selection is not yet ' 'implemented with weight_norm ' 'set to None.') # compute the orientation: if method == 'lcmv': eig_vals, eig_vecs = linalg.eig(power) if np.iscomplex(eig_vecs).any(): raise ValueError('The eigenspectrum of the leadfield ' 'at this voxel is complex. 
Consider ' 'reducing the rank of the leadfield ' 'by using reduce_rank=True.') idx_max = eig_vals.argmax() max_ori = eig_vecs[:, idx_max] Wk[:] = np.dot(max_ori, Wk) Gk = np.dot(Gk, max_ori) # compute spatial filter for NAI or unit-noise-gain tmp = np.dot(Gk.T, np.dot(Cm_inv_sq, Gk)) denom = np.sqrt(tmp) Wk /= denom if weight_norm == 'nai': Wk /= np.sqrt(noise) is_free_ori = False elif method == 'dics': # Compute the direction of max power u, s, _ = np.linalg.svd(power.real) max_ori = u[:, 0] Wk[:] = np.dot(max_ori, Wk) else: # do vector beamformer if method == 'lcmv': # compute the filters: if is_free_ori: # Free source orientation Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk) else: # Fixed source orientation Wk /= Ck # handle noise normalization with free/normal source # orientation: if weight_norm == 'nai': raise NotImplementedError('Weight normalization with ' 'neural activity index is not ' 'implemented yet with free or ' 'fixed orientation.') elif weight_norm == 'unit-noise-gain': noise_norm = np.sum(Wk ** 2, axis=1) if is_free_ori: noise_norm = np.sum(noise_norm) noise_norm = np.sqrt(noise_norm) if noise_norm == 0.: noise_norm_inv = 0. # avoid division by 0 else: noise_norm_inv = 1. / noise_norm Wk[:] *= noise_norm_inv # picking source orientation maximizing output source power if pick_ori == 'max-power': W = W[0::3] elif pick_ori == 'normal': W = W[2::3] is_free_ori = False if method == 'dics': if weight_norm == 'unit-noise-gain': # Scale weights so that W @ I @ W.T == I if pick_ori is None and n_orient > 1: # Compute the norm for each set of 3 dipoles W = W.reshape(-1, 3, W.shape[1]) norm = np.sqrt(np.sum(W ** 2, axis=(1, 2))) W /= norm[:, np.newaxis, np.newaxis] W = W.reshape(-1, W.shape[2]) else: # Compute the norm for each dipole norm = np.sqrt(np.sum(W ** 2, axis=1)) W /= norm[:, np.newaxis] return W, is_free_ori
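

# A self-contained sketch of the diagonal loading performed in _reg_pinv
# (illustrative values; np is this module's own import): reg scales the
# average sensor power, trace(x) / n, and is added to the diagonal before
# pseudo-inversion to stabilize rank-deficient covariance matrices.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    data = rng.randn(5, 4)
    cov = data @ data.T                  # 5 x 5 but rank 4: rank-deficient
    reg = 0.05
    d = reg * np.trace(cov) / len(cov)   # loading term, as in _reg_pinv
    cov_loaded = cov + d * np.eye(len(cov))
    # Conditioning improves dramatically after loading
    print(np.linalg.cond(cov), np.linalg.cond(cov_loaded))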
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import division, print_function, absolute_import import itertools import numpy import warnings from . import _ni_support from . import _nd_image from ._ni_docstrings import docdict from scipy._lib import doccer # Change the default 'reflect' to 'constant' via modifying a copy of docdict docdict_copy = docdict.copy() del docdict docdict_copy['mode'] = docdict_copy['mode'].replace("Default is 'reflect'", "Default is 'constant'") docfiller = doccer.filldoc(docdict_copy) __all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform', 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate'] @docfiller def spline_filter1d(input, order=3, axis=-1, output=numpy.float64, mode='mirror'): """ Calculate a one-dimensional spline filter along the given axis. The lines of the array along the given axis are filtered by a spline filter. The order of the spline must be >= 2 and <= 5. Parameters ---------- %(input)s order : int, optional The order of the spline, default is 3. axis : int, optional The axis along which the spline filter is applied. Default is the last axis. output : ndarray or dtype, optional The array in which to place the output, or the dtype of the returned array. Default is ``numpy.float64``. %(mode)s Returns ------- spline_filter1d : ndarray The filtered input. Notes ----- All functions in `ndimage.interpolation` do spline interpolation of the input image. If using b-splines of `order > 1`, the input image values have to be converted to b-spline coefficients first, which is done by applying this one-dimensional filter sequentially along all axes of the input. All functions that require b-spline coefficients will automatically filter their inputs, a behavior controllable with the `prefilter` keyword argument. For functions that accept a `mode` parameter, the result will only be correct if it matches the `mode` used when filtering. 
""" if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) if order in [0, 1]: output[...] = numpy.array(input) else: mode = _ni_support._extend_mode_to_code(mode) axis = _ni_support._check_axis(axis, input.ndim) _nd_image.spline_filter1d(input, order, axis, output, mode) return output def spline_filter(input, order=3, output=numpy.float64, mode='mirror'): """ Multi-dimensional spline filter. For more details, see `spline_filter1d`. See Also -------- spline_filter1d Notes ----- The multi-dimensional filter is implemented as a sequence of one-dimensional spline filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. """ if order < 2 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) if order not in [0, 1] and input.ndim > 0: for axis in range(input.ndim): spline_filter1d(input, order, axis, output=output, mode=mode) input = output else: output[...] = input[...] return output @docfiller def geometric_transform(input, mapping, output_shape=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True, extra_arguments=(), extra_keywords={}): """ Apply an arbitrary geometric transform. The given mapping function is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. Parameters ---------- %(input)s mapping : {callable, scipy.LowLevelCallable} A callable object that accepts a tuple of length equal to the output array rank, and returns the corresponding input coordinates as a tuple of length equal to the input array rank. output_shape : tuple of ints, optional Shape tuple. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode)s %(cval)s %(prefilter)s extra_arguments : tuple, optional Extra arguments passed to `mapping`. extra_keywords : dict, optional Extra keywords passed to `mapping`. Returns ------- output : ndarray The filtered input. See Also -------- map_coordinates, affine_transform, spline_filter1d Notes ----- This function also accepts low-level callback functions with one the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int mapping(npy_intp *output_coordinates, double *input_coordinates, int output_rank, int input_rank, void *user_data) int mapping(intptr_t *output_coordinates, double *input_coordinates, int output_rank, int input_rank, void *user_data) The calling function iterates over the elements of the output array, calling the callback function at each element. The coordinates of the current output element are passed through ``output_coordinates``. The callback function must return the coordinates at which the input must be interpolated in ``input_coordinates``. The rank of the input and output arrays are given by ``input_rank`` and ``output_rank`` respectively. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. 
The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. Examples -------- >>> import numpy as np >>> from scipy.ndimage import geometric_transform >>> a = np.arange(12.).reshape((4, 3)) >>> def shift_func(output_coords): ... return (output_coords[0] - 0.5, output_coords[1] - 0.5) ... >>> geometric_transform(a, shift_func) array([[ 0. , 0. , 0. ], [ 0. , 1.362, 2.738], [ 0. , 4.812, 6.187], [ 0. , 8.263, 9.637]]) >>> b = [1, 2, 3, 4, 5] >>> def shift_func(output_coords): ... return (output_coords[0] - 3,) ... >>> geometric_transform(b, shift_func, mode='constant') array([0, 0, 0, 1, 2]) >>> geometric_transform(b, shift_func, mode='nearest') array([1, 1, 1, 1, 2]) >>> geometric_transform(b, shift_func, mode='reflect') array([3, 2, 1, 1, 2]) >>> geometric_transform(b, shift_func, mode='wrap') array([2, 3, 4, 1, 2]) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if output_shape is None: output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') mode = _ni_support._extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input output = _ni_support._get_output(output, input, shape=output_shape) _nd_image.geometric_transform(filtered, mapping, None, None, None, output, order, mode, cval, extra_arguments, extra_keywords) return output @docfiller def map_coordinates(input, coordinates, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Map the input array to new coordinates by interpolation. The array of coordinates is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. The shape of the output is derived from that of the coordinate array by dropping the first axis. The values of the array along the first axis are the coordinates in the input array at which the output value is found. Parameters ---------- %(input)s coordinates : array_like The coordinates at which `input` is evaluated. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode)s %(cval)s %(prefilter)s Returns ------- map_coordinates : ndarray The result of transforming the input. The shape of the output is derived from that of `coordinates` by dropping the first axis. See Also -------- spline_filter, geometric_transform, scipy.interpolate Examples -------- >>> from scipy import ndimage >>> a = np.arange(12.).reshape((4, 3)) >>> a array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., 8.], [ 9., 10., 11.]]) >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) array([ 2., 7.]) Above, the interpolated value of a[0.5, 0.5] gives output[0], while a[2, 1] is output[1]. >>> inds = np.array([[0.5, 2], [0.5, 4]]) >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3) array([ 2. 
, -33.3]) >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest') array([ 2., 8.]) >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) array([ True, False], dtype=bool) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') coordinates = numpy.asarray(coordinates) if numpy.iscomplexobj(coordinates): raise TypeError('Complex type not supported') output_shape = coordinates.shape[1:] if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') if coordinates.shape[0] != input.ndim: raise RuntimeError('invalid shape for coordinate array') mode = _ni_support._extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input output = _ni_support._get_output(output, input, shape=output_shape) _nd_image.geometric_transform(filtered, None, coordinates, None, None, output, order, mode, cval, None, None) return output @docfiller def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Apply an affine transformation. Given an output image pixel index vector ``o``, the pixel value is determined from the input image at position ``np.dot(matrix, o) + offset``. This does 'pull' (or 'backward') resampling, transforming the output space to the input to locate data. Affine transformations are often described in the 'push' (or 'forward') direction, transforming input to output. If you have a matrix for the 'push' transformation, use its inverse (:func:`numpy.linalg.inv`) in this function. Parameters ---------- %(input)s matrix : ndarray The inverse coordinate transformation matrix, mapping output coordinates to input coordinates. If ``ndim`` is the number of dimensions of ``input``, the given matrix must have one of the following shapes: - ``(ndim, ndim)``: the linear transformation matrix for each output coordinate. - ``(ndim,)``: assume that the 2D transformation matrix is diagonal, with the diagonal specified by the given value. A more efficient algorithm is then used that exploits the separability of the problem. - ``(ndim + 1, ndim + 1)``: assume that the transformation is specified using homogeneous coordinates [1]_. In this case, any value passed to ``offset`` is ignored. - ``(ndim, ndim + 1)``: as above, but the bottom row of a homogeneous transformation matrix is always ``[0, 0, ..., 1]``, and may be omitted. offset : float or sequence, optional The offset into the array where the transform is applied. If a float, `offset` is the same for each axis. If a sequence, `offset` should contain one value for each axis. output_shape : tuple of ints, optional Shape tuple. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode)s %(cval)s %(prefilter)s Returns ------- affine_transform : ndarray The transformed input. Notes ----- The given matrix and offset are used to find for each point in the output the corresponding coordinates in the input by an affine transformation. The value of the input at those coordinates is determined by spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. .. 
versionchanged:: 0.18.0 Previously, the exact interpretation of the affine transformation depended on whether the matrix was supplied as a one-dimensional or two-dimensional array. If a one-dimensional array was supplied to the matrix parameter, the output pixel value at index ``o`` was determined from the input image at position ``matrix * (o + offset)``. References ---------- .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if output_shape is None: output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') mode = _ni_support._extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input output = _ni_support._get_output(output, input, shape=output_shape) matrix = numpy.asarray(matrix, dtype=numpy.float64) if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: raise RuntimeError('no proper affine matrix provided') if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and (matrix.shape[0] in [input.ndim, input.ndim + 1])): if matrix.shape[0] == input.ndim + 1: exptd = [0] * input.ndim + [1] if not numpy.all(matrix[input.ndim] == exptd): msg = ('Expected homogeneous transformation matrix with ' 'shape %s for image shape %s, but bottom row was ' 'not equal to %s' % (matrix.shape, input.shape, exptd)) raise ValueError(msg) # assume input is homogeneous coordinate transformation matrix offset = matrix[:input.ndim, input.ndim] matrix = matrix[:input.ndim, :input.ndim] if matrix.shape[0] != input.ndim: raise RuntimeError('affine matrix has wrong number of rows') if matrix.ndim == 2 and matrix.shape[1] != output.ndim: raise RuntimeError('affine matrix has wrong number of columns') if not matrix.flags.contiguous: matrix = matrix.copy() offset = _ni_support._normalize_sequence(offset, input.ndim) offset = numpy.asarray(offset, dtype=numpy.float64) if offset.ndim != 1 or offset.shape[0] < 1: raise RuntimeError('no proper offset provided') if not offset.flags.contiguous: offset = offset.copy() if matrix.ndim == 1: warnings.warn( "The behaviour of affine_transform with a one-dimensional " "array supplied for the matrix parameter has changed in " "scipy 0.18.0." ) _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order, mode, cval) else: _nd_image.geometric_transform(filtered, None, None, matrix, offset, output, order, mode, cval, None, None) return output @docfiller def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Shift an array. The array is shifted using spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. Parameters ---------- %(input)s shift : float or sequence The shift along the axes. If a float, `shift` is the same for each axis. If a sequence, `shift` should contain one value for each axis. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode)s %(cval)s %(prefilter)s Returns ------- shift : ndarray The shifted input. 
""" if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if input.ndim < 1: raise RuntimeError('input and output rank must be > 0') mode = _ni_support._extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input output = _ni_support._get_output(output, input) shift = _ni_support._normalize_sequence(shift, input.ndim) shift = [-ii for ii in shift] shift = numpy.asarray(shift, dtype=numpy.float64) if not shift.flags.contiguous: shift = shift.copy() _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval) return output @docfiller def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Zoom an array. The array is zoomed using spline interpolation of the requested order. Parameters ---------- %(input)s zoom : float or sequence The zoom factor along the axes. If a float, `zoom` is the same for each axis. If a sequence, `zoom` should contain one value for each axis. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode)s %(cval)s %(prefilter)s Returns ------- zoom : ndarray The zoomed input. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = misc.ascent() >>> result = ndimage.zoom(ascent, 3.0) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() >>> print(ascent.shape) (512, 512) >>> print(result.shape) (1536, 1536) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if input.ndim < 1: raise RuntimeError('input and output rank must be > 0') mode = _ni_support._extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output=numpy.float64) else: filtered = input zoom = _ni_support._normalize_sequence(zoom, input.ndim) output_shape = tuple( [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) zoom_div = numpy.array(output_shape, float) - 1 # Zooming to infinite values is unpredictable, so just choose # zoom factor 1 instead zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div, out=numpy.ones_like(input.shape, dtype=numpy.float64), where=zoom_div != 0) output = _ni_support._get_output(output, input, shape=output_shape) zoom = numpy.ascontiguousarray(zoom) _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval) return output @docfiller def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Rotate an array. The array is rotated in the plane defined by the two axes given by the `axes` parameter using spline interpolation of the requested order. Parameters ---------- %(input)s angle : float The rotation angle in degrees. axes : tuple of 2 ints, optional The two axes that define the plane of rotation. Default is the first two axes. reshape : bool, optional If `reshape` is true, the output shape is adapted so that the input array is contained completely in the output. Default is True. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. 
%(mode)s %(cval)s %(prefilter)s Returns ------- rotate : ndarray The rotated input. Examples -------- >>> from scipy import ndimage, misc >>> import matplotlib.pyplot as plt >>> fig = plt.figure(figsize=(10, 3)) >>> ax1, ax2, ax3 = fig.subplots(1, 3) >>> img = misc.ascent() >>> img_45 = ndimage.rotate(img, 45, reshape=False) >>> full_img_45 = ndimage.rotate(img, 45, reshape=True) >>> ax1.imshow(img, cmap='gray') >>> ax1.set_axis_off() >>> ax2.imshow(img_45, cmap='gray') >>> ax2.set_axis_off() >>> ax3.imshow(full_img_45, cmap='gray') >>> ax3.set_axis_off() >>> fig.set_tight_layout(True) >>> plt.show() >>> print(img.shape) (512, 512) >>> print(img_45.shape) (512, 512) >>> print(full_img_45.shape) (724, 724) """ input_arr = numpy.asarray(input) ndim = input_arr.ndim if ndim < 2: raise ValueError('input array should be at least two-dimensional') axes = list(axes) if len(axes) != 2: raise ValueError('axes should contain exactly two values') if not all([float(ax).is_integer() for ax in axes]): raise ValueError('axes should contain only integer values') if axes[0] < 0: axes[0] += ndim if axes[1] < 0: axes[1] += ndim if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim: raise ValueError('invalid rotation plane specified') axes.sort() angle_rad = numpy.deg2rad(angle) c, s = numpy.cos(angle_rad), numpy.sin(angle_rad) rot_matrix = numpy.array([[c, s], [-s, c]]) img_shape = numpy.asarray(input_arr.shape) in_plane_shape = img_shape[axes] if reshape: # Compute transformed input bounds iy, ix = in_plane_shape out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]] # Compute the shape of the transformed input plane out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int) else: out_plane_shape = img_shape[axes] out_center = rot_matrix @ ((out_plane_shape - 1) / 2) in_center = (in_plane_shape - 1) / 2 offset = in_center - out_center output_shape = img_shape output_shape[axes] = out_plane_shape output_shape = tuple(output_shape) output = _ni_support._get_output(output, input_arr, shape=output_shape) if ndim <= 2: affine_transform(input_arr, rot_matrix, offset, output_shape, output, order, mode, cval, prefilter) else: # If ndim > 2, the rotation is applied over all the planes # parallel to axes planes_coord = itertools.product( *[[slice(None)] if ax in axes else range(img_shape[ax]) for ax in range(ndim)]) out_plane_shape = tuple(out_plane_shape) for coordinates in planes_coord: ia = input_arr[coordinates] oa = output[coordinates] affine_transform(ia, rot_matrix, offset, out_plane_shape, oa, order, mode, cval, prefilter) return output
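# ---------------------------------------------------------------------------
# Usage sketch (not part of the module above): the 'pull' semantics of
# affine_transform, as described in its docstring. output[o] is sampled at
# ``matrix @ o + offset``, so to move image content by +1 along each axis
# you pass the *inverse* transform, i.e. an offset of -1. Uses only the
# public scipy.ndimage API defined above.
# ---------------------------------------------------------------------------
import numpy as np
from scipy import ndimage

a = np.zeros((5, 5))
a[1, 1] = 1.0

# Identity matrix with offset -1 pulls input[o - 1] into output[o],
# moving the bright pixel from (1, 1) to (2, 2).
moved = ndimage.affine_transform(a, np.eye(2), offset=-1, order=0)
assert moved[2, 2] == 1.0

# shift() wraps the same machinery with the more intuitive 'push' sign.
also_moved = ndimage.shift(a, (1, 1), order=0)
assert np.allclose(moved, also_moved)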
# This library is free software: you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation, either # version 3 of the License, or (at your option) any later version. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/> or <http://www.gnu.org/licenses/lgpl.txt>. import struct import base64 import hashlib import hmac import random import re import binascii from socket import gethostname from .. import six from . import des NTLM_NegotiateUnicode = 0x00000001 NTLM_NegotiateOEM = 0x00000002 NTLM_RequestTarget = 0x00000004 NTLM_Unknown9 = 0x00000008 NTLM_NegotiateSign = 0x00000010 NTLM_NegotiateSeal = 0x00000020 NTLM_NegotiateDatagram = 0x00000040 NTLM_NegotiateLanManagerKey = 0x00000080 NTLM_Unknown8 = 0x00000100 NTLM_NegotiateNTLM = 0x00000200 NTLM_NegotiateNTOnly = 0x00000400 NTLM_Anonymous = 0x00000800 NTLM_NegotiateOemDomainSupplied = 0x00001000 NTLM_NegotiateOemWorkstationSupplied = 0x00002000 NTLM_Unknown6 = 0x00004000 NTLM_NegotiateAlwaysSign = 0x00008000 NTLM_TargetTypeDomain = 0x00010000 NTLM_TargetTypeServer = 0x00020000 NTLM_TargetTypeShare = 0x00040000 NTLM_NegotiateExtendedSecurity = 0x00080000 NTLM_NegotiateIdentify = 0x00100000 NTLM_Unknown5 = 0x00200000 NTLM_RequestNonNTSessionKey = 0x00400000 NTLM_NegotiateTargetInfo = 0x00800000 NTLM_Unknown4 = 0x01000000 NTLM_NegotiateVersion = 0x02000000 NTLM_Unknown3 = 0x04000000 NTLM_Unknown2 = 0x08000000 NTLM_Unknown1 = 0x10000000 NTLM_Negotiate128 = 0x20000000 NTLM_NegotiateKeyExchange = 0x40000000 NTLM_Negotiate56 = 0x80000000 # we send these flags with our type 1 message NTLM_TYPE1_FLAGS = (NTLM_NegotiateUnicode | NTLM_NegotiateOEM | NTLM_RequestTarget | NTLM_NegotiateNTLM | NTLM_NegotiateOemDomainSupplied | NTLM_NegotiateOemWorkstationSupplied | NTLM_NegotiateAlwaysSign | NTLM_NegotiateExtendedSecurity | NTLM_NegotiateVersion | NTLM_Negotiate128 | NTLM_Negotiate56) NTLM_TYPE2_FLAGS = (NTLM_NegotiateUnicode | NTLM_RequestTarget | NTLM_NegotiateNTLM | NTLM_NegotiateAlwaysSign | NTLM_NegotiateExtendedSecurity | NTLM_NegotiateTargetInfo | NTLM_NegotiateVersion | NTLM_Negotiate128 | NTLM_Negotiate56) # Indicates that this is the last AV_PAIR in the list. AvLen MUST be 0. # This type of information MUST be present in the AV pair list. NTLM_MsvAvEOL = 0 # The server's NetBIOS computer name. The name MUST be in Unicode, and is not null-terminated. # This type of information MUST be present in the AV_pair list. NTLM_MsvAvNbComputerName = 1 # The server's NetBIOS domain name. The name MUST be in Unicode, and is not null-terminated. # This type of information MUST be present in the AV_pair list. NTLM_MsvAvNbDomainName = 2 # The server's Active Directory DNS computer name. The name MUST be in Unicode, and is not null-terminated. NTLM_MsvAvDnsComputerName = 3 # The server's Active Directory DNS domain name. The name MUST be in Unicode, and is not null-terminated. NTLM_MsvAvDnsDomainName = 4 # The server's Active Directory (AD) DNS forest tree name. The name MUST be in Unicode, and is not null-terminated. NTLM_MsvAvDnsTreeName = 5 # A field containing a 32-bit value indicating server or client configuration. 
0x00000001: indicates to the # client that the account authentication is constrained. 0x00000002: indicates that the client is providing message # integrity in the MIC field (section 2.2.1.3) in the AUTHENTICATE_MESSAGE. NTLM_MsvAvFlags = 6 # A FILETIME structure ([MS-DTYP] section 2.3.1) in little-endian byte order that contains the server local time.<12> NTLM_MsvAvTimestamp = 7 # A Restriction_Encoding structure (section 2.2.2.2). The Value field contains a structure representing # the integrity level of the security principal, as well as a MachineID created at computer startup # to identify the calling machine. <13> NTLM_MsAvRestrictions = 8 """ utility functions for Microsoft NTLM authentication References: [MS-NLMP]: NT LAN Manager (NTLM) Authentication Protocol Specification http://download.microsoft.com/download/a/e/6/ae6e4142-aa58-45c6-8dcf-a657e5900cd3/%5BMS-NLMP%5D.pdf [MS-NTHT]: NTLM Over HTTP Protocol Specification http://download.microsoft.com/download/a/e/6/ae6e4142-aa58-45c6-8dcf-a657e5900cd3/%5BMS-NTHT%5D.pdf Cntlm Authentication Proxy http://cntlm.awk.cz/ NTLM Authorization Proxy Server http://sourceforge.net/projects/ntlmaps/ Optimized Attack for NTLM2 Session Response http://www.blackhat.com/presentations/bh-asia-04/bh-jp-04-pdfs/bh-jp-04-seki.pdf """ def create_NTLM_NEGOTIATE_MESSAGE(user, type1_flags=NTLM_TYPE1_FLAGS): BODY_LENGTH = 40 Payload_start = BODY_LENGTH # in bytes protocol = b'NTLMSSP\0' # name type = struct.pack('<I', 1) # type 1 flags = struct.pack('<I', type1_flags) Workstation = gethostname().upper().encode('ascii') user_parts = user.split('\\', 1) DomainName = user_parts[0].upper().encode('ascii') # TODO - this variable isn't used EncryptedRandomSessionKey = "" # noqa WorkstationLen = struct.pack('<H', len(Workstation)) WorkstationMaxLen = struct.pack('<H', len(Workstation)) WorkstationBufferOffset = struct.pack('<I', Payload_start) Payload_start += len(Workstation) DomainNameLen = struct.pack('<H', len(DomainName)) DomainNameMaxLen = struct.pack('<H', len(DomainName)) DomainNameBufferOffset = struct.pack('<I', Payload_start) Payload_start += len(DomainName) ProductMajorVersion = struct.pack('<B', 5) ProductMinorVersion = struct.pack('<B', 1) ProductBuild = struct.pack('<H', 2600) VersionReserved1 = struct.pack('<B', 0) VersionReserved2 = struct.pack('<B', 0) VersionReserved3 = struct.pack('<B', 0) NTLMRevisionCurrent = struct.pack('<B', 15) msg1 = protocol + type + flags + \ DomainNameLen + DomainNameMaxLen + DomainNameBufferOffset + \ WorkstationLen + WorkstationMaxLen + WorkstationBufferOffset + \ ProductMajorVersion + ProductMinorVersion + ProductBuild + \ VersionReserved1 + VersionReserved2 + VersionReserved3 + NTLMRevisionCurrent assert BODY_LENGTH == len(msg1), "BODY_LENGTH: %d != msg1: %d" % (BODY_LENGTH, len(msg1)) msg1 += Workstation + DomainName msg1 = base64.b64encode(msg1) return msg1 def parse_NTLM_CHALLENGE_MESSAGE(msg2): "" msg2 = base64.b64decode(msg2) # TODO - this variable isn't used Signature = msg2[0:8] # noqa msg_type = struct.unpack("<I", msg2[8:12])[0] assert (msg_type == 2) # TODO - this variable isn't used TargetNameLen = struct.unpack("<H", msg2[12:14])[0] # noqa TargetNameMaxLen = struct.unpack("<H", msg2[14:16])[0] TargetNameOffset = struct.unpack("<I", msg2[16:20])[0] # TODO - this variable isn't used TargetName = msg2[TargetNameOffset:TargetNameOffset + TargetNameMaxLen] # noqa NegotiateFlags = struct.unpack("<I", msg2[20:24])[0] ServerChallenge = msg2[24:32] # TODO - this variable isn't used Reserved = msg2[32:40] # noqa 
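    # What follows (when NTLM_NegotiateTargetInfo is set) is the TargetInfo
    # block: a sequence of AV_PAIRs, each a 2-byte little-endian AvId and a
    # 2-byte AvLen followed by AvLen bytes of value, terminated by an
    # NTLM_MsvAvEOL pair whose AvLen is 0 (see the constants at the top of
    # this module).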
if NegotiateFlags & NTLM_NegotiateTargetInfo: TargetInfoLen = struct.unpack("<H", msg2[40:42])[0] # TODO - this variable isn't used TargetInfoMaxLen = struct.unpack("<H", msg2[42:44])[0] # noqa TargetInfoOffset = struct.unpack("<I", msg2[44:48])[0] TargetInfo = msg2[TargetInfoOffset:TargetInfoOffset + TargetInfoLen] i = 0 TimeStamp = '\0' * 8 while (i < TargetInfoLen): AvId = struct.unpack("<H", TargetInfo[i:i + 2])[0] AvLen = struct.unpack("<H", TargetInfo[i + 2:i + 4])[0] AvValue = TargetInfo[i + 4:i + 4 + AvLen] i = i + 4 + AvLen if AvId == NTLM_MsvAvTimestamp: # TODO - this variable isn't used TimeStamp = AvValue # noqa # ~ print AvId, AvValue.decode('utf-16') return (ServerChallenge, NegotiateFlags) def create_NTLM_AUTHENTICATE_MESSAGE(nonce, user, domain, password, NegotiateFlags): is_unicode = NegotiateFlags & NTLM_NegotiateUnicode is_NegotiateExtendedSecurity = NegotiateFlags & NTLM_NegotiateExtendedSecurity flags = struct.pack('<I', NTLM_TYPE2_FLAGS) BODY_LENGTH = 72 Payload_start = BODY_LENGTH # in bytes Workstation = gethostname().upper().encode('ascii') DomainName = domain.upper().encode('ascii') UserName = user.encode('ascii') EncryptedRandomSessionKey = b"" if is_unicode: Workstation = gethostname().upper().encode('utf-16-le') DomainName = domain.upper().encode('utf-16-le') UserName = user.encode('utf-16-le') EncryptedRandomSessionKey = "".encode('utf-16-le') LmChallengeResponse = calc_resp(create_LM_hashed_password_v1(password), nonce) NtChallengeResponse = calc_resp(create_NT_hashed_password_v1(password), nonce) if is_NegotiateExtendedSecurity: pwhash = create_NT_hashed_password_v1(password, UserName, DomainName) ClientChallenge = b"" for i in range(8): ClientChallenge += six.int2byte(random.getrandbits(8)) (NtChallengeResponse, LmChallengeResponse) = ntlm2sr_calc_resp(pwhash, nonce, ClientChallenge) # ='\x39 e3 f4 cd 59 c5 d8 60') Signature = b'NTLMSSP\0' MessageType = struct.pack('<I', 3) # type 3 DomainNameLen = struct.pack('<H', len(DomainName)) DomainNameMaxLen = struct.pack('<H', len(DomainName)) DomainNameOffset = struct.pack('<I', Payload_start) Payload_start += len(DomainName) UserNameLen = struct.pack('<H', len(UserName)) UserNameMaxLen = struct.pack('<H', len(UserName)) UserNameOffset = struct.pack('<I', Payload_start) Payload_start += len(UserName) WorkstationLen = struct.pack('<H', len(Workstation)) WorkstationMaxLen = struct.pack('<H', len(Workstation)) WorkstationOffset = struct.pack('<I', Payload_start) Payload_start += len(Workstation) LmChallengeResponseLen = struct.pack('<H', len(LmChallengeResponse)) LmChallengeResponseMaxLen = struct.pack('<H', len(LmChallengeResponse)) LmChallengeResponseOffset = struct.pack('<I', Payload_start) Payload_start += len(LmChallengeResponse) NtChallengeResponseLen = struct.pack('<H', len(NtChallengeResponse)) NtChallengeResponseMaxLen = struct.pack('<H', len(NtChallengeResponse)) NtChallengeResponseOffset = struct.pack('<I', Payload_start) Payload_start += len(NtChallengeResponse) EncryptedRandomSessionKeyLen = struct.pack('<H', len(EncryptedRandomSessionKey)) EncryptedRandomSessionKeyMaxLen = struct.pack('<H', len(EncryptedRandomSessionKey)) EncryptedRandomSessionKeyOffset = struct.pack('<I', Payload_start) Payload_start += len(EncryptedRandomSessionKey) NegotiateFlags = flags ProductMajorVersion = struct.pack('<B', 5) ProductMinorVersion = struct.pack('<B', 1) ProductBuild = struct.pack('<H', 2600) VersionReserved1 = struct.pack('<B', 0) VersionReserved2 = struct.pack('<B', 0) VersionReserved3 = struct.pack('<B', 0) 
NTLMRevisionCurrent = struct.pack('<B', 15)

    # TODO - This variable isn't used
    MIC = struct.pack('<IIII', 0, 0, 0, 0)  # noqa

    msg3 = Signature + MessageType + \
        LmChallengeResponseLen + LmChallengeResponseMaxLen + LmChallengeResponseOffset + \
        NtChallengeResponseLen + NtChallengeResponseMaxLen + NtChallengeResponseOffset + \
        DomainNameLen + DomainNameMaxLen + DomainNameOffset + \
        UserNameLen + UserNameMaxLen + UserNameOffset + \
        WorkstationLen + WorkstationMaxLen + WorkstationOffset + \
        EncryptedRandomSessionKeyLen + EncryptedRandomSessionKeyMaxLen + EncryptedRandomSessionKeyOffset + \
        NegotiateFlags + \
        ProductMajorVersion + ProductMinorVersion + ProductBuild + \
        VersionReserved1 + VersionReserved2 + VersionReserved3 + NTLMRevisionCurrent
    assert BODY_LENGTH == len(msg3), "BODY_LENGTH: %d != msg3: %d" % (BODY_LENGTH, len(msg3))

    Payload = DomainName + UserName + Workstation + LmChallengeResponse + NtChallengeResponse + EncryptedRandomSessionKey
    msg3 += Payload
    msg3 = base64.b64encode(msg3)
    return msg3


def calc_resp(password_hash, server_challenge):
    """calc_resp generates the LM response given a 16-byte password hash and
    the challenge from the Type-2 message.

    @param password_hash: 16-byte password hash
    @param server_challenge: 8-byte challenge from Type-2 message

    returns a 24-byte buffer containing the LM response
    """
    # pad the hash with zeros to 21 bytes, then split it into three 7-byte
    # DES keys, each of which encrypts the 8-byte server challenge
    password_hash += b'\0' * (21 - len(password_hash))
    res = b''
    dobj = des.DES(password_hash[0:7])
    res = res + dobj.encrypt(server_challenge[0:8])

    dobj = des.DES(password_hash[7:14])
    res = res + dobj.encrypt(server_challenge[0:8])

    dobj = des.DES(password_hash[14:21])
    res = res + dobj.encrypt(server_challenge[0:8])
    return res


def ComputeResponse(ResponseKeyNT, ResponseKeyLM, ServerChallenge, ServerName,
                    ClientChallenge=b'\xaa' * 8, Time=b'\0' * 8):
    # NTLMv2 responses use HMAC-MD5 throughout ([MS-NLMP] section 3.3.2);
    # digestmod must be given explicitly on Python 3.8+
    LmChallengeResponse = hmac.new(ResponseKeyLM, ServerChallenge + ClientChallenge,
                                   digestmod=hashlib.md5).digest() + ClientChallenge

    Responserversion = b'\x01'
    HiResponserversion = b'\x01'
    # temp = Responserversion || HiResponserversion || Z(6) || Time ||
    #        ClientChallenge || Z(4) || ServerName || Z(4), per [MS-NLMP]
    temp = Responserversion + HiResponserversion + b'\0' * 6 + Time + ClientChallenge + b'\0' * 4 + ServerName + b'\0' * 4
    NTProofStr = hmac.new(ResponseKeyNT, ServerChallenge + temp, digestmod=hashlib.md5).digest()
    NtChallengeResponse = NTProofStr + temp

    # TODO - This variable isn't used
    SessionBaseKey = hmac.new(ResponseKeyNT, NTProofStr, digestmod=hashlib.md5).digest()  # noqa
    return (NtChallengeResponse, LmChallengeResponse)


def ntlm2sr_calc_resp(ResponseKeyNT, ServerChallenge, ClientChallenge=b'\xaa' * 8):
    LmChallengeResponse = ClientChallenge + b'\0' * 16
    sess = hashlib.md5(ServerChallenge + ClientChallenge).digest()
    NtChallengeResponse = calc_resp(ResponseKeyNT, sess[0:8])
    return (NtChallengeResponse, LmChallengeResponse)


def create_LM_hashed_password_v1(passwd):
    """create LanManager hashed password"""
    # if the passwd provided is already a hash, we just return the first half
    if re.match(r'^[\w]{32}:[\w]{32}$', passwd):
        return binascii.unhexlify(passwd.split(':')[0])

    # fix the password length to 14 bytes: upper-case, pad with NULs, then
    # truncate the *padded* string (not the raw password)
    passwd = passwd.upper()
    lm_pw = passwd + '\0' * (14 - len(passwd))
    lm_pw = lm_pw[0:14]

    # do hash
    magic_str = b"KGS!@#$%"  # page 57 in [MS-NLMP]

    res = b''
    dobj = des.DES(lm_pw[0:7])
    res = res + dobj.encrypt(magic_str)

    dobj = des.DES(lm_pw[7:14])
    res = res + dobj.encrypt(magic_str)
    return res


def create_NT_hashed_password_v1(passwd, user=None, domain=None):
    """create NT hashed password"""
    # if the passwd provided is already a hash, we just return the second half
    if re.match(r'^[\w]{32}:[\w]{32}$', passwd):
        return binascii.unhexlify(passwd.split(':')[1])

    digest = hashlib.new('md4', passwd.encode('utf-16le')).digest()
    return digest


def create_NT_hashed_password_v2(passwd, user, domain):
    """create NTv2 hashed password: HMAC-MD5 of the NTv1 hash, keyed over the
    upper-cased user name concatenated with the domain"""
    digest = create_NT_hashed_password_v1(passwd)

    return hmac.new(digest, (user.upper() + domain).encode('utf-16le'),
                    digestmod=hashlib.md5).digest()


def create_sessionbasekey(password):
    return hashlib.new('md4', create_NT_hashed_password_v1(password)).digest()
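# ---------------------------------------------------------------------------
# Usage sketch (not part of the module above): the three-leg NTLM handshake
# the helpers in this module implement, e.g. inside an HTTP 401 negotiation.
# `get_type2` stands in for whatever transport returns the server's base64
# Type-2 challenge; it and the account details below are hypothetical.
# ---------------------------------------------------------------------------
def _example_ntlm_handshake(get_type2, user='DOMAIN\\alice', domain='DOMAIN',
                            password='secret'):
    # Leg 1: advertise our capabilities (Type-1 NEGOTIATE message)
    type1 = create_NTLM_NEGOTIATE_MESSAGE(user)
    # Leg 2: the server replies with a Type-2 CHALLENGE carrying an 8-byte
    # nonce plus the subset of our flags it accepted
    server_challenge, negotiate_flags = parse_NTLM_CHALLENGE_MESSAGE(
        get_type2(type1))
    # Leg 3: prove knowledge of the password hash against the nonce
    return create_NTLM_AUTHENTICATE_MESSAGE(
        server_challenge, 'alice', domain, password, negotiate_flags)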
# -*- coding: utf-8 -*- import logging import httplib import httplib as http # TODO: Inconsistent usage of aliased import from dateutil.parser import parse as parse_date from django.utils import timezone from django.core.exceptions import ValidationError from flask import request import mailchimp from framework import sentry from framework.auth import utils as auth_utils from framework.auth import cas from framework.auth import logout as osf_logout from framework.auth.decorators import collect_auth from framework.auth.decorators import must_be_logged_in from framework.auth.decorators import must_be_confirmed from framework.auth.exceptions import ChangePasswordError from framework.auth.views import send_confirm_email from framework.auth.signals import user_merged from framework.exceptions import HTTPError, PermissionsError from framework.flask import redirect # VOL-aware redirect from framework.status import push_status_message from framework.utils import throttle_period_expired from osf.models import ApiOAuth2Application, ApiOAuth2PersonalToken, OSFUser, QuickFilesNode from osf.exceptions import BlacklistedEmailError from website import mails from website import mailchimp_utils from website import settings from website import language from website.ember_osf_web.decorators import ember_flag_is_active from website.oauth.utils import get_available_scopes from website.profile import utils as profile_utils from website.util import api_v2_url, web_url_for, paths from website.util.sanitize import escape_html from addons.base import utils as addon_utils logger = logging.getLogger(__name__) def date_or_none(date): try: return parse_date(date) except Exception as error: logger.exception(error) return None def validate_user(data, user): """Check if the user in request is the user who log in """ if 'id' in data: if data['id'] != user._id: raise HTTPError(httplib.FORBIDDEN) else: # raise an error if request doesn't have user id raise HTTPError(httplib.BAD_REQUEST, data={'message_long': '"id" is required'}) @must_be_logged_in def resend_confirmation(auth): user = auth.user data = request.get_json() validate_user(data, user) if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE): raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'Too many requests. Please wait a while before sending another confirmation email.'}) try: primary = data['email']['primary'] confirmed = data['email']['confirmed'] address = data['email']['address'].strip().lower() except KeyError: raise HTTPError(httplib.BAD_REQUEST) if primary or confirmed: raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'Cannnot resend confirmation for confirmed emails'}) user.add_unconfirmed_email(address) # TODO: This setting is now named incorrectly. 
if settings.CONFIRM_REGISTRATIONS_BY_EMAIL: send_confirm_email(user, email=address) user.email_last_sent = timezone.now() user.save() return _profile_view(user, is_profile=True) @must_be_logged_in def update_user(auth): """Update the logged-in user's profile.""" # trust the decorator to handle auth user = auth.user data = request.get_json() validate_user(data, user) # TODO: Expand this to support other user attributes ########## # Emails # ########## if 'emails' in data: emails_list = [x['address'].strip().lower() for x in data['emails']] if user.username.strip().lower() not in emails_list: raise HTTPError(httplib.FORBIDDEN) available_emails = [ each.strip().lower() for each in list(user.emails.values_list('address', flat=True)) + user.unconfirmed_emails ] # removals removed_emails = [ each.strip().lower() for each in available_emails if each not in emails_list ] if user.username.strip().lower() in removed_emails: raise HTTPError(httplib.FORBIDDEN) for address in removed_emails: if user.emails.filter(address=address): try: user.remove_email(address) except PermissionsError as e: raise HTTPError(httplib.FORBIDDEN, e.message) user.remove_unconfirmed_email(address) # additions added_emails = [ each['address'].strip().lower() for each in data['emails'] if each['address'].strip().lower() not in available_emails ] for address in added_emails: try: user.add_unconfirmed_email(address) except (ValidationError, ValueError): raise HTTPError(http.BAD_REQUEST, data=dict( message_long='Invalid Email') ) except BlacklistedEmailError: raise HTTPError(http.BAD_REQUEST, data=dict( message_long=language.BLACKLISTED_EMAIL) ) # TODO: This setting is now named incorrectly. if settings.CONFIRM_REGISTRATIONS_BY_EMAIL: send_confirm_email(user, email=address) ############ # Username # ############ # get the first email that is set to primary and has an address primary_email = next( ( each for each in data['emails'] # email is primary if each.get('primary') and each.get('confirmed') # an address is specified (can't trust those sneaky users!) 
and each.get('address')
            ),
            None
        )

        username = None
        if primary_email:
            primary_email_address = primary_email['address'].strip().lower()
            if primary_email_address not in [each.strip().lower() for each in user.emails.values_list('address', flat=True)]:
                raise HTTPError(httplib.FORBIDDEN)
            username = primary_email_address

        # make sure the new username has already been confirmed
        if username and username != user.username and user.emails.filter(address=username).exists():

            mails.send_mail(
                user.username,
                mails.PRIMARY_EMAIL_CHANGED,
                user=user,
                new_address=username,
                can_change_preferences=False,
                osf_contact_email=settings.OSF_CONTACT_EMAIL
            )

            # Remove old primary email from subscribed mailing lists
            for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
                if subscription:
                    mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username)
            user.username = username

    ###################
    # Timezone/Locale #
    ###################

    if 'locale' in data:
        if data['locale']:
            locale = data['locale'].replace('-', '_')
            user.locale = locale
    # TODO: Refactor to something like:
    #   user.timezone = data.get('timezone', user.timezone)
    if 'timezone' in data:
        if data['timezone']:
            user.timezone = data['timezone']

    user.save()

    # Update subscribed mailing lists with new primary email
    # TODO: move to user.save()
    for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
        if subscription:
            mailchimp_utils.subscribe_mailchimp(list_name, user._id)

    return _profile_view(user, is_profile=True)


def _profile_view(profile, is_profile=False, include_node_counts=False):
    if profile and profile.is_disabled:
        raise HTTPError(http.GONE)

    if profile:
        profile_quickfilesnode = QuickFilesNode.objects.get_for_user(profile)
        profile_user_data = profile_utils.serialize_user(profile, full=True, is_profile=is_profile, include_node_counts=include_node_counts)
        ret = {
            'profile': profile_user_data,
            'user': {
                '_id': profile._id,
                'is_profile': is_profile,
                'can_edit': None,  # necessary for rendering nodes
                'permissions': [],  # necessary for rendering nodes
                'has_quickfiles': profile_quickfilesnode.files.filter(type='osf.osfstoragefile').exists()
            },
        }
        return ret

    raise HTTPError(http.NOT_FOUND)


@must_be_logged_in
def profile_view_json(auth):
    return _profile_view(auth.user, True)


@collect_auth
@must_be_confirmed
def profile_view_id_json(uid, auth):
    user = OSFUser.load(uid)
    is_profile = auth and auth.user == user
    # Do NOT embed nodes, they aren't necessary
    return _profile_view(user, is_profile)


@must_be_logged_in
@ember_flag_is_active('ember_user_profile_page')
def profile_view(auth):
    # Embed node data, so profile node lists can be rendered
    return _profile_view(auth.user, True, include_node_counts=True)


@collect_auth
@must_be_confirmed
def profile_view_id(uid, auth):
    user = OSFUser.load(uid)
    is_profile = auth and auth.user == user
    # Embed node data, so profile node lists can be rendered
    return _profile_view(user, is_profile, include_node_counts=True)


@must_be_logged_in
@ember_flag_is_active('ember_user_settings_page')
def user_profile(auth, **kwargs):
    user = auth.user
    return {
        'user_id': user._id,
        'user_api_url': user.api_url,
    }


@must_be_logged_in
def user_account(auth, **kwargs):
    user = auth.user
    user_addons = addon_utils.get_addons_by_config_type('user', user)
    if 'password_reset' in request.args:
        push_status_message('Password updated successfully.', kind='success', trust=False)

    return {
        'user_id': user._id,
        'addons': user_addons,
        'addons_js': collect_user_config_js([addon for addon in settings.ADDONS_AVAILABLE if 'user' in addon.configs]),
'addons_css': [], 'requested_deactivation': user.requested_deactivation, 'external_identity': user.external_identity } @must_be_logged_in def user_account_password(auth, **kwargs): user = auth.user old_password = request.form.get('old_password', None) new_password = request.form.get('new_password', None) confirm_password = request.form.get('confirm_password', None) # It has been more than 1 hour since last invalid attempt to change password. Reset the counter for invalid attempts. if throttle_period_expired(user.change_password_last_attempt, settings.TIME_RESET_CHANGE_PASSWORD_ATTEMPTS): user.reset_old_password_invalid_attempts() # There have been more than 3 failed attempts and throttle hasn't expired. if user.old_password_invalid_attempts >= settings.INCORRECT_PASSWORD_ATTEMPTS_ALLOWED and not throttle_period_expired(user.change_password_last_attempt, settings.CHANGE_PASSWORD_THROTTLE): push_status_message( message='Too many failed attempts. Please wait a while before attempting to change your password.', kind='warning', trust=False ) return redirect(web_url_for('user_account')) try: user.change_password(old_password, new_password, confirm_password) except ChangePasswordError as error: for m in error.messages: push_status_message(m, kind='warning', trust=False) else: # We have to logout the user first so all CAS sessions are invalid user.save() osf_logout() return redirect(cas.get_logout_url(cas.get_login_url( web_url_for('user_account', _absolute=True) + '?password_reset=True', username=user.username, verification_key=user.verification_key, ))) user.save() return redirect(web_url_for('user_account')) @must_be_logged_in def user_addons(auth, **kwargs): user = auth.user ret = { 'addon_settings': addon_utils.get_addons_by_config_type('accounts', user), } accounts_addons = [addon for addon in settings.ADDONS_AVAILABLE if 'accounts' in addon.configs] ret.update({ 'addon_enabled_settings': [addon.short_name for addon in accounts_addons], 'addons_js': collect_user_config_js(accounts_addons), 'addon_capabilities': settings.ADDON_CAPABILITIES, 'addons_css': [] }) return ret @must_be_logged_in def user_notifications(auth, **kwargs): """Get subscribe data from user""" return { 'mailing_lists': dict(auth.user.mailchimp_mailing_lists.items() + auth.user.osf_mailing_lists.items()) } @must_be_logged_in def oauth_application_list(auth, **kwargs): """Return app creation page with list of known apps. API is responsible for tying list to current user.""" app_list_url = api_v2_url('applications/') return { 'app_list_url': app_list_url } @must_be_logged_in def oauth_application_register(auth, **kwargs): """Register an API application: blank form view""" app_list_url = api_v2_url('applications/') # POST request to this url return {'app_list_url': app_list_url, 'app_detail_url': ''} @must_be_logged_in def oauth_application_detail(auth, **kwargs): """Show detail for a single OAuth application""" client_id = kwargs.get('client_id') # The client ID must be an active and existing record, and the logged-in user must have permission to view it. 
try: record = ApiOAuth2Application.objects.get(client_id=client_id) except ApiOAuth2Application.DoesNotExist: raise HTTPError(http.NOT_FOUND) except ValueError: # Invalid client ID -- ApiOAuth2Application will not exist raise HTTPError(http.NOT_FOUND) if record.owner != auth.user: raise HTTPError(http.FORBIDDEN) if record.is_active is False: raise HTTPError(http.GONE) app_detail_url = api_v2_url('applications/{}/'.format(client_id)) # Send request to this URL return {'app_list_url': '', 'app_detail_url': app_detail_url} @must_be_logged_in def personal_access_token_list(auth, **kwargs): """Return token creation page with list of known tokens. API is responsible for tying list to current user.""" token_list_url = api_v2_url('tokens/') return { 'token_list_url': token_list_url } @must_be_logged_in def personal_access_token_register(auth, **kwargs): """Register a personal access token: blank form view""" token_list_url = api_v2_url('tokens/') # POST request to this url return {'token_list_url': token_list_url, 'token_detail_url': '', 'scope_options': get_available_scopes()} @must_be_logged_in def personal_access_token_detail(auth, **kwargs): """Show detail for a single personal access token""" _id = kwargs.get('_id') # The ID must be an active and existing record, and the logged-in user must have permission to view it. try: record = ApiOAuth2PersonalToken.objects.get(_id=_id) except ApiOAuth2PersonalToken.DoesNotExist: raise HTTPError(http.NOT_FOUND) if record.owner != auth.user: raise HTTPError(http.FORBIDDEN) if record.is_active is False: raise HTTPError(http.GONE) token_detail_url = api_v2_url('tokens/{}/'.format(_id)) # Send request to this URL return {'token_list_url': '', 'token_detail_url': token_detail_url, 'scope_options': get_available_scopes()} @must_be_logged_in def delete_external_identity(auth, **kwargs): """Removes single external identity from user""" data = request.get_json() identity = data.get('identity') if not identity: raise HTTPError(http.BAD_REQUEST) for service in auth.user.external_identity: if identity in auth.user.external_identity[service]: auth.user.external_identity[service].pop(identity) if len(auth.user.external_identity[service]) == 0: auth.user.external_identity.pop(service) auth.user.save() return raise HTTPError(http.NOT_FOUND, 'Unable to find requested identity') def collect_user_config_js(addon_configs): """Collect webpack bundles for each of the addons' user-cfg.js modules. Return the URLs for each of the JS modules to be included on the user addons config page. :param list addons: List of user's addon config records. """ js_modules = [] for addon_config in addon_configs: js_path = paths.resolve_addon_path(addon_config, 'user-cfg.js') if js_path: js_modules.append(js_path) return js_modules @must_be_logged_in def user_choose_addons(**kwargs): auth = kwargs['auth'] json_data = escape_html(request.get_json()) auth.user.config_addons(json_data, auth) @must_be_logged_in def user_choose_mailing_lists(auth, **kwargs): """ Update mailing list subscription on user model and in mailchimp Example input: { "Open Science Framework General": true, ... 
} """ user = auth.user json_data = escape_html(request.get_json()) if json_data: for list_name, subscribe in json_data.items(): # TO DO: change this to take in any potential non-mailchimp, something like try: update_subscription(), except IndexNotFound: update_mailchimp_subscription() if list_name == settings.OSF_HELP_LIST: update_osf_help_mails_subscription(user=user, subscribe=subscribe) else: update_mailchimp_subscription(user, list_name, subscribe) else: raise HTTPError(http.BAD_REQUEST, data=dict( message_long="Must provide a dictionary of the format {'mailing list name': Boolean}") ) user.save() all_mailing_lists = {} all_mailing_lists.update(user.mailchimp_mailing_lists) all_mailing_lists.update(user.osf_mailing_lists) return {'message': 'Successfully updated mailing lists', 'result': all_mailing_lists}, 200 @user_merged.connect def update_mailchimp_subscription(user, list_name, subscription, send_goodbye=True): """ Update mailing list subscription in mailchimp. :param obj user: current user :param str list_name: mailing list :param boolean subscription: true if user is subscribed """ if subscription: try: mailchimp_utils.subscribe_mailchimp(list_name, user._id) except mailchimp.Error: pass else: try: mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username, send_goodbye=send_goodbye) except mailchimp.Error: # User has already unsubscribed, so nothing to do pass def mailchimp_get_endpoint(**kwargs): """Endpoint that the mailchimp webhook hits to check that the OSF is responding""" return {}, http.OK def sync_data_from_mailchimp(**kwargs): """Endpoint that the mailchimp webhook sends its data to""" key = request.args.get('key') if key == settings.MAILCHIMP_WEBHOOK_SECRET_KEY: r = request action = r.values['type'] list_name = mailchimp_utils.get_list_name_from_id(list_id=r.values['data[list_id]']) username = r.values['data[email]'] try: user = OSFUser.objects.get(username=username) except OSFUser.DoesNotExist: sentry.log_exception() sentry.log_message('A user with this username does not exist.') raise HTTPError(404, data=dict(message_short='User not found', message_long='A user with this username does not exist')) if action == 'unsubscribe': user.mailchimp_mailing_lists[list_name] = False user.save() elif action == 'subscribe': user.mailchimp_mailing_lists[list_name] = True user.save() else: # TODO: get tests to pass with sentry logging # sentry.log_exception() # sentry.log_message("Unauthorized request to the OSF.") raise HTTPError(http.UNAUTHORIZED) @must_be_logged_in def impute_names(**kwargs): name = request.args.get('name', '') return auth_utils.impute_names(name) def update_osf_help_mails_subscription(user, subscribe): user.osf_mailing_lists[settings.OSF_HELP_LIST] = subscribe user.save() @must_be_logged_in def serialize_names(**kwargs): user = kwargs['auth'].user return { 'full': user.fullname, 'given': user.given_name, 'middle': user.middle_names, 'family': user.family_name, 'suffix': user.suffix, } def get_target_user(auth, uid=None): target = OSFUser.load(uid) if uid else auth.user if target is None: raise HTTPError(http.NOT_FOUND) return target def fmt_date_or_none(date, fmt='%Y-%m-%d'): if date: try: return date.strftime(fmt) except ValueError: raise HTTPError( http.BAD_REQUEST, data=dict(message_long='Year entered must be after 1900') ) return None def append_editable(data, auth, uid=None): target = get_target_user(auth, uid) data['editable'] = auth.user == target def serialize_social_addons(user): ret = {} for user_settings in 
user.get_addons(): config = user_settings.config if user_settings.public_id: ret[config.short_name] = user_settings.public_id return ret @collect_auth def serialize_social(auth, uid=None, **kwargs): target = get_target_user(auth, uid) ret = target.social append_editable(ret, auth, uid) if ret['editable']: ret['addons'] = serialize_social_addons(target) return ret def serialize_job(job): return { 'institution': job.get('institution'), 'department': job.get('department'), 'title': job.get('title'), 'startMonth': job.get('startMonth'), 'startYear': job.get('startYear'), 'endMonth': job.get('endMonth'), 'endYear': job.get('endYear'), 'ongoing': job.get('ongoing', False), } def serialize_school(school): return { 'institution': school.get('institution'), 'department': school.get('department'), 'degree': school.get('degree'), 'startMonth': school.get('startMonth'), 'startYear': school.get('startYear'), 'endMonth': school.get('endMonth'), 'endYear': school.get('endYear'), 'ongoing': school.get('ongoing', False), } def serialize_contents(field, func, auth, uid=None): target = get_target_user(auth, uid) ret = { 'contents': [ func(content) for content in getattr(target, field) ] } append_editable(ret, auth, uid) return ret @collect_auth def serialize_jobs(auth, uid=None, **kwargs): ret = serialize_contents('jobs', serialize_job, auth, uid) append_editable(ret, auth, uid) return ret @collect_auth def serialize_schools(auth, uid=None, **kwargs): ret = serialize_contents('schools', serialize_school, auth, uid) append_editable(ret, auth, uid) return ret @must_be_logged_in def unserialize_names(**kwargs): user = kwargs['auth'].user json_data = escape_html(request.get_json()) # json get can return None, use `or` here to ensure we always strip a string user.fullname = (json_data.get('full') or '').strip() user.given_name = (json_data.get('given') or '').strip() user.middle_names = (json_data.get('middle') or '').strip() user.family_name = (json_data.get('family') or '').strip() user.suffix = (json_data.get('suffix') or '').strip() user.save() def verify_user_match(auth, **kwargs): uid = kwargs.get('uid') if uid and uid != auth.user._id: raise HTTPError(http.FORBIDDEN) @must_be_logged_in def unserialize_social(auth, **kwargs): verify_user_match(auth, **kwargs) user = auth.user json_data = escape_html(request.get_json()) for soc in user.SOCIAL_FIELDS.keys(): user.social[soc] = json_data.get(soc) try: user.save() except ValidationError as exc: raise HTTPError(http.BAD_REQUEST, data=dict( message_long=exc.messages[0] )) def unserialize_job(job): return { 'institution': job.get('institution'), 'department': job.get('department'), 'title': job.get('title'), 'startMonth': job.get('startMonth'), 'startYear': job.get('startYear'), 'endMonth': job.get('endMonth'), 'endYear': job.get('endYear'), 'ongoing': job.get('ongoing'), } def unserialize_school(school): return { 'institution': school.get('institution'), 'department': school.get('department'), 'degree': school.get('degree'), 'startMonth': school.get('startMonth'), 'startYear': school.get('startYear'), 'endMonth': school.get('endMonth'), 'endYear': school.get('endYear'), 'ongoing': school.get('ongoing'), } def unserialize_contents(field, func, auth): user = auth.user json_data = escape_html(request.get_json()) setattr( user, field, [ func(content) for content in json_data.get('contents', []) ] ) user.save() @must_be_logged_in def unserialize_jobs(auth, **kwargs): verify_user_match(auth, **kwargs) unserialize_contents('jobs', unserialize_job, auth) # TODO: Add 
return value @must_be_logged_in def unserialize_schools(auth, **kwargs): verify_user_match(auth, **kwargs) unserialize_contents('schools', unserialize_school, auth) # TODO: Add return value @must_be_logged_in def request_export(auth): user = auth.user if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE): raise HTTPError(http.BAD_REQUEST, data={'message_long': 'Too many requests. Please wait a while before sending another account export request.', 'error_type': 'throttle_error'}) mails.send_mail( to_addr=settings.OSF_SUPPORT_EMAIL, mail=mails.REQUEST_EXPORT, user=auth.user, can_change_preferences=False, ) user.email_last_sent = timezone.now() user.save() return {'message': 'Sent account export request'} @must_be_logged_in def request_deactivation(auth): user = auth.user if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE): raise HTTPError(http.BAD_REQUEST, data={ 'message_long': 'Too many requests. Please wait a while before sending another account deactivation request.', 'error_type': 'throttle_error' }) mails.send_mail( to_addr=settings.OSF_SUPPORT_EMAIL, mail=mails.REQUEST_DEACTIVATION, user=auth.user, can_change_preferences=False, ) user.email_last_sent = timezone.now() user.requested_deactivation = True user.save() return {'message': 'Sent account deactivation request'} @must_be_logged_in def cancel_request_deactivation(auth): user = auth.user user.requested_deactivation = False user.save() return {'message': 'You have canceled your deactivation request'}
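# ----------------------------------------------------------------------
# Illustrative sketch (not part of the views above): the mailing-list
# endpoint accepts a flat mapping of {'mailing list name': Boolean} and
# responds with the user's mailchimp and OSF lists merged together. The
# standalone stub below only demonstrates that merge; the helper name and
# list names are hypothetical.
def _merged_mailing_lists(mailchimp_lists, osf_lists):
    # Same assembly order as above: mailchimp lists first, then OSF lists.
    merged = {}
    merged.update(mailchimp_lists)
    merged.update(osf_lists)
    return {'message': 'Successfully updated mailing lists', 'result': merged}

assert _merged_mailing_lists(
    {'Open Science Framework General': False},
    {'Open Science Framework Help': True},
) == {
    'message': 'Successfully updated mailing lists',
    'result': {'Open Science Framework General': False,
               'Open Science Framework Help': True},
}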
######################################################################## # amara/xpath/locationpaths/predicates.py """ A parsed token that represents a predicate list. """ from __future__ import absolute_import from itertools import count, izip from amara.xpath import datatypes from amara.xpath.expressions.basics import literal, variable_reference from amara.xpath.expressions.booleans import equality_expr, relational_expr from amara.xpath.functions import position_function from ._nodetests import positionfilter from ._paths import pathiter __all__ = ['predicates', 'predicate'] class predicates(tuple): def __init__(self, *args): self.select = pathiter(pred.select for pred in self).select return def filter(self, nodes, context, reverse): if self: state = context.node, context.position, context.size for predicate in self: nodes = datatypes.nodeset(predicate.select(context, nodes)) context.node, context.position, context.size = state else: nodes = datatypes.nodeset(nodes) if reverse: nodes.reverse() return nodes def pprint(self, indent='', stream=None): print >> stream, indent + repr(self) for pred in self: pred.pprint(indent + ' ', stream) def __str__(self): return self.__unicode__().encode('utf-8') def __repr__(self): ptr = id(self) if ptr < 0: ptr += 0x100000000L return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self) def __unicode__(self): return u''.join(map(unicode, self)) #FIXME: should this derive from boolean_expression? class predicate: def __init__(self, expression): self._expr = expression self._provide_context_size = False #See http://trac.xml3k.org/ticket/62 #FIXME: There are probably many code paths which need self._provide_context_size set # Check for just "Number" if isinstance(expression, literal): const = datatypes.number(expression._literal) index = int(const) if index == const and index >= 1: self.select = positionfilter(index) else: # FIXME: add warning that expression will not select anything self.select = izip() return # Check for "position() = Expr" elif isinstance(expression, equality_expr) and expression._op == '=': if isinstance(expression._left, position_function): expression = expression._right if isinstance(expression, literal): const = datatypes.number(expression._literal) index = int(const) if index == const and index >= 1: self.select = positionfilter(index) else: self.select = izip() else: #FIXME: This will kick in the non-lazy behavior too broadly, e.g. 
in the case of [position = 1+1] #See: http://trac.xml3k.org/ticket/62 self._provide_context_size = True self._expr = expression self.select = self._number return elif isinstance(expression._right, position_function): expression = expression._left if isinstance(expression, literal): const = datatypes.number(expression._literal) index = int(const) if index == const and index >= 1: self.select = positionfilter(index) else: self.select = izip() else: self._expr = expression self.select = self._number return # Check for "position() [>,>=] Expr" or "Expr [<,<=] position()" # FIXME - do full slice-type notation elif isinstance(expression, relational_expr): op = expression._op if (isinstance(expression._left, position_function) and isinstance(expression._right, (literal, variable_reference)) and op in ('>', '>=')): self._start = expression._right self._position = (op == '>') self.select = self._slice return elif (isinstance(expression._left, (literal, variable_reference)) and isinstance(expression._right, position_function) and op in ('<', '<=')): self._start = expression._left self._position = (op == '<') self.select = self._slice return if issubclass(expression.return_type, datatypes.number): self.select = self._number elif expression.return_type is not datatypes.xpathobject: assert issubclass(expression.return_type, datatypes.xpathobject) self.select = self._boolean return def _slice(self, context, nodes): start = self._start.evaluate_as_number(context) position = self._position if position > start: return nodes position += 1 nodes = iter(nodes) for node in nodes: if position > start: break position += 1 return nodes def _number(self, context, nodes): expr = self._expr position = 1 if self._provide_context_size: nodes = list(nodes) context.size = len(nodes) context.current_node = context.node for node in nodes: context.node, context.position = node, position if expr.evaluate_as_number(context) == position: yield node position += 1 return def _boolean(self, context, nodes): expr = self._expr position = 1 context.current_node = context.node for node in nodes: context.node, context.position = node, position if expr.evaluate_as_boolean(context): yield node position += 1 return def select(self, context, nodes): expr = self._expr position = 1 context.current_node = context.node for node in nodes: context.node, context.position = node, position result = expr.evaluate(context) if isinstance(result, datatypes.number): # This must be separate to prevent falling into # the boolean check. if result == position: yield node elif result: yield node position += 1 return def pprint(self, indent='', stream=None): print >> stream, indent + repr(self) self._expr.pprint(indent + ' ', stream) def __str__(self): return self.__unicode__().encode('utf-8') def __repr__(self): ptr = id(self) if ptr < 0: ptr += 0x100000000L return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self) def __unicode__(self): return u'[%s]' % self._expr @property def children(self): 'Child of the parse tree of a predicate is its expression' return (self._expr,)
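# ----------------------------------------------------------------------
# Illustrative sketch: a predicate such as "[3]" or "[position() = 3]"
# short-circuits to a pure position filter instead of evaluating the
# expression once per node. The standalone helper below (hypothetical;
# the real factory is positionfilter from _nodetests) shows the idea
# using itertools.islice over the 1-based node position.
from itertools import islice

def _position_filter(index):
    def select(context, nodes):
        # Yield only the node at the given 1-based position.
        return islice(nodes, index - 1, index)
    return select

# Selecting the third node of a node sequence:
assert list(_position_filter(3)(None, iter('abcd'))) == ['c']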
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Functions to apply slice sampling update in one dimension.""" import tensorflow.compat.v2 as tf from tensorflow_probability.python.distributions import bernoulli as bernoulli_lib from tensorflow_probability.python.distributions import gamma as gamma_lib from tensorflow_probability.python.internal import dtype_util from tensorflow_probability.python.internal import prefer_static as ps from tensorflow_probability.python.internal import samplers JAX_MODE = False def _left_doubling_increments(batch_shape, max_doublings, step_size, seed=None, name=None): """Computes the doubling increments for the left end point. The doubling procedure expands an initial interval to find a superset of the true slice. At each doubling iteration, the interval width is doubled to either the left or the right hand side with equal probability. If, initially, the left end point is at `L(0)` and the width of the interval is `w(0)`, then the left end point and the width at the k-th iteration (denoted L(k) and w(k) respectively) are given by the following recursions: ```none w(k) = 2 * w(k-1) L(k) = L(k-1) - w(k-1) * X_k, X_k ~ Bernoulli(0.5) or, L(0) - L(k) = w(0) Sum(2^i * X(i+1), 0 <= i < k) ``` This function computes the sequence of `L(0)-L(k)` and `w(k)` for k between 0 and `max_doublings` independently for each chain. Args: batch_shape: Positive int32 `tf.Tensor`. The batch shape. max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of doublings to consider. step_size: A real `tf.Tensor` with shape compatible with [num_chains]. The size of the initial interval. seed: PRNG seed; see `tfp.random.sanitize_seed` for details. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: left_increments: A tensor of shape (max_doublings+1, batch_shape). The relative position of the left end point after the doublings. widths: A tensor of shape (max_doublings+1, ones_like(batch_shape)). The widths of the intervals at each stage of the doubling. """ with tf.name_scope(name or 'left_doubling_increments'): step_size = tf.convert_to_tensor(value=step_size) dtype = dtype_util.base_dtype(step_size.dtype) # Output shape of the left increments tensor. output_shape = ps.concat(([max_doublings + 1], batch_shape), axis=0) # A sample realization of X_k. expand_left = bernoulli_lib.Bernoulli( 0.5, dtype=dtype).sample( sample_shape=output_shape, seed=seed) # The widths of the successive intervals. Starts with 1.0 and ends with # 2^max_doublings. width_multipliers = tf.cast(2 ** tf.range(0, max_doublings+1), dtype=dtype) # Output shape of the `widths` tensor. widths_shape = ps.concat(([max_doublings + 1], ps.ones_like(batch_shape)), axis=0) width_multipliers = tf.reshape(width_multipliers, shape=widths_shape) # Widths shape is [max_doublings + 1, 1, 1, 1...]. 
widths = width_multipliers * step_size # Take the cumulative sum of the left side increments in slice width to give # the resulting distance from the initial lower bound. left_increments = tf.cumsum(widths * expand_left, exclusive=True, axis=0) return left_increments, widths def _find_best_interval_idx(x, name=None): """Finds the index of the optimal set of bounds for each chain. For each chain, finds the smallest set of bounds for which both edges lie outside the slice. This is equivalent to the point at which a for loop implementation (P715 of Neal (2003)) of the algorithm would terminate. Performs the following calculation, where i is the number of doublings that have been performed and k is the max number of doublings: (2 * k - i) * flag + i The argmax of the above returns the earliest index where the bounds were outside the slice and if there is no such point, the widest bounds. Args: x: A tensor of shape (max_doublings+1, batch_shape). Type int32, with value 0 or 1. Indicates if this set of bounds is outside the slice. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: indices: A tensor of shape batch_shape. Type int32, with the index of the first set of bounds outside the slice and if there are none, the index of the widest set. """ with tf.name_scope(name or 'find_best_interval_idx'): # Returns max_doublings + 1. Positive int32. k = ps.shape(x)[0] dtype = dtype_util.base_dtype(x.dtype) # Factors by which to multiply the flag. Corresponds to (2 * k - i) above. mults = ps.range(2 * k, k, -1, dtype=dtype)[:, tf.newaxis] # Factors by which to shift the flag. Corresponds to i above. Ensures the # widest bounds are selected if there are no bounds outside the slice. shifts = ps.range(k, dtype=dtype)[:, tf.newaxis] indices = tf.argmax(mults * x + shifts, axis=0, output_type=dtype) return indices def slice_bounds_by_doubling(x_initial, target_log_prob, log_slice_heights, max_doublings, step_size, seed=None, name=None): """Returns the bounds of the slice at each stage of doubling procedure. Precomputes the x coordinates of the left (L) and right (R) endpoints of the interval `I` produced in the "doubling" algorithm [Neal 2003][1] P713. Note that we simultaneously compute all possible doubling values for each chain, for the reason that at small-medium densities, the gains from parallel evaluation might cause a speed-up, but this will be benchmarked against the while loop implementation. Args: x_initial: `tf.Tensor` of any shape and any real dtype consumable by `target_log_prob`. The initial points. target_log_prob: A callable taking a `tf.Tensor` of shape and dtype as `x_initial` and returning a tensor of the same shape. The log density of the target distribution. log_slice_heights: `tf.Tensor` with the same shape as `x_initial` and the same dtype as returned by `target_log_prob`. The log of the height of the slice for each chain. The values must be bounded above by `target_log_prob(x_initial)`. max_doublings: Scalar positive int32 `tf.Tensor`. The maximum number of doublings to consider. step_size: `tf.Tensor` with same dtype as and shape compatible with `x_initial`. The size of the initial interval. seed: (Optional) positive int or Tensor seed pair. The random seed. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: upper_bounds: A tensor of same shape and dtype as `x_initial`. Slice upper bounds for each chain.
lower_bounds: A tensor of same shape and dtype as `x_initial`. Slice lower bounds for each chain. both_ok: A tensor of the same shape as `x_initial` and boolean dtype. Indicates if both the chosen upper and lower bound lie outside of the slice. #### References [1]: Radford M. Neal. Slice Sampling. The Annals of Statistics. 2003, Vol 31, No. 3, 705-767. https://projecteuclid.org/download/pdf_1/euclid.aos/1056562461 """ with tf.name_scope(name or 'slice_bounds_by_doubling'): left_seed, increments_seed = samplers.split_seed( seed, salt='slice_bounds_by_doubling') x_initial = tf.convert_to_tensor(value=x_initial) batch_shape = ps.shape(x_initial) dtype = dtype_util.base_dtype(step_size.dtype) left_endpoints = x_initial + step_size * samplers.uniform( batch_shape, minval=-1.0, maxval=0.0, dtype=dtype, seed=left_seed) # Compute the increments by which we need to step the upper and lower bounds # as part of the doubling procedure. left_increments, widths = _left_doubling_increments( batch_shape, max_doublings, step_size, seed=increments_seed) # The left and right end points. Shape (max_doublings+1,) + batch_shape. left_endpoints = left_endpoints - left_increments right_endpoints = left_endpoints + widths # Test if these end points lie outside of the slice. # Checks if the end points of the slice are outside the graph of the pdf. left_ep_values = tf.map_fn(target_log_prob, left_endpoints) right_ep_values = tf.map_fn(target_log_prob, right_endpoints) left_ok = left_ep_values < log_slice_heights right_ok = right_ep_values < log_slice_heights both_ok = left_ok & right_ok both_ok_f = tf.reshape(both_ok, [max_doublings + 1, -1]) best_interval_idx = _find_best_interval_idx( tf.cast(both_ok_f, dtype=tf.int32)) # Formats the above index as required to use with gather_nd. point_index_gather = tf.stack( [best_interval_idx, ps.range(ps.size(best_interval_idx))], axis=1, name='point_index_gather') left_ep_f = tf.reshape(left_endpoints, [max_doublings + 1, -1]) right_ep_f = tf.reshape(right_endpoints, [max_doublings + 1, -1]) # The x values of the upper and lower bounds of the slices for each chain. lower_bounds = tf.reshape(tf.gather_nd(left_ep_f, point_index_gather), batch_shape) upper_bounds = tf.reshape(tf.gather_nd(right_ep_f, point_index_gather), batch_shape) both_ok = tf.reduce_any(both_ok, axis=0) return upper_bounds, lower_bounds, both_ok def _test_acceptance(x_initial, target_log_prob, decided, log_slice_heights, x_proposed, step_size, lower_bounds, upper_bounds, name=None): """Ensures the chosen point does not violate reversibility. Implements Fig 6 of Neal 2003 page 717, which checks that the path from the existing point to the new point would also have been possible in reverse. This is done by checking that the algorithm would not have been terminated before reaching the old point. Args: x_initial: A tensor of any shape and real dtype. The initial positions of the chains. This function assumes that all the dimensions of `x_initial` are batch dimensions (i.e. the event shape is `[]`). target_log_prob: Callable accepting a tensor like `x_initial` and returning a tensor containing the log density at that point of the same shape. decided: A `tf.bool` tensor of the same shape as `x_initial`. Indicates whether the acceptance has already been decided. A point is tested only if `decided` for that point is False. log_slice_heights: Tensor of the same shape and dtype as the return value of `target_log_prob` when applied to `x_initial`. The log of the height of the chosen slice.
x_proposed: A tensor of the same shape and dtype as `x_initial`. The proposed points. step_size: A tensor of shape and dtype compatible with `x_initial`. The min interval size in the doubling algorithm. lower_bounds: Tensor of same shape and dtype as `x_initial`. Slice lower bounds for each chain. upper_bounds: Tensor of same shape and dtype as `x_initial`. Slice upper bounds for each chain. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: acceptable: A boolean tensor of same shape as `x_initial` indicating whether the proposed points are acceptable for reversibility or not. """ with tf.name_scope(name or 'test_acceptance'): d = tf.zeros_like(x_initial, dtype=tf.bool) # Keeps track of points for which the loop has "effectively terminated". # Termination is when either their interval width has shrunk to the minimum # value (step_size) or if the point has already been rejected. def cond(_, decided, *ignored_args): # pylint: disable=unused-argument # Continue until all the points have been decided. return ~tf.reduce_all(decided) acceptable = tf.ones_like(x_initial, dtype=tf.bool) def body(acceptable, decided, left, right, d): """Checks reversibility as described on P717 of Neal 2003.""" midpoint = (left + right) / 2 divided = (((x_initial < midpoint) & (x_proposed >= midpoint)) | ((x_proposed < midpoint) & (x_initial >= midpoint))) next_d = d | divided next_right = tf.where(x_proposed < midpoint, midpoint, right) next_left = tf.where(x_proposed >= midpoint, midpoint, left) left_test = (log_slice_heights >= target_log_prob(next_left)) right_test = (log_slice_heights >= target_log_prob(next_right)) unacceptable = next_d & left_test & right_test # Logic here: For points which have not already been decided, # and are unacceptable, set acceptable to False. For others, let them # be as they were. now_decided = ~decided & unacceptable next_acceptable = tf.where(now_decided, ~unacceptable, acceptable) # Decided if (a) was already decided, or # (b) the new width is less than 1.1 step_size, or # (c) was marked unacceptable. next_decided = (decided | (next_right - next_left <= 1.1 * step_size) | now_decided) return (next_acceptable, next_decided, next_left, next_right, next_d) return tf.while_loop( cond=cond, body=body, loop_vars=(acceptable, decided, lower_bounds, upper_bounds, d))[0] def _sample_with_shrinkage(x_initial, target_log_prob, log_slice_heights, step_size, lower_bounds, upper_bounds, seed, name=None): """Samples from the slice by applying shrinkage for rejected points. Implements the one dimensional slice sampling algorithm of Neal (2003), with a doubling algorithm (Neal 2003 P715 Fig. 4), which doubles the size of the interval at each iteration and shrinkage (Neal 2003 P716 Fig. 5), which reduces the width of the slice when a selected point is rejected, by setting the relevant bound to that value. Randomly sampled points are checked for two criteria: that they lie within the slice and that they pass the acceptability check (Neal 2003 P717 Fig. 6), which tests that the new state could have generated the previous one. Args: x_initial: A tensor of any shape. The initial positions of the chains. This function assumes that all the dimensions of `x_initial` are batch dimensions (i.e. the event shape is `[]`). target_log_prob: Callable accepting a tensor like `x_initial` and returning a tensor containing the log density at that point of the same shape.
log_slice_heights: Tensor of the same shape and dtype as the return value of `target_log_prob` when applied to `x_initial`. The log of the height of the chosen slice. step_size: A tensor of shape and dtype compatible with `x_initial`. The min interval size in the doubling algorithm. lower_bounds: Tensor of same shape and dtype as `x_initial`. Slice lower bounds for each chain. upper_bounds: Tensor of same shape and dtype as `x_initial`. Slice upper bounds for each chain. seed: PRNG seed; see `tfp.random.sanitize_seed` for details. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: x_proposed: A tensor of the same shape and dtype as `x_initial`. The next proposed state of the chain. """ with tf.name_scope(name or 'sample_with_shrinkage'): seed = samplers.sanitize_seed(seed) # Keeps track of whether an acceptable sample has been found for the chain. found = tf.zeros_like(x_initial, dtype=tf.bool) cond = lambda found, *ignored_args: ~tf.reduce_all(found) x_next = tf.identity(x_initial) x_initial_shape = ps.shape(x_initial) x_initial_dtype = dtype_util.base_dtype(x_initial.dtype) def _body(found, seed, left, right, x_next): """Iterates until every chain has found a suitable next state.""" proportions_seed, next_seed = samplers.split_seed(seed) proportions = samplers.uniform( x_initial_shape, dtype=x_initial_dtype, seed=proportions_seed) x_proposed = tf.where(~found, left + proportions * (right - left), x_next) accept_res = _test_acceptance(x_initial, target_log_prob=target_log_prob, decided=found, log_slice_heights=log_slice_heights, x_proposed=x_proposed, step_size=step_size, lower_bounds=left, upper_bounds=right) boundary_test = log_slice_heights < target_log_prob(x_proposed) can_accept = boundary_test & accept_res next_found = found | can_accept # Note that it might seem that we are moving the left and right end points # even if the point has been accepted (which is contrary to the stated # algorithm in Neal). However, this does not matter because the endpoints # for points that have been already accepted are not used again so it # doesn't matter what we do with them. next_left = tf.where(x_proposed < x_initial, x_proposed, left) next_right = tf.where(x_proposed >= x_initial, x_proposed, right) return (next_found, next_seed, next_left, next_right, x_proposed) return tf.while_loop( cond=cond, body=_body, loop_vars=(found, seed, lower_bounds, upper_bounds, x_next))[-1] def slice_sampler_one_dim(target_log_prob, x_initial, step_size=0.01, max_doublings=30, seed=None, name=None): """For a given x position in each Markov chain, returns the next x. Applies the one dimensional slice sampling algorithm as defined in Neal (2003) to an input tensor x of shape (num_chains,) where num_chains is the number of simultaneous Markov chains, and returns the next tensor x of shape (num_chains,) when these chains are evolved by the slice sampling algorithm. Args: target_log_prob: Callable accepting a tensor like `x_initial` and returning a tensor containing the log density at that point of the same shape. x_initial: A tensor of any shape. The initial positions of the chains. This function assumes that all the dimensions of `x_initial` are batch dimensions (i.e. the event shape is `[]`). step_size: A tensor of shape and dtype compatible with `x_initial`. The min interval size in the doubling algorithm. max_doublings: Scalar tensor of dtype `tf.int32`. The maximum number of doublings to try to find the slice bounds.
seed: (Optional) positive int, or Tensor seed pair. The random seed. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., 'find_slice_bounds'). Returns: retval: A tensor of the same shape and dtype as `x_initial`. The next state of the Markov chain. next_target_log_prob: The target log density evaluated at `retval`. bounds_satisfied: A tensor of bool dtype and shape batch dimensions. upper_bounds: Tensor of the same shape and dtype as `x_initial`. The upper bounds for the slice found. lower_bounds: Tensor of the same shape and dtype as `x_initial`. The lower bounds for the slice found. """ gamma_seed, bounds_seed, sample_seed = samplers.split_seed( seed, n=3, salt='ssu.slice_sampler_one_dim') with tf.name_scope(name or 'slice_sampler_one_dim'): dtype = dtype_util.common_dtype([x_initial, step_size], dtype_hint=tf.float32) x_initial = tf.convert_to_tensor(x_initial, dtype=dtype) step_size = tf.convert_to_tensor(step_size, dtype=dtype) # Obtain the input dtype of the array. # Select the height of the slice. Tensor of shape x_initial.shape. log_slice_heights = target_log_prob(x_initial) - gamma_lib.random_gamma( ps.shape(x_initial), concentration=tf.ones([], dtype=dtype), seed=gamma_seed) # Given the above x and slice heights, compute the bounds of the slice for # each chain. upper_bounds, lower_bounds, bounds_satisfied = slice_bounds_by_doubling( x_initial, target_log_prob, log_slice_heights, max_doublings, step_size, seed=bounds_seed) retval = _sample_with_shrinkage(x_initial, target_log_prob=target_log_prob, log_slice_heights=log_slice_heights, step_size=step_size, lower_bounds=lower_bounds, upper_bounds=upper_bounds, seed=sample_seed) return (retval, target_log_prob(retval), bounds_satisfied, upper_bounds, lower_bounds)
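# ----------------------------------------------------------------------
# Illustrative usage sketch (assumes the imports at the top of this
# module): evolve four chains targeting an unnormalized standard normal
# by one slice-sampling step. Shapes follow the docstrings above; the
# step size, doubling budget, and stateless seed pair are arbitrary
# choices for the demo.
def _demo_slice_step():
  target_log_prob = lambda x: -0.5 * x ** 2  # log N(0, 1) up to a constant.
  x = tf.zeros([4])                          # Four chains starting at 0.
  next_x, next_tlp, bounds_ok, upper, lower = slice_sampler_one_dim(
      target_log_prob, x, step_size=0.5, max_doublings=10, seed=[1, 2])
  return next_x, bounds_ok                   # Both have shape [4].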
# Copyright 2014 Dirk Pranke. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from glop.fakes.host_fake import FakeHost from glop.host import Host from glop.tool import main, VERSION SIMPLE_GRAMMAR = "grammar = anything*:as end -> ''.join(as) ," class CheckMixin(object): def _write_files(self, host, files): for path, contents in list(files.items()): host.write_text_file(path, contents) def _read_files(self, host, tmpdir): out_files = {} for f in host.files_under(tmpdir): out_files[f] = host.read_text_file(host.join(tmpdir, f)) return out_files def assert_files(self, expected_files, actual_files): for k, v in actual_files.items(): self.assertEqual(expected_files[k], v) self.assertEqual(set(actual_files.keys()), set(expected_files.keys())) def check_match(self, grammar, input_txt, returncode=0, out=None, err=None): host = self._host() try: tmpdir = host.mkdtemp() input_path = host.join(tmpdir, 'input.txt') grammar_path = host.join(tmpdir, 'grammar.g') host.write_text_file(input_path, input_txt) host.write_text_file(grammar_path, grammar) args = ['-i', input_path, grammar_path] self._call(host, args, None, returncode, out, err) finally: host.rmtree(tmpdir) def check_cmd(self, args, stdin=None, files=None, returncode=None, out=None, err=None, output_files=None): host = self._host() orig_wd, tmpdir = None, None try: orig_wd = host.getcwd() tmpdir = host.mkdtemp() host.chdir(tmpdir) if files: self._write_files(host, files) rv = self._call(host, args, stdin, returncode, out, err) actual_ret, actual_out, actual_err = rv actual_output_files = self._read_files(host, host.getcwd()) finally: if tmpdir: host.rmtree(tmpdir) if orig_wd: host.chdir(orig_wd) if output_files: self.assert_files(output_files, actual_output_files) return actual_ret, actual_out, actual_err class UnitTestMixin(object): def _host(self): return FakeHost() def _call(self, host, args, stdin=None, returncode=None, out=None, err=None): if stdin is not None: host.stdin.write(unicode(stdin)) host.stdin.seek(0) actual_ret = main(host, args) actual_out = host.stdout.getvalue() actual_err = host.stderr.getvalue() if returncode is not None: self.assertEqual(returncode, actual_ret) if out is not None: self.assertEqual(out, actual_out) if err is not None: self.assertEqual(err, actual_err) return actual_ret, actual_out, actual_err class TestGrammarPrettyPrinter(UnitTestMixin, CheckMixin, unittest.TestCase): maxDiff = None def test_glop(self): h = Host() glop_contents = h.read_text_file( h.join(h.dirname(h.path_to_host_module()), '..', 'grammars', 'glop.g')) files = {'glop.g': glop_contents} host = self._host() orig_wd, tmpdir = None, None try: orig_wd = host.getcwd() tmpdir = host.mkdtemp() host.chdir(tmpdir) if files: self._write_files(host, files) ret, _, _ = self._call(host, ['--pretty-print', 'glop.g', '-o', 'glop2.g']) self.assertEqual(0, ret) ret, _, _ = self._call(host, ['--pretty-print', 'glop2.g', '-o', 'glop3.g']) self.assertEqual(0, ret) actual_output_files = self._read_files(host, host.getcwd()) 
self.assertMultiLineEqual(actual_output_files['glop2.g'], actual_output_files['glop3.g']) finally: if tmpdir: host.rmtree(tmpdir) if orig_wd: host.chdir(orig_wd) class UnitTestMain(UnitTestMixin, CheckMixin, unittest.TestCase): def test_ctrl_c(self): host = FakeHost() def raise_ctrl_c(*_comps): raise KeyboardInterrupt host.read_text_file = raise_ctrl_c host.write_text_file('simple.g', SIMPLE_GRAMMAR) self._call(host, ['simple.g'], returncode=130, out='', err='Interrupted, exiting ...\n') class TestMain(UnitTestMixin, CheckMixin, unittest.TestCase): def test_files(self): files = { 'simple.g': SIMPLE_GRAMMAR, 'input.txt': 'hello, world\n', } out_files = files.copy() out_files['output.txt'] = 'hello, world\n' self.check_cmd(['-i', 'input.txt', '-o', 'output.txt', 'simple.g'], files=files, returncode=0, out='', err='', output_files=out_files) def test_no_grammar(self): self.check_cmd([], returncode=2) def test_grammar_file_not_found(self): self.check_cmd(['missing.g'], returncode=1, err='Error: no such file: "missing.g"\n') def test_input_on_stdin(self): files = { 'simple.g': SIMPLE_GRAMMAR, } self.check_cmd(['simple.g'], stdin="hello, world\n", files=files, returncode=0, out="hello, world\n", err='') def test_pretty_print(self): files = { 'simple.g': SIMPLE_GRAMMAR, } out_files = files.copy() self.check_cmd(['-p', 'simple.g'], files=files, returncode=0, out="grammar = anything*:as end -> ''.join(as)\n", output_files=out_files) def test_parse_bad_grammar(self): files = { 'bad.g': 'grammar', } self.check_cmd(['bad.g'], files=files, returncode=1, out='', err=None) def test_version(self): self.check_cmd(['-V'], returncode=0, out=(VERSION + '\n'), err=None) self.check_cmd(['--version'], returncode=0, out=(VERSION + '\n'), err=None) class TestInterpreter(UnitTestMixin, CheckMixin, unittest.TestCase): def test_basic(self): self.check_match(SIMPLE_GRAMMAR, 'hello, world', returncode=0, out='hello, world', err='') def test_no_match(self): self.check_match("grammar = 'foo' | 'bar',", 'baz', returncode=1) def test_star(self): self.check_match("grammar = 'a'* end", '') self.check_match("grammar = 'a'* end", 'a') self.check_match("grammar = 'a'* end", 'aa') def test_plus(self): self.check_match("grammar = 'a'+ end", '', returncode=1) self.check_match("grammar = 'a'+ end", 'a') self.check_match("grammar = 'a'+ end", 'aa') def test_opt(self): self.check_match("grammar = 'a'? end ,", '') self.check_match("grammar = 'a'? end ,", 'a') self.check_match("grammar = 'a'? 
end ,", 'aa', returncode=1) def test_choice(self): self.check_match("grammar = 'foo' | 'bar',", 'foo', 0, 'foo', '') self.check_match("grammar = 'foo' | 'bar',", 'bar', 0, 'bar', '') def test_apply(self): self.check_match(""" grammar = (foo | bar) end , foo = 'foo' , bar = 'bar' , """, 'foo') def test_not(self): g = """grammar = '"' (~'"' anything)*:as '"' end -> ''.join(as) ,""" self.check_match(g, '""') self.check_match(g, '"hello"', out='hello') def test_pred(self): self.check_match("grammar = ?( 1 ) end ,", '') self.check_match("grammar = ?( 0 ) end ,", '', returncode=1) def test_py_plus(self): self.check_match("grammar = end -> 1 + 1 ,", '', returncode=0, out='2') def test_py_getitem(self): self.check_match("grammar = end -> 'bar'[1] ,", '', returncode=0, out='a') def test_escaping(self): self.check_match("grammar = '\\'' end -> 'ok'", '\'') self.check_match("grammar = '\\n' end -> 'ok'", '\n') self.check_match("grammar = '\\\\' end -> 'ok'", '\\') def test_double_quoted_literals(self): self.check_match('grammar = "a"+ end ,', 'aa') def test_no_trailing_commas_on_rules(self): self.check_match("grammar = a b end a = 'a' b = 'b'", 'ab') if __name__ == '__main__': unittest.main()
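# ----------------------------------------------------------------------
# Illustrative sketch: further interpreter cases in the same style as the
# tests above. The class and method names are hypothetical additions; the
# grammar strings reuse only constructs the existing tests exercise.
class TestInterpreterSequences(UnitTestMixin, CheckMixin, unittest.TestCase):
    def test_literal_sequence(self):
        self.check_match("grammar = 'a' 'b' end ,", 'ab')

    def test_grouped_choice(self):
        self.check_match("grammar = ('a' | 'b') 'c' end ,", 'bc')
        self.check_match("grammar = ('a' | 'b') 'c' end ,", 'cc',
                         returncode=1)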
import sys import json import time import core.config import core.theme import core.event import util.format def dump_json(obj): return obj.dict() def assign(src, dst, key, src_key=None, default=None): if not src_key: if key.startswith("_"): src_key = key else: src_key = key.replace("_", "-") # automagically replace _ with - for k in src_key if isinstance(src_key, list) else [src_key]: if k in src: dst[key] = src[k] return if default is not None: dst[key] = default class block(object): __COMMON_THEME_FIELDS = [ "separator", "separator-block-width", "default-separators", "border-top", "border-left", "border-right", "border-bottom", "fg", "bg", "padding", "prefix", "suffix", ] def __init__(self, theme, module, widget): self.__attributes = {} for key in self.__COMMON_THEME_FIELDS: tmp = theme.get(key, widget) if tmp is not None: self.__attributes[key] = tmp self.__attributes["name"] = module.id self.__attributes["instance"] = widget.id self.__attributes["prev-bg"] = theme.get("bg", "previous") def set(self, key, value): self.__attributes[key] = value def get(self, key, default=None): return self.__attributes.get(key, default) def is_pango(self, attr): if isinstance(attr, dict) and "pango" in attr: return True return False def pangoize(self, text): if not self.is_pango(text): return text self.__attributes["markup"] = "pango" attr = dict(text["pango"]) text = attr.get("full_text", "") if "full_text" in attr: del attr["full_text"] result = "<span" for key, value in attr.items(): result = '{} {}="{}"'.format(result, key, value) result = "{}>{}</span>".format(result, text) return result def dict(self): result = {} assign(self.__attributes, result, "full_text", ["full_text", "separator"]) assign(self.__attributes, result, "separator", "default-separators") if "_decorator" in self.__attributes: assign(self.__attributes, result, "color", "bg") assign(self.__attributes, result, "background", "prev-bg") result["_decorator"] = True else: assign(self.__attributes, result, "color", "fg") assign(self.__attributes, result, "background", "bg") if "full_text" in self.__attributes: prefix = self.__pad(self.pangoize(self.__attributes.get("prefix"))) suffix = self.__pad(self.pangoize(self.__attributes.get("suffix"))) self.set("_prefix", prefix) self.set("_suffix", suffix) self.set("_raw", self.get("full_text")) result["full_text"] = self.pangoize(result["full_text"]) result["full_text"] = self.__format(self.__attributes["full_text"]) if "min-width" in self.__attributes and "padding" in self.__attributes: self.set("min-width", self.__format(self.get("min-width"))) for k in [ "name", "instance", "separator_block_width", "border", "border_top", "border_bottom", "border_left", "border_right", "markup", "_raw", "_suffix", "_prefix", "min_width", "align", ]: assign(self.__attributes, result, k) return result def __pad(self, text): padding = self.__attributes.get("padding", "") if not text: return padding return "{}{}{}".format(padding, text, padding) def __format(self, text): if text is None: return None prefix = self.get("_prefix") suffix = self.get("_suffix") return "{}{}{}".format(prefix, text, suffix) class i3(object): def __init__(self, theme=core.theme.Theme(), config=core.config.Config([])): self.__modules = [] self.__content = {} self.__theme = theme self.__config = config core.event.register("update", self.update) core.event.register("start", self.draw, "start") core.event.register("draw", self.draw, "statusline") core.event.register("stop", self.draw, "stop") def content(self): return self.__content def theme(self,
new_theme=None): if new_theme: self.__theme = new_theme return self.__theme def modules(self, modules=None): if not modules: return self.__modules self.__modules = modules if isinstance(modules, list) else [modules] def toggle_minimize(self, event): widget_id = event["instance"] for module in self.__modules: if module.widget(widget_id=widget_id) and util.format.asbool(module.parameter("minimize", False)) == True: # this module can customly minimize module.minimized = not module.minimized return if widget_id in self.__content: self.__content[widget_id]["minimized"] = not self.__content[widget_id]["minimized"] def draw(self, what, args=None): cb = getattr(self, what) data = cb(args) if args else cb() if "blocks" in data: sys.stdout.write(json.dumps(data["blocks"], default=dump_json)) if "suffix" in data: sys.stdout.write(data["suffix"]) sys.stdout.write("\n") sys.stdout.flush() def start(self): return { "blocks": {"version": 1, "click_events": True}, "suffix": "\n[", } def stop(self): return {"suffix": "\n]"} def separator_block(self, module, widget): if not self.__theme.get("separator"): return [] blk = block(self.__theme, module, widget) blk.set("_decorator", True) return [blk] def __content_block(self, module, widget): blk = block(self.__theme, module, widget) minwidth = widget.theme("minwidth") if minwidth is not None: try: blk.set("min-width", "-" * int(minwidth)) except: blk.set("min-width", minwidth) blk.set("align", widget.theme("align")) blk.set("full_text", "\u2026" if self.__content[widget.id]["minimized"] else self.__content[widget.id]["text"]) if widget.get("pango", False): blk.set("markup", "pango") if self.__config.debug(): state = module.state(widget) if isinstance(state, list): state = ", ".join(state) blk.set("__state", state) return blk def blocks(self, module): blocks = [] if module.minimized: blocks.extend(self.separator_block(module, module.widgets()[0])) blocks.append(self.__content_block(module, module.widgets()[0])) return blocks for widget in module.widgets(): if widget.module and self.__config.autohide(widget.module.name): if not any( state in widget.state() for state in ["warning", "critical", "no-autohide"] ): continue if module.hidden(): continue if widget.hidden: continue if "critical" in widget.state() and self.__config.errorhide(widget.module.name): continue blocks.extend(self.separator_block(module, widget)) blocks.append(self.__content_block(module, widget)) core.event.trigger("next-widget") return blocks def update(self, affected_modules=None, redraw_only=False, force=False): now = time.time() for module in self.__modules: if affected_modules and not module.id in affected_modules: continue if not affected_modules and module.next_update: if now < module.next_update and not force: continue if not redraw_only: module.update_wrapper() if module.parameter("interval", "") != "never": module.next_update = now + util.format.seconds( module.parameter("interval", self.__config.interval()) ) else: module.next_update = sys.maxsize for widget in module.widgets(): if not widget.id in self.__content: self.__content[widget.id] = { "minimized": False } self.__content[widget.id]["text"] = widget.full_text() def statusline(self): blocks = [] for module in self.__modules: blocks.extend(self.blocks(module)) return {"blocks": blocks, "suffix": ","} def wait(self, interval): time.sleep(interval) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
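# ----------------------------------------------------------------------
# Illustrative sketch: how block.pangoize() renders a pango widget value.
# The theme/module/widget stubs are hypothetical stand-ins exposing only
# the attributes block() actually reads.
class _StubIdentifiable(object):
    id = "stub"

class _StubTheme(object):
    def get(self, key, widget=None):
        return None  # No theme attributes defined for this demo.

_blk = block(_StubTheme(), _StubIdentifiable(), _StubIdentifiable())
# A dict of the form {"pango": {...}} becomes a <span> string, and the
# block is flagged as pango markup as a side effect.
assert (_blk.pangoize({"pango": {"full_text": "hi", "weight": "bold"}})
        == '<span weight="bold">hi</span>')
assert _blk.get("markup") == "pango"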
# -*- coding: utf-8 -*- """ The database schema . There are four entities: ScraperSearch: Represents a search job. SearchEngineResultsPage: Represents a SERP result page of a search_engine Link: Represents a LINK on a SERP Proxy: Stores all proxies and their statuses. """ import datetime from urllib.parse import urlparse from sqlalchemy import Boolean, Column, DateTime, Enum, ForeignKey from sqlalchemy import Integer, String, Table from sqlalchemy import create_engine, UniqueConstraint from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship, backref from sqlalchemy.orm import scoped_session from sqlalchemy.orm import sessionmaker Base = declarative_base() scraper_searches_serps = Table( 'scraper_searches_serps', Base.metadata, Column('scraper_search_id', Integer, ForeignKey('scraper_search.id')), Column('serp_id', Integer, ForeignKey('serp.id')) ) class ScraperSearch(Base): __tablename__ = 'scraper_search' id = Column(Integer, primary_key=True) number_search_engines_used = Column(Integer) used_search_engines = Column(String) number_proxies_used = Column(Integer) number_search_queries = Column(Integer) started_searching = Column(DateTime, default=datetime.datetime.utcnow) stopped_searching = Column(DateTime) serps = relationship( 'SearchEngineResultsPage', secondary=scraper_searches_serps, backref=backref('scraper_searches', uselist=True) ) def __str__(self): return ''' Search[{id}] scraped for {number_search_queries} unique keywords. Started: {started_searching} and stopped: {stopped_searching} '''.format(**self.__dict__) def __repr__(self): return self.__str__() class SearchEngineResultsPage(Base): __tablename__ = 'serp' id = Column(Integer, primary_key=True) status = Column(String, default='successful') search_engine_name = Column(String) scrape_method = Column(String) page_number = Column(Integer) requested_at = Column(DateTime, default=datetime.datetime.utcnow) requested_by = Column(String, default='127.0.0.1') # string in SERP indicates how many results we got for the search term. num_results_for_query = Column(String, default='') # Whether we got any results at all. This is the same as len(serp.links) num_results = Column(Integer, default=-1) query = Column(String) # if the query was modified by the search engine because there weren't any # results, this variable is set to the query that was used instead. # Otherwise it remains empty. effective_query = Column(String, default='') no_results = Column(Boolean, default=False) def __str__(self): return ''' {search_engine_name} has [{num_results}] link results for query "{query}"'''.format(**self.__dict__) def __repr__(self): return self.__str__() def has_no_results_for_query(self): """ Returns True if the original query did not yield any results. Returns False if either there are no serp entries, or the search engine auto corrected the query. 
""" return self.num_results == 0 or self.effective_query def set_values_from_parser(self, parser): """Populate itself from a parser object.""" self.num_results_for_query = parser.num_results_for_query self.num_results = parser.num_results self.effective_query = parser.effective_query self.no_results = parser.no_results for key, value in parser.search_results.items(): if isinstance(value, list): for link in value: parsed = urlparse(link['link']) if link['snippet'] is not None: # try to remove inline css, which is in some results since 12/2017 tmp_snipped = link['snippet'].split('}') if len(tmp_snipped) > 1: link['snippet'] = tmp_snipped[len(tmp_snipped)-1] # fill with nones to prevent key errors [link.update({key: None}) for key in ( 'snippet', 'title', 'visible_link', 'rating', 'sitelinks' ) if key not in link] Link( link=link['link'], snippet=link['snippet'], title=link['title'], visible_link=link['visible_link'], domain=parsed.netloc, rank=link['rank'], serp=self, link_type=key, rating=link['rating'], sitelinks=link['sitelinks'] ) for key, value in parser.related_keywords.items(): if isinstance(value, list) and len(value) > 0: for keyword in value: [keyword.update({key: None}) for key in ( 'keyword', ) if key not in keyword] RelatedKeyword( keyword=keyword['keyword'], rank=keyword['rank'], serp=self, ) def set_values_from_scraper(self, scraper): """Populate itself from a scraper object. A scraper may be any object of type: - SelScrape Args: A scraper object. """ self.query = scraper.query self.search_engine_name = scraper.search_engine_name self.scrape_method = scraper.scrape_method self.page_number = scraper.page_number self.requested_at = scraper.requested_at self.requested_by = scraper.requested_by self.status = scraper.status def was_correctly_requested(self): return self.status == 'successful' # Alias as a shorthand for working in the shell SERP = SearchEngineResultsPage class Link(Base): __tablename__ = 'link' id = Column(Integer, primary_key=True) title = Column(String) snippet = Column(String) link = Column(String) domain = Column(String) visible_link = Column(String) rank = Column(Integer) link_type = Column(String) rating = Column(String) sitelinks = Column(String) serp_id = Column(Integer, ForeignKey('serp.id')) serp = relationship( SearchEngineResultsPage, backref=backref('links', uselist=True) ) def __str__(self): return '<Link at rank {rank} has url: {link}>'.format(**self.__dict__) def __repr__(self): return self.__str__() class RelatedKeyword(Base): __tablename__ = 'related_keywords' id = Column(Integer, primary_key=True) keyword = Column(String) rank = Column(Integer) serp_id = Column(Integer, ForeignKey('serp.id')) serp = relationship( SearchEngineResultsPage, backref=backref('related_keywords', uselist=True) ) def __str__(self): return ''' <related keyword at rank {rank} : {keyword}> '''.format(**self.__dict__) def __repr__(self): return self.__str__() class Proxy(Base): __tablename__ = 'proxy' id = Column(Integer, primary_key=True) ip = Column(String) hostname = Column(String) port = Column(Integer) proto = Column(Enum('socks5', 'socks4', 'http')) username = Column(String) password = Column(String) online = Column(Boolean) status = Column(String) checked_at = Column(DateTime) created_at = Column(DateTime, default=datetime.datetime.utcnow) city = Column(String) region = Column(String) country = Column(String) loc = Column(String) org = Column(String) postal = Column(String) UniqueConstraint(ip, port, name='unique_proxy') def __str__(self): return '<Proxy 
{ip}>'.format(**self.__dict__) def __repr__(self): return self.__str__() db_Proxy = Proxy class SearchEngine(Base): __tablename__ = 'search_engine' id = Column(Integer, primary_key=True) name = Column(String, unique=True) http_url = Column(String) selenium_url = Column(String) image_url = Column(String) class SearchEngineProxyStatus(Base): """Stores last proxy status for the given search engine. A proxy can either work on a search engine or not. """ __tablename__ = 'search_engine_proxy_status' id = Column(Integer, primary_key=True) proxy_id = Column(Integer, ForeignKey('proxy.id')) search_engine_id = Column(Integer, ForeignKey('search_engine.id')) available = Column(Boolean) last_check = Column(DateTime) def get_engine(config, path=None): """Return the sqlalchemy engine.""" db_name = config.get('database_name', '/tmp/serpscrap') + '.db' db_path = path if path else db_name echo = config.get('log_sqlalchemy', False) engine = create_engine( 'sqlite:///' + db_path, echo=echo, connect_args={'check_same_thread': False} ) Base.metadata.create_all(engine) return engine def get_session(config, scoped=False, engine=None, path=None): if not engine: engine = get_engine(config, path=path) session_factory = sessionmaker( bind=engine, autoflush=True, autocommit=False, ) if scoped: ScopedSession = scoped_session(session_factory) return ScopedSession else: return session_factory def fixtures(config, session): """Add some base data.""" for se in config.get('supported_search_engines', []): if se: search_engine = session.query(SearchEngine).filter( SearchEngine.name == se ).first() if not search_engine: session.add(SearchEngine(name=se)) session.commit()
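# ----------------------------------------------------------------------
# Illustrative usage sketch: an in-memory round trip through the schema
# above. The config keys mirror those read by get_engine()/fixtures();
# the values and the helper name are hypothetical.
def _demo_roundtrip():
    config = {'database_name': 'demo', 'log_sqlalchemy': False}
    session = get_session(config, path=':memory:')()
    serp = SearchEngineResultsPage(search_engine_name='google', query='test')
    # Attaching via the relationship also populates serp.links through the
    # backref, so adding the SERP cascades to the link.
    Link(link='http://example.com/', rank=1, serp=serp)
    session.add(serp)
    session.commit()
    return session.query(Link).count()  # -> 1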
# Copyright 2009-2013, Peter A. Bigot # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain a # copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function from pyxb.bundles.wssplat.raw.wsdl11 import * import pyxb.bundles.wssplat.raw.wsdl11 as raw_wsdl11 import pyxb.namespace from pyxb.utils import domutils, six import xml.dom def ImportRelatedNamespaces (): """Import modules for related namespaces so they are available to create binding instances from the WSDL sources.""" try: import pyxb.bundles.wssplat.soapbind11 except ImportError: pass try: import pyxb.bundles.wssplat.soapbind12 except ImportError: pass try: import pyxb.bundles.wssplat.soap11 except ImportError: pass try: import pyxb.bundles.wssplat.soap12 except ImportError: pass try: import pyxb.bundles.wssplat.soapenv except ImportError: pass try: import pyxb.bundles.wssplat.httpbind except ImportError: pass try: import pyxb.bundles.wssplat.mimebind except ImportError: pass class _WSDL_binding_mixin (object): """Mix-in class to mark element Python bindings that are expected to be wildcard matches in WSDL binding elements.""" pass class _WSDL_port_mixin (object): """Mix-in class to mark element Python bindings that are expected to be wildcard matches in WSDL port elements.""" pass class _WSDL_operation_mixin (object): """Mix-in class to mark element Python bindings that are expected to be wildcard matches in WSDL (binding) operation elements.""" pass class tPort (raw_wsdl11.tPort): def __getBindingReference (self): return self.__bindingReference def _setBindingReference (self, binding_reference): self.__bindingReference = binding_reference __bindingReference = None bindingReference = property(__getBindingReference) def __getAddressReference (self): return self.__addressReference def _setAddressReference (self, address_reference): self.__addressReference = address_reference __addressReference = None addressReference = property(__getAddressReference) raw_wsdl11.tPort._SetSupersedingClass(tPort) class tBinding (raw_wsdl11.tBinding): def __getPortTypeReference (self): return self.__portTypeReference def setPortTypeReference (self, port_type_reference): self.__portTypeReference = port_type_reference __portTypeReference = None portTypeReference = property(__getPortTypeReference) def __getProtocolBinding (self): """Return the protocol-specific binding information.""" return self.__protocolBinding def _setProtocolBinding (self, protocol_binding): self.__protocolBinding = protocol_binding __protocolBinding = None protocolBinding = property(__getProtocolBinding) def operationMap (self): return self.__operationMap __operationMap = None def __init__ (self, *args, **kw): super(tBinding, self).__init__(*args, **kw) self.__operationMap = { } raw_wsdl11.tBinding._SetSupersedingClass(tBinding) class tPortType (raw_wsdl11.tPortType): def operationMap (self): return self.__operationMap __operationMap = None def __init__ (self, *args, **kw): super(tPortType, self).__init__(*args, **kw) self.__operationMap = { } raw_wsdl11.tPortType._SetSupersedingClass(tPortType) class tParam (raw_wsdl11.tParam): 
def __getMessageReference (self): return self.__messageReference def _setMessageReference (self, message_reference): self.__messageReference = message_reference __messageReference = None messageReference = property(__getMessageReference) raw_wsdl11.tParam._SetSupersedingClass(tParam) class tFault (raw_wsdl11.tFault): def __getMessageReference (self): return self.__messageReference def _setMessageReference (self, message_reference): self.__messageReference = message_reference __messageReference = None messageReference = property(__getMessageReference) raw_wsdl11.tFault._SetSupersedingClass(tFault) class tPart (raw_wsdl11.tPart): def __getElementReference (self): return self.__elementReference def _setElementReference (self, element_reference): self.__elementReference = element_reference __elementReference = None elementReference = property(__getElementReference) def __getTypeReference (self): return self.__typeReference def _setTypeReference (self, type_reference): self.__typeReference = type_reference __typeReference = None typeReference = property(__getTypeReference) raw_wsdl11.tPart._SetSupersedingClass(tPart) class tBindingOperation (raw_wsdl11.tBindingOperation): def __getOperationReference (self): return self.__operationReference def _setOperationReference (self, operation_reference): self.__operationReference = operation_reference __operationReference = None operationReference = property(__getOperationReference) raw_wsdl11.tBindingOperation._SetSupersedingClass(tBindingOperation) class tDefinitions (raw_wsdl11.tDefinitions): def messageMap (self): return self.targetNamespace().messages() def namespaceContext (self): return self.__namespaceContext __namespaceContext = None def bindingMap (self): return self.__bindingMap __bindingMap = None def targetNamespace (self): return self.namespaceContext().targetNamespace() def namespace (self): return self.__namespace __namespace = None def _addToMap (self, map, qname, value): map[qname] = value (ns, ln) = qname if (ns == self.targetNamespace()): map[(None, ln)] = value elif (ns is None): map[(self.targetNamespace(), ln)] = value return map def schema (self): return self.__schema __schema = None @classmethod def _PreFactory_vx (self, args, kw): # Import standard bindings. If we do this, then wildcard # binding, port, and operation elements will be recognized and # converted into bindings. import pyxb.bundles.wssplat.soapbind11 import pyxb.bundles.wssplat.soapbind12 import pyxb.bundles.wssplat.httpbind # Ensure we have definitions for any externally-referenced # things we might need. @todo: This might have to # chronologically precede the import above. 
pyxb.namespace.archive.NamespaceArchive.PreLoadArchives() raw_wsdl11.Namespace.validateComponentModel() state = ( kw.pop('process_schema', False), kw.pop('generation_uid', None), kw.get('_dom_node') ) return state def _postFactory_vx (self, state): (process_schema, generation_uid, dom_node) = state assert isinstance(dom_node, xml.dom.Node) node_en = pyxb.namespace.ExpandedName(dom_node) self.__namespaceContext = pyxb.namespace.NamespaceContext.GetNodeContext(dom_node) self.__buildMaps() if process_schema: self.__processSchema(generation_uid) self.__finalizeReferences() return self __WSDLCategories = ( 'service', 'port', 'message', 'binding', 'portType' ) def __buildMaps (self): tns = self.namespaceContext().targetNamespace() tns.configureCategories(self.__WSDLCategories) for m in self.message: tns.messages()[m.name] = m for pt in self.portType: tns.portTypes()[pt.name] = pt for op in pt.operation: pt.operationMap()[op.name] = op params = op.fault[:] if op.input is not None: params.append(op.input) if op.output is not None: params.append(op.output) for p in params: msg_en = p.message p._setMessageReference(msg_en.message()) for b in self.binding: tns.bindings()[b.name] = b port_type_en = b.type b.setPortTypeReference(port_type_en.portType()) for wc in b.wildcardElements(): if isinstance(wc, _WSDL_binding_mixin): b._setProtocolBinding(wc) break for op in b.operation: b.operationMap()[op.name] = op for wc in op.wildcardElements(): if isinstance(wc, _WSDL_operation_mixin): op._setOperationReference(wc) break for s in self.service: tns.services()[s.name] = s for p in s.port: binding_en = p.binding p._setBindingReference(binding_en.binding()) for wc in p.wildcardElements(): if isinstance(wc, _WSDL_port_mixin): p._setAddressReference(wc) break def __processSchema (self, generation_uid): global pyxb import pyxb.xmlschema print('PS %s' % (generation_uid,)) if self.__schema is not None: print('Already have schema') return self.__schema for t in self.types: for wc in t.wildcardElements(): if isinstance(wc, xml.dom.Node) and pyxb.namespace.XMLSchema.nodeIsNamed(wc, 'schema'): # Try to load component models for any namespace referenced by this. # Probably shouldn't need to do this except for imported ones. for ns in six.itervalues(self.namespaceContext().inScopeNamespaces()): try: ns.validateComponentModel() except Exception as e: print('Error validating component model for %s: %s' % (ns.uri(), e)) self.__schema = pyxb.xmlschema.schema.CreateFromDOM(wc, namespace_context=self.namespaceContext(), generation_uid=generation_uid) elif isinstance(wc, pyxb.xmlschema.schema): self.__schema = wc else: print('No match: %s %s' % (wc.namespaceURI, wc.localName)) if self.__schema is not None: return self.__schema return None def __finalizeReferences (self): tns = self.namespaceContext().targetNamespace() for m in six.itervalues(tns.messages()): for p in m.part: if (p.element is not None) and (p.elementReference is None): elt_en = p.element p._setElementReference(elt_en.elementDeclaration()) if (p.type is not None) and (p.typeReference is None): type_en = p.type p._setTypeReference(type_en.typeDefinition()) raw_wsdl11.tDefinitions._SetSupersedingClass(tDefinitions) pyxb.namespace.NamespaceContext._AddTargetNamespaceAttribute(raw_wsdl11.Namespace.createExpandedName('definitions'), pyxb.namespace.ExpandedName('targetNamespace'))
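# ----------------------------------------------------------------------
# Illustrative usage sketch: load a WSDL description into these bindings
# and walk its services. This assumes a local file path (hypothetical)
# and that the generated element binding `definitions` is re-exported by
# the wildcard import from raw_wsdl11 above.
def _load_wsdl(path):
    ImportRelatedNamespaces()
    xmls = open(path).read()
    dom = domutils.StringToDOM(xmls)
    # process_schema=True is consumed by _PreFactory_vx above and triggers
    # __processSchema after the maps are built.
    defs = definitions.createFromDOM(dom.documentElement, process_schema=True)
    for svc in defs.service:
        for port in svc.port:
            print('%s/%s -> binding %s' % (svc.name, port.name,
                                           port.bindingReference.name))
    return defs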
# -*- coding: utf-8 -*- from __future__ import unicode_literals import os from datetime import timedelta from django.contrib.auth.models import User, Permission from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.core.management import call_command from django.core import management from django.test import override_settings from selenium.common.exceptions import TimeoutException, WebDriverException from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as ec from selenium import webdriver from selenium.webdriver.chrome.options import Options from faker import Factory from conf.models import Site, SitePermission from ..models import Client, Project, Entry, Task fake = Factory.create() @override_settings( STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage' ) class SeleniumTestCase(StaticLiveServerTestCase): @classmethod def setUpClass(cls): call_command('createsite', verbosity=0) cls.profile = fake.simple_profile() cls.profile['password'] = fake.password() super(SeleniumTestCase, cls).setUpClass() # Use Sauce Labs for CI testing, since Travis CI is inconsistent when # running Selenium directly. if os.environ.get('SAUCE_USERNAME', None): sauce_username = os.environ['SAUCE_USERNAME'] sauce_access_key = os.environ['SAUCE_ACCESS_KEY'] sauce_url = 'http://' + sauce_username + ':' + sauce_access_key + \ '@ondemand.saucelabs.com/wd/hub' desired_capabilities = { 'browserName': 'chrome', 'version': '58', 'platform': 'ANY', 'chromeOptions': { 'prefs': { 'credentials_enable_service': False, 'profile': { 'password_manager_enabled': False } } } } if os.environ.get('TRAVIS_JOB_NUMBER', None): desired_capabilities.update({ 'tunnel-identifier': os.environ['TRAVIS_JOB_NUMBER'], 'build': os.environ['TRAVIS_BUILD_NUMBER'], 'tags': [os.environ['TRAVIS_PYTHON_VERSION'], 'CI'] }) cls.driver = webdriver.Remote( command_executor=sauce_url, desired_capabilities=desired_capabilities ) else: try: options = Options() if os.environ.get('GOOGLE_CHROME_BINARY', None): options.binary_location = \ os.environ['GOOGLE_CHROME_BINARY'] options.add_argument('--headless') options.add_argument('--disable-gpu') options.add_argument('--no-sandbox') options.add_argument('--log-level=3') options.add_argument('--window-size=1280,720') cls.driver = webdriver.Chrome(chrome_options=options) except WebDriverException: cls.driver = webdriver.Firefox() cls.driver.implicitly_wait(10) cls.wait_time = 5 @classmethod def tearDownClass(cls): cls.driver.quit() super(SeleniumTestCase, cls).tearDownClass() def setUp(self): if type(self.driver) is webdriver.Remote: # Provide context to appear in the Sauce Labs log. self.driver.execute_script( 'sauce:context={0}'.format(self._testMethodName)) def find(self, by, value): # Return the bare element when exactly one matches, else the list. elements = self.driver.find_elements(by, value) if len(elements) == 1: return elements[0] else: return elements def clear(self, element): """Send back space commands to clear a form field element.
This is used because Selenium's built-in clear() method is inconsistent.""" value = element.get_attribute('value') if len(value) > 0: for char in value: element.send_keys(Keys.BACK_SPACE) def wait(self, seconds): """Use a lambda that always returns False to wait for a number of seconds without any expected conditions.""" try: WebDriverWait(self.driver, seconds).until(lambda driver: 1 == 0) except TimeoutException: pass def waitForPresence(self, element): return WebDriverWait(self.driver, self.wait_time).until( ec.presence_of_element_located(element)) def waitForText(self, element, text): try: WebDriverWait(self.driver, self.wait_time).until( ec.text_to_be_present_in_element(element, text)) except TimeoutException: # assertIn produces more verbose failure text. self.assertIn(text, self.find(*element).text) def waitForClickable(self, element): """An element becomes "clickable" when it is not disabled. This method is useful for waiting on form buttons that have been disabled while JavaScript is doing work.""" return WebDriverWait(self.driver, self.wait_time).until( ec.element_to_be_clickable(element)) def select2Select(self, id, value): """Select a value in a select2 menu. The select element *must* have an id attribute in order for this to work. Select2 uses the select element id to create its container for selections. """ selector = '#select2-' + id + '-container + .select2-selection__arrow' self.waitForClickable((By.CSS_SELECTOR, selector)) self.find(By.CSS_SELECTOR, selector).click() field = self.waitForPresence((By.CLASS_NAME, 'select2-search__field')) field.send_keys(value) field.send_keys(Keys.RETURN) def addPerms(self, perms): for codename in perms: self.user.user_permissions.add( Permission.objects.get(codename=codename)) self.wait(0.25) def logIn(self): self.user = User.objects.create_user(self.profile['username'], self.profile['mail'], self.profile['password']) site_permission = SitePermission.objects.create(user=self.user) site_permission.sites = Site.objects.filter(id=1) site_permission.save() self.driver.get('%s%s' % (self.live_server_url, '/login/')) username_input = self.find(By.NAME, 'username') self.clear(username_input) username_input.send_keys(self.profile['username']) password_input = self.find(By.NAME, 'password') self.clear(password_input) password_input.send_keys(self.profile['password']) self.find(By.NAME, 'login').click() self.waitForPresence((By.ID, 'nav-app')) def test_login_failure(self): self.driver.get('%s%s' % (self.live_server_url, '/login/')) username_input = self.find(By.NAME, 'username') username_input.send_keys(self.profile['username']) password_input = self.find(By.NAME, 'password') password_input.send_keys('incorrect password') self.find(By.NAME, 'login').click() # A failed login creates an alert notice.
self.waitForPresence((By.CSS_SELECTOR, '.alert.alert-danger')) def test_timer_start(self): self.logIn() self.waitForPresence((By.ID, 'timer-start')) self.find(By.ID, 'timer-start').click() self.wait(2) self.assertNotIn('0h 0m 0s', self.find(By.ID, 'timer-value').text) def test_timer_stop(self): self.logIn() self.waitForPresence((By.ID, 'timer-start')) self.find(By.ID, 'timer-start').click() self.wait(2) self.find(By.ID, 'timer-stop').click() self.assertNotIn('0h 0m 0s', self.find(By.ID, 'timer-value').text) def test_timer_reset(self): self.logIn() self.waitForPresence((By.ID, 'timer-start')) self.find(By.ID, 'timer-start').click() self.wait(2) self.find(By.ID, 'timer-stop').click() self.assertNotIn('0h 0m 0s', self.find(By.ID, 'timer-value').text) self.find(By.ID, 'timer-reset').click() self.waitForText((By.ID, 'timer-value'), '0h 0m 0s') def test_clients_access(self): self.logIn() # self.assertNotIn('nav-app-clients', self.find(By.ID, 'nav-app').text) self.addPerms(['view_client']) self.driver.get(self.live_server_url) self.find(By.ID, 'nav-app-clients').click() self.waitForPresence((By.ID, 'client-rows')) def test_clients_add(self): self.logIn() self.addPerms(['view_client']) self.driver.get('%s%s' % (self.live_server_url, '/clients/')) self.assertNotIn('client-add', self.driver.page_source) self.addPerms(['add_client']) self.driver.refresh() self.find(By.NAME, 'client-add').click() self.waitForPresence((By.ID, 'client-modal')) self.find(By.NAME, 'client-name').send_keys('Client') self.find(By.NAME, 'client-email').send_keys('client@company.com') self.find(By.NAME, 'client-modal-submit').click() self.waitForPresence((By.CLASS_NAME, 'client')) def test_clients_change(self): Client(name='Client', invoice_email='client@company.com', archive=False).save() self.logIn() self.addPerms(['view_client']) self.driver.get('%s%s' % (self.live_server_url, '/clients/')) self.assertNotIn('client-menu-change', self.driver.page_source) self.addPerms(['change_client']) self.driver.refresh() self.find(By.NAME, 'client-menu').click() self.find(By.ID, 'client-menu-change').click() self.waitForPresence((By.NAME, 'client-name')) self.find(By.NAME, 'client-name').send_keys(' Changed') self.find(By.NAME, 'client-modal-submit').click() # There is no case insensitive option for this test at present and # the driver returns as uppercase because the element also has class # text-uppercase. 
self.waitForText((By.CLASS_NAME, 'client-name'), 'CLIENT CHANGED') def test_projects_access(self): client = Client(name='Client', invoice_email='client@company.com', archive=False) client.save() Project(name='Project 1', client=client, estimate=100.00, archive=False).save() self.logIn() self.addPerms(['view_client']) self.driver.get('%s%s' % (self.live_server_url, '/clients/')) self.assertNotIn('Project 1', self.driver.page_source) self.addPerms(['view_project']) self.driver.refresh() self.waitForText((By.CLASS_NAME, 'project'), 'Project 1') def test_projects_add(self): Client(name='Client', invoice_email='client@company.com', archive=False).save() self.logIn() self.addPerms(['view_client', 'view_project']) self.driver.get('%s%s' % (self.live_server_url, '/clients/')) self.assertNotIn('project-add', self.driver.page_source) self.addPerms(['add_project']) self.driver.refresh() self.find(By.NAME, 'project-add').click() self.waitForPresence((By.ID, 'project-modal')) self.select2Select('project-client', 'Client') self.find(By.NAME, 'project-name').send_keys('Project') self.find(By.NAME, 'project-estimate').send_keys('100.00') self.find(By.NAME, 'project-modal-submit').click() self.waitForPresence((By.CLASS_NAME, 'project')) def test_projects_change(self): client = Client(name='Client', invoice_email='client@company.com', archive=False) client.save() Project(name='Project', client=client, estimate=100.00, archive=False).save() self.logIn() self.addPerms(['view_client', 'view_project']) self.driver.get('%s%s' % (self.live_server_url, '/clients/')) self.assertNotIn('project-menu', self.driver.page_source) self.addPerms(['change_project']) self.driver.refresh() self.waitForClickable((By.NAME, 'project-menu')) self.find(By.NAME, 'project-menu').click() self.find(By.ID, 'project-menu-change').click() self.waitForPresence((By.NAME, 'project-name')) self.find(By.NAME, 'project-name').send_keys(' Changed') self.clear(self.find(By.NAME, 'project-estimate')) self.find(By.NAME, 'project-estimate').send_keys('50.00') self.find(By.NAME, 'project-modal-submit').click() self.waitForText((By.CLASS_NAME, 'project-name'), 'Project Changed') def test_tasks_access(self): self.logIn() # self.assertNotIn('nav-app-tasks', self.driver.page_source) self.addPerms(['view_task']) self.driver.get(self.live_server_url) self.find(By.ID, 'nav-app-tasks').click() self.waitForPresence((By.ID, 'task-rows')) def test_tasks_add(self): self.logIn() self.addPerms(['view_task']) self.driver.get('%s%s' % (self.live_server_url, '/tasks/')) self.assertNotIn('task-add', self.driver.page_source) self.addPerms(['add_task']) self.driver.refresh() self.find(By.NAME, 'task-add').click() self.waitForPresence((By.ID, 'task-modal')) self.find(By.NAME, 'task-name').send_keys('Task') self.find(By.NAME, 'task-hourly-rate').send_keys('25') self.find(By.NAME, 'task-modal-submit').click() self.waitForPresence((By.CLASS_NAME, 'task')) def test_tasks_change(self): Task(name='Task', hourly_rate=25).save() self.logIn() self.addPerms(['view_task']) self.driver.get('%s%s' % (self.live_server_url, '/tasks/')) self.assertNotIn('task-menu-change', self.driver.page_source) self.addPerms(['change_task']) self.driver.refresh() self.find(By.NAME, 'task-menu').click() self.find(By.ID, 'task-menu-change').click() self.waitForPresence((By.NAME, 'task-name')) self.find(By.NAME, 'task-name').send_keys(' Changed') hourly_rate_field = self.find(By.NAME, 'task-hourly-rate') hourly_rate_field.click() self.clear(hourly_rate_field) hourly_rate_field.send_keys('125') 
self.find(By.NAME, 'task-modal-submit').click() self.waitForText((By.CLASS_NAME, 'task'), 'Task Changed\n$125') def test_timesheet_access(self): self.logIn() # self.assertNotIn('nav-app-timesheet', self.driver.page_source) self.addPerms(['view_entry']) self.driver.get(self.live_server_url) self.find(By.ID, 'nav-app-timesheet').click() self.waitForPresence((By.ID, 'entry-rows')) def test_timesheet_entry_add(self): client = Client(name='Client', invoice_email='client@company.com', archive=False) client.save() Project(name='Project 1', estimate=100.00, client=client, archive=False).save() Project(name='Project 2', estimate=100.00, client=client, archive=False).save() self.logIn() self.addPerms(['view_client', 'view_entry', 'view_project']) self.driver.get('%s%s' % (self.live_server_url, '/timesheet/')) self.assertNotIn('entry-add', self.driver.page_source) self.addPerms(['add_entry']) self.driver.refresh() self.select2Select('entry-project', 'Project 1') self.find(By.NAME, 'entry-note').send_keys('Note') self.find(By.NAME, 'entry-duration').send_keys('0:35') self.find(By.NAME, 'entry-add-submit').click() self.waitForPresence((By.CLASS_NAME, 'entry')) self.waitForText((By.CLASS_NAME, 'entry'), 'Client\nProject 1\nNote\n0:35') def test_timesheet_entry_add_advanced(self): client = Client(name='Client', invoice_email='client@company.com', archive=False) client.save() Project(name='Project 1', estimate=100.00, client=client, archive=False).save() Project(name='Project 2', estimate=100.00, client=client, archive=False).save() Task(name='Task 1', hourly_rate=130).save() Task(name='Task 2', hourly_rate=80).save() self.logIn() self.addPerms(['view_client', 'view_entry', 'view_project', 'view_task']) self.driver.get('%s%s' % (self.live_server_url, '/timesheet/')) self.assertNotIn('entry-add', self.driver.page_source) self.addPerms(['add_entry']) self.driver.refresh() self.find(By.ID, 'entry-advanced-fields').click() self.select2Select('entry-task', 'Task 2') self.select2Select('entry-project', 'Project 1') self.find(By.NAME, 'entry-note').send_keys('Note') self.find(By.NAME, 'entry-duration').send_keys('0:35') self.find(By.NAME, 'entry-add-submit').click() self.waitForPresence((By.CLASS_NAME, 'entry')) self.waitForText((By.CLASS_NAME, 'entry'), 'Client\nProject 1\nTask 2\nNote\n0:35') def test_timesheet_entry_change(self): client = Client(name='Client', invoice_email='client@company.com', archive=False) client.save() project = Project(name='Project 1', estimate=100.00, client=client, archive=False) project.save() Project(name='Project 2', estimate=100.00, client=client, archive=False).save() task = Task(name='Task 1', hourly_rate=130) task.save() Task(name='Task 2', hourly_rate=80).save() # Log in to establish self.user. 
self.logIn() Entry(project=project, task=task, user=self.user, note='Note', duration=timedelta(minutes=35)).save() self.addPerms(['view_client', 'view_entry', 'view_project', 'view_task']) self.driver.get('%s%s' % (self.live_server_url, '/timesheet/')) self.assertNotIn('entry-menu', self.driver.page_source) self.addPerms(['change_entry']) self.driver.refresh() self.waitForPresence((By.NAME, 'entry-menu')) self.find(By.NAME, 'entry-menu').click() self.waitForPresence((By.CLASS_NAME, 'entry-menu-change')) self.find(By.CLASS_NAME, 'entry-menu-change').click() self.waitForPresence((By.NAME, 'entry-save')) self.select2Select('entry-project', 'Project 2') self.clear(self.find(By.NAME, 'entry-note')) self.find(By.NAME, 'entry-note').send_keys('Changed note') self.clear(self.find(By.NAME, 'entry-duration')) self.find(By.NAME, 'entry-duration').send_keys('1.5') self.find(By.NAME, 'entry-save').click() self.waitForPresence((By.CLASS_NAME, 'entry')) self.waitForText((By.CLASS_NAME, 'entry'), 'Client\nProject 2\nTask 1\nChanged note\n1:30') def test_timesheet_entry_restart(self): client = Client(name='Client', invoice_email='client@company.com', archive=False) client.save() project = Project(name='Project 1', estimate=100.00, client=client, archive=False) project.save() task = Task(name='Task 1', hourly_rate=130) task.save() # Log in to establish self.user. self.logIn() Entry(project=project, task=task, user=self.user, note='Note', duration=timedelta(minutes=35)).save() self.addPerms(['view_entry']) self.driver.get('%s%s' % (self.live_server_url, '/timesheet/')) self.assertNotIn('entry-menu', self.driver.page_source) self.addPerms(['change_entry']) self.driver.refresh() self.waitForPresence((By.NAME, 'entry-menu')) self.find(By.NAME, 'entry-menu').click() self.waitForPresence((By.CLASS_NAME, 'entry-menu-restart')) self.find(By.CLASS_NAME, 'entry-menu-restart').click() self.waitForPresence((By.ID, 'timer-stop')) # Click Timer's "Stop" button and wait for the save button to appear. self.find(By.ID, 'timer-stop').click() self.waitForPresence((By.ID, 'timer-entry-save')) self.find(By.ID, 'timer-entry-save').click() # The actual time should not change because the timer does not run for # more than 60 seconds. self.waitForText((By.CLASS_NAME, 'entry'), 'Client\nProject 1\nTask 1\nNote\n0:35') def test_timesheet_entry_delete(self): client = Client(name='Client', invoice_email='client@company.com', archive=False) client.save() project = Project(name='Project 1', estimate=100.00, client=client, archive=False) project.save() task = Task(name='Task 1', hourly_rate=130) task.save() # Log in to establish self.user. self.logIn() Entry(project=project, task=task, user=self.user, note='Note', duration=timedelta(minutes=35)).save() self.addPerms(['view_entry', 'delete_entry']) self.driver.get('%s%s' % (self.live_server_url, '/timesheet/')) self.waitForPresence((By.NAME, 'entry-menu')) self.find(By.NAME, 'entry-menu').click() self.waitForPresence((By.CLASS_NAME, 'entry-menu-delete')) self.find(By.CLASS_NAME, 'entry-menu-delete').click() self.assertNotIn('entry', self.find(By.ID, 'entry-rows').text) def test_reports_access(self): self.logIn() # self.assertNotIn('nav-app-reports', self.driver.page_source) self.addPerms(['view_entry']) self.driver.get(self.live_server_url) self.find(By.ID, 'nav-app-reports').click() self.waitForPresence((By.ID, 'entry-rows')) def test_reports_filter(self): # Load a new user "tester" and objects associated with that user. 
management.call_command('loaddata', 'test_reports_filter', verbosity=0) self.user = User.objects.get(username='tester') self.addPerms(['view_client', 'view_entry', 'view_invoice', 'view_project', 'view_task']) # Log in with the "tester" account, part of the tests data fixture. self.driver.get('%s%s' % (self.live_server_url, '/login/')) username_input = self.find(By.NAME, 'username') self.clear(username_input) username_input.send_keys('tester') password_input = self.find(By.NAME, 'password') self.clear(password_input) password_input.send_keys('tester') self.find(By.NAME, 'login').click() self.waitForPresence((By.ID, 'nav-app')) self.driver.get('%s%s' % (self.live_server_url, '/reports/')) self.waitForPresence((By.ID, 'component-reports')) # The test data contains 12 fake entries. self.assertEqual(len(self.find(By.CLASS_NAME, 'entry')), 12) # The "tester" user entered eight of the entries. self.select2Select('report-filter-user', 'tester') self.find(By.ID, 'generate-report').submit() # The "Generate Report" button is disabled while the report is loading. self.waitForClickable((By.ID, 'generate-report')) self.assertEqual(len(self.find(By.CLASS_NAME, 'entry')), 8) # Four entries from Tester for "Client 1". self.select2Select('report-filter-client', 'Client 1') self.find(By.ID, 'generate-report').click() self.waitForClickable((By.ID, 'generate-report')) self.assertEqual(len(self.find(By.CLASS_NAME, 'entry')), 4) # Three entries from tester for "Project 1" self.select2Select('report-filter-project', 'Project 1') self.find(By.ID, 'generate-report').click() self.waitForClickable((By.ID, 'generate-report')) self.assertEqual(len(self.find(By.CLASS_NAME, 'entry')), 3) # Two entries from tester for "Task 1" self.select2Select('report-filter-task', 'Task 1') self.find(By.ID, 'generate-report').click() self.waitForClickable((By.ID, 'generate-report')) self.assertEqual(len(self.find(By.CLASS_NAME, 'entry')), 2) # Clear existing filters self.driver.refresh() self.waitForClickable((By.ID, 'generate-report')) self.select2Select('report-filter-user', 'tester') # Five entries from Tester since 2017-05-06. self.find(By.ID, 'report-filter-min-date') # Execute Javascript directly using pickadate's odd syntax. Simulated # steps to load and click a date on the calendar here would be # difficult because the calendar view defaults to the current month. self.driver.execute_script( "p = $('#report-filter-min-date').pickadate('picker');" "p.set('select', [2017, 4, 6]);" ) self.find(By.ID, 'generate-report').submit() self.waitForClickable((By.ID, 'generate-report')) self.assertEqual(len(self.find(By.CLASS_NAME, 'entry')), 5) # Three entries from Tester between 2017-05-06 and 2017-05-16 self.find(By.ID, 'report-filter-max-date') self.driver.execute_script( "p = $('#report-filter-max-date').pickadate('picker');" "p.set('select', [2017, 4, 16]);" ) self.find(By.ID, 'generate-report').submit() self.waitForClickable((By.ID, 'generate-report')) self.assertEqual(len(self.find(By.CLASS_NAME, 'entry')), 3) management.call_command('flush', verbosity=0, interactive=False)
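    # A hypothetical extra test (editor's addition) mirroring the add/change
    # patterns above; the 'task-menu-delete' element id is an assumption
    # modeled on 'entry-menu-delete' and may not exist in the actual
    # templates.
    def test_tasks_delete(self):
        Task(name='Task', hourly_rate=25).save()
        self.logIn()
        self.addPerms(['view_task', 'delete_task'])
        self.driver.get('%s%s' % (self.live_server_url, '/tasks/'))
        self.waitForPresence((By.NAME, 'task-menu'))
        self.find(By.NAME, 'task-menu').click()
        self.waitForPresence((By.ID, 'task-menu-delete'))
        self.find(By.ID, 'task-menu-delete').click()
        self.assertNotIn('Task', self.find(By.ID, 'task-rows').text)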
import datetime from operator import methodcaller from django.forms import CheckboxSelectMultiple import django_filters from accounts.models import ROLE_PARTNER, CtsUser from reports.models import DonorCategoryData, DonorShipmentData from shipments.models import PackageDBView, ShipmentDBView, PackageItemDBView, Shipment EMPTY_LABEL = '---------' THIS_YEAR = datetime.date.today().year # This is how we're displaying dates, it'd be confusing to expect # a different format for input. DATE_INPUT_FORMATS = ['%m/%d/%Y'] DATE_INPUT_HELP = "(M/D/Y)" class ReportFilter(django_filters.FilterSet): def __init__(self, *args, **kwargs): """Add an empty choice to all ChoiceFields, sort the shipment choices, and hide the partner filter from users who cannot view all partners.""" self.user = kwargs.pop('user') super(ReportFilter, self).__init__(*args, **kwargs) for fltr in self.filters.values(): if 'shipment' == fltr.name: # override the options; sort on unicode of each Shipment qs = fltr.field.choices.queryset sorted_shipments = sorted(qs, key=methodcaller('__unicode__')) # If they specified a partner, limit by that if self.data.get('partner', False) is not False: partner_id = int(self.data['partner']) sorted_shipments = [shipment for shipment in sorted_shipments if shipment.partner_id == partner_id] fltr.field.choices = [(x.pk, x) for x in sorted_shipments] fltr.field.choices.insert(0, (None, EMPTY_LABEL)) if isinstance(fltr, django_filters.ChoiceFilter): fltr.field.choices.insert(0, (None, EMPTY_LABEL)) if not self.user.has_perm('reports.view_all_partners') and 'partner' in self.filters: # Remember the key to use to filter on partners later on self.partner_field_name = self.filters['partner'].name # And get rid of the user-visible partner filter del self.filters['partner'] @property def qs(self): qs = super(ReportFilter, self).qs if hasattr(self, 'partner_field_name'): # We are filtering out other partners qs = qs.filter(**{self.partner_field_name: self.user}) return qs class PackageReportFilter(ReportFilter): partner = django_filters.ModelChoiceFilter( name='shipment__partner', queryset=CtsUser.objects.filter(role=ROLE_PARTNER).order_by('name') ) status = django_filters.MultipleChoiceFilter( choices=Shipment.SHIPMENT_STATUS_CHOICES, widget=CheckboxSelectMultiple, label="Shipment status", ) class Meta: model = PackageDBView fields = ('partner', 'shipment', 'status') class DonorByShipmentReportFilter(ReportFilter): shipped_before = django_filters.DateFilter( lookup_type='lte', name='shipment__shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped before ' + DATE_INPUT_HELP ) shipped_after = django_filters.DateFilter( lookup_type='gte', name='shipment__shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped after ' + DATE_INPUT_HELP ) partner = django_filters.ModelChoiceFilter( name='shipment__partner', queryset=CtsUser.objects.filter(role=ROLE_PARTNER).order_by('name') ) status = django_filters.MultipleChoiceFilter( name='shipment__status', choices=Shipment.SHIPMENT_STATUS_CHOICES, widget=CheckboxSelectMultiple, label="Shipment status", ) class Meta: model = DonorShipmentData fields = ('partner', 'donor', 'status') class DonorByCategoryReportFilter(ReportFilter): shipped_before = django_filters.DateFilter( lookup_type='lte', name='first_date_shipped', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped before ' + DATE_INPUT_HELP ) shipped_after = django_filters.DateFilter( lookup_type='gte', name='last_date_shipped', # match the M/D/Y display format
input_formats=DATE_INPUT_FORMATS, label='Shipped after ' + DATE_INPUT_HELP ) class Meta: model = DonorCategoryData fields = ('donor', 'category', 'shipped_before', 'shipped_after') class ItemReportFilter(ReportFilter): shipped_before = django_filters.DateFilter( lookup_type='lte', name='package__shipment__shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped before ' + DATE_INPUT_HELP ) shipped_after = django_filters.DateFilter( lookup_type='gte', name='package__shipment__shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped after ' + DATE_INPUT_HELP ) partner = django_filters.ModelChoiceFilter( name='package__shipment__partner', queryset=CtsUser.objects.filter(role=ROLE_PARTNER).order_by('name') ) status = django_filters.MultipleChoiceFilter( name='package__status', choices=Shipment.SHIPMENT_STATUS_CHOICES, widget=CheckboxSelectMultiple, label='Package status' ) class Meta: model = PackageItemDBView fields = ('partner', 'donor', 'item_category', 'status') class ShipmentReportFilter(ReportFilter): shipped_before = django_filters.DateFilter( lookup_type='lte', name='shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped before ' + DATE_INPUT_HELP ) shipped_after = django_filters.DateFilter( lookup_type='gte', name='shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped after ' + DATE_INPUT_HELP ) partner = django_filters.ModelChoiceFilter( name='partner', queryset=CtsUser.objects.filter(role=ROLE_PARTNER).order_by('name') ) status = django_filters.MultipleChoiceFilter( choices=Shipment.SHIPMENT_STATUS_CHOICES, widget=CheckboxSelectMultiple, label="Shipment status", ) class Meta: model = ShipmentDBView fields = ('partner', 'status') class ReceivedItemsByShipmentReportFilter(ReportFilter): shipped_before = django_filters.DateFilter( lookup_type='lte', name='shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped before ' + DATE_INPUT_HELP ) shipped_after = django_filters.DateFilter( lookup_type='gte', name='shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped after ' + DATE_INPUT_HELP ) class Meta: model = ShipmentDBView fields = () class ReceivedItemsByDonorOrPartnerReportFilter(ReportFilter): shipped_before = django_filters.DateFilter( lookup_type='lte', name='shipment__shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped before ' + DATE_INPUT_HELP ) shipped_after = django_filters.DateFilter( lookup_type='gte', name='shipment__shipment_date', # match the M/D/Y display format input_formats=DATE_INPUT_FORMATS, label='Shipped after ' + DATE_INPUT_HELP ) partner = django_filters.ModelChoiceFilter( name='shipment__partner', queryset=CtsUser.objects.filter(role=ROLE_PARTNER).order_by('name') ) class Meta: model = DonorShipmentData fields = ('partner', 'donor') class ShipmentMonthlySummaryReportFilter(ReportFilter): partner = django_filters.ModelChoiceFilter( name='partner', queryset=CtsUser.objects.filter(role=ROLE_PARTNER).order_by('name') ) class Meta: model = ShipmentDBView fields = ('partner', )
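# ---------------------------------------------------------------------------
# A minimal usage sketch (editor's addition) showing how these FilterSets are
# constructed: ReportFilter.__init__ pops a required 'user' kwarg before
# delegating to django_filters. The view function and template name below are
# illustrative assumptions, not part of this module.
from django.shortcuts import render


def example_package_report(request):
    fltr = PackageReportFilter(
        request.GET or None,
        queryset=PackageDBView.objects.all(),
        user=request.user,  # consumed by ReportFilter.__init__
    )
    # fltr.qs applies the partner restriction for non-privileged users.
    return render(request, 'reports/package_report.html',
                  {'filter': fltr, 'rows': fltr.qs})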
"""NRF24L01 driver for Micro Python """ import pyb # nRF24L01+ registers CONFIG = const(0x00) EN_RXADDR = const(0x02) SETUP_AW = const(0x03) SETUP_RETR = const(0x04) RF_CH = const(0x05) RF_SETUP = const(0x06) STATUS = const(0x07) RX_ADDR_P0 = const(0x0a) TX_ADDR = const(0x10) RX_PW_P0 = const(0x11) FIFO_STATUS = const(0x17) DYNPD = const(0x1c) # CONFIG register EN_CRC = const(0x08) # enable CRC CRCO = const(0x04) # CRC encoding scheme; 0=1 byte, 1=2 bytes PWR_UP = const(0x02) # 1=power up, 0=power down PRIM_RX = const(0x01) # RX/TX control; 0=PTX, 1=PRX # RF_SETUP register POWER_0 = const(0x00) # -18 dBm POWER_1 = const(0x02) # -12 dBm POWER_2 = const(0x04) # -6 dBm POWER_3 = const(0x06) # 0 dBm SPEED_1M = const(0x00) SPEED_2M = const(0x08) SPEED_250K = const(0x20) # STATUS register RX_DR = const(0x40) # RX data ready; write 1 to clear TX_DS = const(0x20) # TX data sent; write 1 to clear MAX_RT = const(0x10) # max retransmits reached; write 1 to clear # FIFO_STATUS register RX_EMPTY = const(0x01) # 1 if RX FIFO is empty # constants for instructions R_RX_PL_WID = const(0x60) # read RX payload width R_RX_PAYLOAD = const(0x61) # read RX payload W_TX_PAYLOAD = const(0xa0) # write TX payload FLUSH_TX = const(0xe1) # flush TX FIFO FLUSH_RX = const(0xe2) # flush RX FIFO NOP = const(0xff) # use to read STATUS register class NRF24L01: def __init__(self, spi, cs, ce, channel=46, payload_size=16): assert payload_size <= 32 # init the SPI bus and pins spi.init(spi.MASTER, baudrate=4000000, polarity=0, phase=0, firstbit=spi.MSB) cs.init(cs.OUT_PP, cs.PULL_NONE) ce.init(ce.OUT_PP, ce.PULL_NONE) # store the pins self.spi = spi self.cs = cs self.ce = ce # reset everything self.ce.low() self.cs.high() self.payload_size = payload_size self.pipe0_read_addr = None pyb.delay(5) # set address width to 5 bytes and check for device present self.reg_write(SETUP_AW, 0b11) if self.reg_read(SETUP_AW) != 0b11: raise OSError("nRF24L01+ Hardware not responding") # disable dynamic payloads self.reg_write(DYNPD, 0) # auto retransmit delay: 1750us # auto retransmit count: 8 self.reg_write(SETUP_RETR, (6 << 4) | 8) # set rf power and speed self.set_power_speed(POWER_3, SPEED_250K) # Best for point to point links # init CRC self.set_crc(2) # clear status flags self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT) # set channel self.set_channel(channel) # flush buffers self.flush_rx() self.flush_tx() def reg_read(self, reg): self.cs.low() self.spi.send_recv(reg) buf = self.spi.recv(1) self.cs.high() return buf[0] def reg_write(self, reg, buf): self.cs.low() status = self.spi.send_recv(0x20 | reg)[0] self.spi.send(buf) self.cs.high() return status def flush_rx(self): self.cs.low() self.spi.send(FLUSH_RX) self.cs.high() def flush_tx(self): self.cs.low() self.spi.send(FLUSH_TX) self.cs.high() # power is one of POWER_x defines; speed is one of SPEED_x defines def set_power_speed(self, power, speed): setup = self.reg_read(RF_SETUP) & 0b11010001 self.reg_write(RF_SETUP, setup | power | speed) # length in bytes: 0, 1 or 2 def set_crc(self, length): config = self.reg_read(CONFIG) & ~(CRCO | EN_CRC) if length == 0: pass elif length == 1: config |= EN_CRC else: config |= EN_CRC | CRCO self.reg_write(CONFIG, config) def set_channel(self, channel): self.reg_write(RF_CH, min(channel, 125)) # address should be a bytes object 5 bytes long def open_tx_pipe(self, address): assert len(address) == 5 self.reg_write(RX_ADDR_P0, address) self.reg_write(TX_ADDR, address) self.reg_write(RX_PW_P0, self.payload_size) # address should be a bytes object 5 
bytes long # pipe 0 and 1 have 5 byte address # pipes 2-5 use same 4 most-significant bytes as pipe 1, plus 1 extra byte def open_rx_pipe(self, pipe_id, address): assert len(address) == 5 assert 0 <= pipe_id <= 5 if pipe_id == 0: self.pipe0_read_addr = address if pipe_id < 2: self.reg_write(RX_ADDR_P0 + pipe_id, address) else: self.reg_write(RX_ADDR_P0 + pipe_id, address[0]) self.reg_write(RX_PW_P0 + pipe_id, self.payload_size) self.reg_write(EN_RXADDR, self.reg_read(EN_RXADDR) | (1 << pipe_id)) def start_listening(self): self.reg_write(CONFIG, self.reg_read(CONFIG) | PWR_UP | PRIM_RX) self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT) if self.pipe0_read_addr is not None: self.reg_write(RX_ADDR_P0, self.pipe0_read_addr) self.flush_rx() self.flush_tx() self.ce.high() pyb.udelay(130) def stop_listening(self): self.ce.low() self.flush_tx() self.flush_rx() # returns True if any data available to recv def any(self): return not bool(self.reg_read(FIFO_STATUS) & RX_EMPTY) def recv(self): # get the data self.cs.low() self.spi.send(R_RX_PAYLOAD) buf = self.spi.recv(self.payload_size) self.cs.high() # clear RX ready flag self.reg_write(STATUS, RX_DR) return buf # blocking wait for tx complete def send(self, buf, timeout=500): send_nonblock = self.send_start(buf) start = pyb.millis() result = None while result is None and pyb.elapsed_millis(start) < timeout: result = self.send_done() # 1 == success, 2 == fail if result == 2: raise OSError("send failed") # non-blocking tx def send_start(self, buf): # power up self.reg_write(CONFIG, (self.reg_read(CONFIG) | PWR_UP) & ~PRIM_RX) pyb.udelay(150) # send the data self.cs.low() self.spi.send(W_TX_PAYLOAD) self.spi.send(buf) if len(buf) < self.payload_size: self.spi.send(b'\x00' * (self.payload_size - len(buf))) # pad out data self.cs.high() # enable the chip so it can send the data self.ce.high() pyb.udelay(15) # needs to be >10us self.ce.low() # returns None if send still in progress, 1 for success, 2 for fail def send_done(self): if not (self.reg_read(STATUS) & (TX_DS | MAX_RT)): return None # tx not finished # either finished or failed: get and clear status flags, power down status = self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT) self.reg_write(CONFIG, self.reg_read(CONFIG) & ~PWR_UP) return 1 if status & TX_DS else 2
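# ---------------------------------------------------------------------------
# Minimal point-to-point sketch (editor's addition). The SPI bus and CS/CE
# pin names below are assumptions for a pyboard; adjust them to the actual
# wiring. Both ends of the link must agree on channel, payload_size and the
# pipe addresses.
if __name__ == '__main__':
    nrf = NRF24L01(pyb.SPI(2), pyb.Pin('Y5'), pyb.Pin('Y4'), payload_size=8)
    nrf.open_tx_pipe(b'\xe1\xf0\xf0\xf0\xf0')     # address we send to
    nrf.open_rx_pipe(1, b'\xd2\xf0\xf0\xf0\xf0')  # address we listen on
    nrf.start_listening()
    while True:
        if nrf.any():
            print('received:', nrf.recv())
        pyb.delay(10)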
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = "Alex Wallar <aw204@st-andrews.ac.uk>" import sys import random import math as np # stdlib math aliased as np; no numpy dependency import pygame class PolyObstacle: """ Object that represents an obstacle defined by a series of points (in the node list) which make up a set of lines. These lines represent the exterior of the obstacle """ def __init__(self, _nodes, _screen, **kwargs): """ Creates a PolyObstacle instance and initializes its attributes @param _nodes A list of nodes used to represent the vertices of the polygon @param _screen The PyGame screen that is used to draw the obstacle """ ## A list of nodes used to represent the vertices self.nodes = _nodes ## A dictionary of colors defined in pygame self.colors = pygame.color.THECOLORS ## The PyGame screen that is used to draw the obstacle self.screen = _screen ## Boundaries of the simulation self.boundary = (_screen.get_width(), _screen.get_height()) ## Defines whether the obstacle is dynamic or not self.dynamic = kwargs.get("dynamic", False) ## Velocity of the obstacle self.velocity = [3, 0] ## The displacement of the obstacle self.displacement = 0 ## Max displacement allowed self.max_displacement = 60 ## List of static obstacles self.obstacles = list() ## Start point self.start_point = kwargs.get("start_point", None) ## End point self.end_point = kwargs.get("end_point", None) self.estimatePoly() def removeSelfFromObstacleList(self): """ Removes self from obstacle list """ for obst in self.obstacles: if id(self) == id(obst): self.obstacles.remove(obst) def norm(self, p1, p2): """ Gets the Euclidean distance between p1 and p2 @param p1, p2 Points in space @return The distance between p1 and p2 """ return np.sqrt(pow(p1[0] - p2[0], 2) + pow(p1[1] - p2[1], 2)) def estimatePoly(self): """ Tries to estimate the polygon as a circle (very useful for environments with many obstacles i.e. a random field of obstacles) """ ## The average point in the polygon.
Represents the ## center of the enclosing circle self.avgPoint = map( lambda p: p / len(self.nodes), reduce( lambda p1, p2: ( p1[0] + p2[0], p1[1] + p2[1] ), self.nodes ) ) ## The maximum distance between any vertex and the average point self.maxDist = max( [ self.norm( p, self.avgPoint ) for p in self.nodes ] ) def detectCollision(self, pStart, pEnd): """ Detects if there is a collision between the obstacle and the line <pStart, pEnd> @param pStart The starting point of the line @param pEnd The ending point of the line @return A boolean value representing whether a collision occurred """ interCross = lambda p1, p2, q1, q2: ( ( (p1[0] - p2[0]) * (q1[1] - p1[1]) - (p1[1] - p2[1]) * (q1[0] - p1[0]) ) * ( (p1[0] - p2[0]) * (q2[1] - p1[1]) - (p1[1] - p2[1]) * (q2[0] - p1[0]) ) ) < 0 interCheck = lambda p1, p2, q1, q2: ( max(p1[0], p2[0]) >= min(q1[0], q2[0]) and max(q1[0], q2[0]) >= min(p1[0], p2[0]) and max(p1[1], p2[1]) >= min(q1[1], q2[1]) and max(q1[1], q2[1]) >= min(p1[1], p2[1]) ) vecList = [[self.nodes[0], self.nodes[-1]]] for k in range(len(self.nodes) - 1): vecList += [ [ self.nodes[k], self.nodes[k + 1] ] ] return any( map( lambda p: ( interCross( p[0], p[1], pStart, pEnd ) and interCheck( p[0], p[1], pStart, pEnd ) ), vecList ) ) def getClosestPoint(self, a, b, p): """ Gets the closest point on line <a, b> to point p @param a The starting point on the line @param b The ending point of the line @param p The point to which the closest distance will be computed @return The closest point on line <a, b> to point p """ #pygame.draw.line(self.screen, self.colors["orange"], a, b, 4) a = map(float, a) b = map(float, b) p = map(float, p) xGreater = lambda r: r[0] >= max(a[0], b[0]) xLess = lambda r: r[0] <= min(a[0], b[0]) yGreater = lambda r: r[1] >= max(a[1], b[1]) yLess = lambda r: r[1] <= min(a[1], b[1]) if ( (xGreater(p) or xLess(p)) and (yGreater(p) or yLess(p)) ): if self.norm(a, p) < self.norm(b, p): return a else: return b else: a_to_p = [ float(p[0] - a[0]), float(p[1] - a[1]) ] a_to_b = [ float(b[0] - a[0]), float(b[1] - a[1]) ] atb2 = a_to_b[0] ** 2 + a_to_b[1] ** 2 atp_dot_atb = a_to_p[0] * a_to_b[0] + a_to_p[1] * a_to_b[1] t = float(atp_dot_atb) / float(atb2) retVal = ( float(a[0]) + a_to_b[0] * t, float(a[1]) + a_to_b[1] * t ) # Clamp to the nearer endpoint when the projection falls outside the segment if ( (xGreater(retVal) or xLess(retVal)) and (yGreater(retVal) or yLess(retVal)) ): if self.norm(a, retVal) < self.norm(b, retVal): return a else: return b return retVal def rayintersectseg(self, p, edge): """ Determines if a ray from point p intersects with an edge, edge.
Used to determine if a point p is inside the polygon @param p The point to be checked @param edge The edge that will be checked @return True if a ray from point p intersects with edge and false otherwise """ _eps = 0.00001 _huge = sys.float_info.max _tiny = sys.float_info.min a, b = edge if a[1] > b[1]: a, b = b, a if p[1] == a[1] or p[1] == b[1]: p = (p[0], p[1] + _eps) intersect = False if (p[1] > b[1] or p[1] < a[1]) or (p[0] > max(a[0], b[0])): return False if p[0] < min(a[0], b[0]): intersect = True else: if abs(a[0] - b[0]) > _tiny: m_red = (b[1] - a[1]) / float(b[0] - a[0]) else: m_red = _huge if abs(a[0] - p[0]) > _tiny: m_blue = (p[1] - a[1]) / float(p[0] - a[0]) else: m_blue = _huge intersect = m_blue >= m_red return intersect def _odd(self, x): """ Determines if an integer, x, is odd @param x The integer to be checked @return True if x is odd, false otherwise """ return x % 2 == 1 def pointInPoly(self, p): """ Determines if a point p is inside the polygon represented by this PolyObstacle object. It does this by checking whether the number of ray intersections is odd or even. If the number is odd, the point is inside the polygon, otherwise it is not. @param p The point to be checked @return True if the point is in the polygon and false otherwise """ vecList = [[self.nodes[0], self.nodes[-1]]] for k in range(len(self.nodes) - 1): vecList += [[self.nodes[k], self.nodes[k+1]]] return self._odd( sum( self.rayintersectseg(p, edge) for edge in vecList ) ) def pointAllowed(self, b, p): """ Checks if a point is allowed, meaning no collisions occur @param b The boid object that will be checked @param p The point that will be checked @return True if allowed, false otherwise """ return ( ( self.norm( self.getPoint(p), p ) > b.radius ) and ( not self.pointInPoly(p) ) ) def getPoint(self, p): """ Gets the closest point from the polygon to p @param p The point to be checked @return The closest point that lies on the polygon exterior to p """ vecList = list() # [[self.nodes[0],self.nodes[-1]]] for k in range(-1, len(self.nodes) - 1): vecList += [[self.nodes[k], self.nodes[k+1]]] #print vecList cpList = map( lambda v: self.getClosestPoint(v[0], v[1], p), vecList ) dList = map( lambda pv: self.norm(p, pv), cpList ) retVal = [ cpList[i] for i, j in enumerate(dList) if j == min(dList) ][0] #pygame.draw.circle(self.screen, self.colors["green"], map(int, retVal), 5) return retVal def getRadius(self): """ Gets the 'radius' of the checking point. Only used for conformity with circle obstacles that have not been included in this repository @return 1 """ return 1 def checkCollisionWithOtherObstacles(self, node): """ Check to see if there is a collision with a static obstacle """ # check for every static obstacle's nodes for obstacle in self.obstacles: if obstacle.pointInPoly(node): return obstacle if self.norm(node, obstacle.getPoint(node)) <= 10: return obstacle return None def checkNoGoZones(self, node): """ Checks whether a node lies within 100 pixels of the start or end point """ distance_start = np.sqrt( (self.start_point[0] - node[0]) ** 2 + (self.start_point[1] - node[1]) ** 2 ) distance_end = np.sqrt( (self.end_point[0] - node[0]) ** 2 + (self.end_point[1] - node[1]) ** 2 ) if distance_start < 100 or distance_end < 100: return True else: return False def translate(self): """ Translate obstacle """ # check collision for node in self.nodes: # collided with another obstacle?
obst = self.checkCollisionWithOtherObstacles(node) in_nogo_zones = self.checkNoGoZones(node) if obst or in_nogo_zones: self.displacement = 0 self.velocity[0] *= -1 self.velocity[1] *= -1 break # hit boundary? x_collision = node[0] + 10 > self.boundary[0] or node[0] < 10 y_collision = node[1] + 10 > self.boundary[1] or node[1] < 10 if x_collision or y_collision: self.displacement = -40 self.velocity[0] *= -1 self.velocity[1] *= -1 break # change direction if max displacement reached if self.displacement >= self.max_displacement: self.change_direction() # translate for i in range(len(self.nodes)): curr_node = self.nodes[i] # tuples are immutable hence convert coord = list(curr_node) orig_coord = list(curr_node) # translate x, y coord[0] += self.velocity[0] coord[1] += self.velocity[1] # convert back to tuple and replace old node self.nodes[i] = tuple(coord) # record displacement self.displacement += self.norm(orig_coord, coord) def determine_last_direction(self): x = self.velocity[0] y = self.velocity[1] if x != 0: if x > 0: return "RIGHT" elif x < 0: return "LEFT" elif y != 0: if y > 0: return "UP" elif y < 0: return "DOWN" def change_direction(self, force_change=False, direction=None): """ Change direction """ change_direction = False last_direction = self.determine_last_direction() curr_direction = None # change direction? if random.random() > 0.5: change_direction = True if change_direction or force_change: # determine direction if direction is None: direction = random.random() # up, down, left, right orig_velocity = filter(lambda i: i != 0, self.velocity)[0] if direction <= 0.25: # up curr_direction = "UP" self.velocity[0] = 0 self.velocity[1] = orig_velocity elif direction <= 0.5: # down curr_direction = "DOWN" self.velocity[0] = 0 self.velocity[1] = orig_velocity * -1 elif direction <= 0.75: # left curr_direction = "LEFT" self.velocity[0] = orig_velocity * -1 self.velocity[1] = 0 else: # right curr_direction = "RIGHT" self.velocity[0] = orig_velocity self.velocity[1] = 0 if last_direction == curr_direction: if last_direction == "UP": self.change_direction(True, 1.0) # right elif last_direction == "DOWN": self.change_direction(True, 0.75) # left elif last_direction == "LEFT": self.change_direction(True, 0.25) # up elif last_direction == "RIGHT": self.change_direction(True, 0.5) # down self.displacement = 0 def draw(self): """ Draws the polygon on the PyGame screen """ if self.dynamic: self.translate() pygame.draw.polygon( self.screen, (60, 60, 60), self.nodes ) else: pygame.draw.polygon( self.screen, self.colors["grey"], self.nodes )
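# ---------------------------------------------------------------------------
# Minimal pygame harness (editor's addition) exercising the class above. The
# window size and the square obstacle are illustrative values.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    clock = pygame.time.Clock()
    obstacle = PolyObstacle(
        [(100, 100), (200, 100), (200, 200), (100, 200)],
        screen,
        dynamic=False,  # dynamic=True also requires start_point/end_point
    )
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        screen.fill(obstacle.colors["white"])
        obstacle.draw()
        pygame.display.flip()
        clock.tick(30)
    pygame.quit()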