#!/usr/bin/env python
# Small wrapper script for weeder2, which needs the FreqFiles directory
# where it is executed. This script allows running weeder2 from anywhere.
import os
import sys
import argparse
import subprocess as sp

# Weeder install dir
weeder_dir = os.path.realpath(
    os.path.join(os.path.dirname(__file__), "..", "share", "weeder2"))
weeder_exe = "weeder2"

weeder_help = sp.check_output(
    os.path.join(weeder_dir, weeder_exe), stderr=sp.STDOUT).decode()

parser = argparse.ArgumentParser()
parser.add_argument("-f", dest="fname")
args, unknownargs = parser.parse_known_args()

if not args.fname:
    print(weeder_help)
    sys.exit()

fname = os.path.abspath(args.fname)
rest = " ".join(unknownargs)
cmd = "./{} -f {} {}".format(weeder_exe, fname, rest)
sys.exit(sp.call(cmd, shell=True, cwd=weeder_dir))
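# The wrapper above leans on parse_known_args(): known options (-f) are
# parsed, everything else is returned untouched for verbatim forwarding to
# weeder2. A minimal sketch of that split (the extra flags below are
# illustrative only, not a documented weeder2 interface):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument("-f", dest="fname")
#   args, unknown = parser.parse_known_args(["-f", "input.fa", "-O", "HS"])
#   args.fname  -> 'input.fa'
#   unknown     -> ['-O', 'HS']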
        xml_role_instance = ElementTree.SubElement(
            xml_role_instances, 'RoleInstance')
        xml_role_instance_id = ElementTree.SubElement(
            xml_role_instance, 'Id')
        xml_role_instance_id.text = self._get_role_instance_id()
        xml_role_properties = ElementTree.SubElement(
            xml_role_instance, 'Properties')
        for name, value in properties.items():
            ElementTree.SubElement(
                xml_role_properties, 'Property', name=name, value=value)
        return self._encode_xml(xml_root)

    def _get_goal_state(self, force_update=False):
        if not self._goal_state or force_update:
            self._goal_state = self._wire_server_request(
                "machine?comp=goalstate").GoalState
        expected_state = self._goal_state.Machine.ExpectedState
        if expected_state != GOAL_STATE_STARTED:
            raise exception.CloudbaseInitException(
                "Invalid machine expected state: %s" % expected_state)
        return self._goal_state

    def _get_incarnation(self):
        goal_state = self._get_goal_state()
        return goal_state.Incarnation.cdata

    def _get_container_id(self):
        goal_state = self._get_goal_state()
        return goal_state.Container.ContainerId.cdata

    def _get_role_instance_config(self):
        goal_state = self._get_goal_state()
        role_instance = goal_state.Container.RoleInstanceList.RoleInstance
        return role_instance.Configuration

    def _get_role_instance_id(self):
        goal_state = self._get_goal_state()
        role_instance = goal_state.Container.RoleInstanceList.RoleInstance
        return role_instance.InstanceId.cdata

    def _post_health_status(self, state, sub_status=None, description=None):
        health_report_xml = self._get_health_report_xml(
            state, sub_status, description)
        LOG.debug("Health data: %s", health_report_xml)
        self._wire_server_request(
            "machine?comp=health", health_report_xml, parse_xml=False)

    def provisioning_started(self):
        self._post_health_status(
            HEALTH_STATE_NOT_READY,
            HEALTH_SUBSTATE_PROVISIONING,
            "Cloudbase-Init is preparing your computer for first use...")

    def provisioning_completed(self):
        self._post_health_status(HEALTH_STATE_READY)

    def provisioning_failed(self):
        self._post_health_status(
            HEALTH_STATE_NOT_READY,
            HEALTH_SUBSTATE_PROVISIONING_FAILED,
            "Provisioning failed")

    def _post_role_properties(self, properties):
        role_properties_xml = self._get_role_properties_xml(properties)
        LOG.debug("Role properties data: %s", role_properties_xml)
        self._wire_server_request(
            "machine?comp=roleProperties", role_properties_xml,
            parse_xml=False)

    @property
    def can_post_rdp_cert_thumbprint(self):
        return True

    def post_rdp_cert_thumbprint(self, thumbprint):
        properties = {ROLE_PROPERTY_CERT_THUMB: thumbprint}
        self._post_role_properties(properties)

    def _get_hosting_environment(self):
        config = self._get_role_instance_config()
        return self._wire_server_request(config.HostingEnvironmentConfig.cdata)

    def _get_shared_config(self):
        config = self._get_role_instance_config()
        return self._wire_server_request(config.SharedConfig.cdata)

    def _get_extensions_config(self):
        config = self._get_role_instance_config()
        return self._wire_server_request(config.ExtensionsConfig.cdata)

    def _get_full_config(self):
        config = self._get_role_instance_config()
        return self._wire_server_request(config.FullConfig.cdata)

    @contextlib.contextmanager
    def _create_transport_cert(self, cert_mgr):
        x509_thumbprint, x509_cert = cert_mgr.create_self_signed_cert(
            "CN=Cloudbase-Init AzureService Transport",
            machine_keyset=True,
            store_name=CONF.azure.transport_cert_store_name)
        try:
            yield (x509_thumbprint, x509_cert)
        finally:
            cert_mgr.delete_certificate_from_store(
                x509_thumbprint, machine_keyset=True,
                store_name=CONF.azure.transport_cert_store_name)

    def _get_encoded_cert(self, cert_url, transport_cert):
        cert_config = self._wire_server_request(
            cert_url,
            headers={"x-ms-guest-agent-public-x509-cert":
                     transport_cert.replace("\r\n", "")})
        cert_data = cert_config.CertificateFile.Data.cdata
        cert_format = cert_config.CertificateFile.Format.cdata
        return cert_data, cert_format

    def get_server_certs(self):
        def _get_store_location(store_location):
            if store_location == u"System":
                return constant.CERT_LOCATION_LOCAL_MACHINE
            else:
                return store_location

        certs_info = []
        config = self._get_role_instance_config()
        if not hasattr(config, 'Certificates'):
            return certs_info

        cert_mgr = x509.CryptoAPICertManager()
        with self._create_transport_cert(cert_mgr) as (
                transport_cert_thumbprint, transport_cert):
            cert_url = config.Certificates.cdata
            cert_data, cert_format = self._get_encoded_cert(
                cert_url, transport_cert)
            pfx_data = cert_mgr.decode_pkcs7_base64_blob(
                cert_data, transport_cert_thumbprint,
                machine_keyset=True,
                store_name=CONF.azure.transport_cert_store_name)

            host_env = self._get_hosting_environment()
            host_env_config = host_env.HostingEnvironmentConfig
            for cert in host_env_config.StoredCertificates.StoredCertificate:
                certs_info.append({
                    "store_name": cert["storeName"],
                    "store_location": _get_store_location(
                        cert["configurationLevel"]),
                    "certificate_id": cert["certificateId"],
                    "name": cert["name"],
                    "pfx_data": pfx_data,
                })
        return certs_info

    def get_instance_id(self):
        return self._get_role_instance_id()

    def _get_config_set_drive_path(self):
        if not self._config_set_drive_path:
            base_paths = self._osutils.get_logical_drives()
            for base_path in base_paths:
                tag_path = os.path.join(base_path, OVF_ENV_DRIVE_TAG)
                if os.path.exists(tag_path):
                    self._config_set_drive_path = base_path
            if not self._config_set_drive_path:
                raise exception.ItemNotFoundException(
                    "No drive containing file %s could be found" %
                    OVF_ENV_DRIVE_TAG)
        return self._config_set_drive_path

    def _get_ovf_env_path(self):
        base_path = self._get_config_set_drive_path()
        ovf_env_path = os.path.join(base_path, OVF_ENV_FILENAME)
        if not os.path.exists(ovf_env_path):
            raise exception.ItemNotFoundException(
                "ovf-env path does not exist: %s" % ovf_env_path)
        LOG.debug("ovf-env path: %s", ovf_env_path)
        return ovf_env_path

    def _get_ovf_env(self):
        if not self._ovf_env:
            ovf_env_path = self._get_ovf_env_path()
            self._ovf_env = untangle.parse(ovf_env_path)
        return self._ovf_env

    def get_admin_username(self):
        ovf_env = self._get_ovf_env()
        prov_section = ovf_env.Environment.wa_ProvisioningSection
        win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
        return win_prov_conf_set.AdminUsername.cdata

    def get_admin_password(self):
        ovf_env = self._get_ovf_env()
        prov_section = ovf_env.Environment.wa_ProvisioningSection
        win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
        return win_prov_conf_set.AdminPassword.cdata

    def get_host_name(self):
        ovf_env = self._get_ovf_env()
        prov_section = ovf_env.Environment.wa_ProvisioningSection
        win_prov_conf_set = prov_section.WindowsProvisioningConfigurationSet
        return win_prov_conf_set.ComputerName.cdata

    def
e_ref, 'disk_format': 'vhd'} conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), network_info, image_meta, resize_instance=True) self.assertEqual(self.called, True) self.assertEqual(self.fake_vm_start_called, True) conn.finish_revert_migration(instance) self.assertEqual(self.fake_finish_revert_migration_called, True) def test_finish_migrate(self): instance = db.instance_create(self.context, self.instance_values) self.called = False self.fake_vm_start_called = False def fake_vm_start(*args, **kwargs): self.fake_vm_start_called = True def fake_vdi_resize(*args, **kwargs): self.called = True self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) self.stubs.Set(stubs.FakeSessionForMigrationTests, "VDI_resize_online", fake_vdi_resize) stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) stubs.stubout_loopingcall_start(self.stubs) conn = xenapi_conn.get_connection(False) network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, {'broadcast': '192.168.0.255', 'dns': ['192.168.0.1'], 'gateway': '192.168.0.1', 'gateway_v6': 'dead:beef::1', 'ip6s': [{'enabled': '1', 'ip': 'dead:beef::dcad:beff:feef:0', 'netmask': '64'}], 'ips': [{'enabled': '1', 'ip': '192.168.0.100', 'netmask': '255.255.255.0'}], 'label': 'fake', 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'} conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), network_info, image_meta, resize_instance=True) self.assertEqual(self.called, True) self.assertEqual(self.fake_vm_start_called, True) def test_finish_migrate_no_local_storage(self): tiny_type_id = \ instance_types.get_instance_type_by_name('m1.tiny')['id'] self.instance_values.update({'instance_type_id': tiny_type_id, 'root_gb': 0}) instance = db.instance_create(self.context, self.instance_values) def fake_vdi_resize(*args, **kwargs): raise Exception("This shouldn't be called") self.stubs.Set(stubs.FakeSessionForMigrationTests, "VDI_resize_online", fake_vdi_resize) stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) stubs.stubout_loopingcall_start(self.stubs) conn = xenapi_conn.get_connection(False) network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, {'broadcast': '192.168.0.255', 'dns': ['192.168.0.1'], 'gateway': '192.168.0.1', 'gateway_v6': 'dead:beef::1', 'ip6s': [{'enabled': '1', 'ip': 'dead:beef::dcad:beff:feef:0', 'netmask': '64'}], 'ips': [{'enabled': '1', 'ip': '192.168.0.100', 'netmask': '255.255.255.0'}], 'label': 'fake', 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'} conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), network_info, image_meta, resize_instance=True) def test_finish_migrate_no_resize_vdi(self): instance = db.instance_create(self.context, self.instance_values) def fake_vdi_resize(*args, **kwargs): raise Exception("This shouldn't be called") self.stubs.Set(stubs.FakeSessionForMigrationTests, "VDI_resize_online", fake_vdi_resize) stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) stubs.stubout_loopingcall_start(self.stubs) conn = xenapi_conn.get_connection(False) network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, {'broadcast': '192.168.0.255', 'dns': ['192.168.0.1'], 'gateway': '192.168.0.1', 'gateway_v6': 'dead:beef::1', 'ip6s': [{'enabled': '1', 'ip': 'dead:beef::dcad:beff:feef:0', 'netmask': '64'}], 'ips': [{'enabled': '1', 'ip': 
'192.168.0.100',
'netmask': '255.255.255.0'}], 'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] # Resize instance would be determined by the compute call image_meta = {'id': instance.image_ref, 'disk_format': 'vhd'} conn.finish_migration(self.context, self.migration, instance, dict(base_copy='hurr', cow='durr'), network_info, image_meta, resize_instance=False) class XenAPIImageTypeTestCase(test.TestCase): """Test ImageType class.""" def test_to_string(self): """Can convert from type id to type string.""" self.assertEquals( vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL), vm_utils.ImageType.KERNEL_STR) def test_from_string(self): """Can convert from string to type id.""" self.assertEquals( vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR), vm_utils.ImageType.KERNEL) class XenAPIDetermineDiskImageTestCase(test.TestCase): """Unit tests for code that detects the ImageType.""" def setUp(self): super(XenAPIDetermineDiskImageTestCase, self).setUp() glance_stubs.stubout_glance_client(self.stubs) class FakeInstance(object): pass self.fake_instance = FakeInstance() self.fake_instance.id = 42 self.fake_instance.os_type = 'linux' self.fake_instance.architecture = 'x86-64' def assert_disk_type(self, image_meta, expected_disk_type): actual = vm_utils.VMHelper.determine_disk_image_type(image_meta) self.assertEqual(expected_disk_type, actual) def test_machine(self): image_meta = {'id': 'a', 'disk_format': 'ami'} self.assert_disk_type(image_meta, vm_utils.ImageType.DISK) def test_raw(self): image_meta = {'id': 'a', 'disk_format': 'raw'} self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW) def test_vhd(self): image_meta = {'id': 'a', 'disk_format': 'vhd'} self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD) class CompareVersionTestCase(test.TestCase): def test_less_than(self): """Test that cmp_version compares a as less than b""" self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0) def test_greater_than(self): """Test that cmp_version compares a as greater than b""" self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0) def test_equal(self): """Test that cmp_version compares a as equal to b""" self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0) def test_non_lexical(self): """Test that cmp_version compares non-lexically""" self.assertTrue(vmops.c
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

import os
import json
import platform
from collections import defaultdict

from anaconda_go.lib import go
from anaconda_go.lib.plugin import typing

cachepath = {
    'linux': os.path.join('~', '.local', 'share', 'anaconda', 'cache'),
    'darwin': os.path.join('~', 'Library', 'Cache', 'anaconda'),
    'windows': os.path.join(os.getenv('APPDATA') or '~', 'Anaconda', 'Cache')
}
cache_directory = os.path.expanduser(
    cachepath.get(platform.system().lower())
)

PACKAGES_CACHE = defaultdict(lambda: [])


def append(package: typing.Dict) -> None:
    """Append the given package into the cache
    """
    global PACKAGES_CACHE
    if not package_in_cache(package):
        PACKAGES_CACHE[go.GOROOT].append(package)


def package_in_cache(package: typing.Dict) -> bool:
    """Look for the given package in the cache and return True if it is there
    """
    for pkg in PACKAGES_CACHE[go.GOROOT]:
        if pkg['ImportPath'] == package['ImportPath']:
            return True
    return False


def lookup(node_name: str='') -> typing.Dict:
    """Lookup the given node_name in the cache and return it
    """
    node = {}
    if node_name == '':
        node = PACKAGES_CACHE[go.GOROOT]
    else:
        for pkg in PACKAGES_CACHE[go.GOROOT]:
            guru = pkg.get('Guru')
            if guru is None:
                continue
            path = guru['package'].get('path')
            if path is not None and path == node_name:
                node = guru
                break
            for member in guru['package'].get('members', []):
                if member.get('name') == node_name:
                    node = member
                    break
                for method in member.get('methods', []):
                    if method['name'] == node_name:
                        node = method
                        break
    return node


def persist_package_cache() -> None:
    """Write the contents of the package cache for this GOROOT to disk
    """
    gopath = go.GOPATH.replace(os.path.sep, '_')
    cachefile = os.path.join(cache_directory, gopath, 'packages.cache')
    if not os.path.exists(os.path.dirname(cachefile)):
        os.makedirs(os.path.dirname(cachefile))
    with open(cachefile, 'w') as fd:
        json.dump(PACKAGES_CACHE[go.GOROOT], fd)


def load_package_cache() -> typing.List:
    """Load a previously stored package cache file
    """
    global PACKAGES_CACHE
    gopath = go.GOPATH.replace(os.path.sep, '_')
    cachefile = os.path.join(cache_directory, gopath, 'packages.cache')
    try:
        with open(cachefile, 'r') as fd:
            PACKAGES_CACHE[go.GOROOT] = json.load(fd)
    except FileNotFoundError:
        pass
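# A short usage sketch for the cache API above; the package dict mirrors the
# ImportPath/Guru keys that append() and lookup() inspect (all values are
# invented for illustration):
#
#   pkg = {
#       'ImportPath': 'github.com/example/project',
#       'Guru': {'package': {'path': 'github.com/example/project',
#                            'members': []}},
#   }
#   append(pkg)               # no-op if the ImportPath is already cached
#   node = lookup('github.com/example/project')
#   persist_package_cache()   # writes packages.cache under cache_directory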
ABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. """Module that implements the Hash Queue machinery.""" from __future__ import with_statement import logging import threading import Queue import time from twisted.internet import reactor from twisted.python.util import OrderedDict as TxOrderedDict from ubuntuone.storageprotocol.content_hash import \ content_hash_factory, crc32 from ubuntuone.platform import ( open_file, stat_path, ) from ubuntuone.platform.constants import HASHQUEUE_DELAY NO_TIMESTAMP = None class FixedOrderedDict(TxOrderedDict): """A t.p.u.OrderedDict that behaves like Python 2.7's OrderedDict.""" def popitem(self, last=False): """Take out the first or last item, and return it.""" index = -1 if last else 0 key = self._order[index] value = self[key] del self[key] return (key, value) def clear(self): """Remove every item from the dict.""" self._order = [] TxOrderedDict.clear(self) try: # try to use the OrderedDict from stdlib >= 2.7 from collections import OrderedDict as StdlibOrderedDict OrderedDict = StdlibOrderedDict except ImportError: # if not available, use the patched one based on twisted OrderedDict = FixedOrderedDict class StopHashing(Exception): """The current hash was cancelled.""" class _Hasher(threading.Thread): """Class that lives in another thread, hashing all night long.""" def __init__(self, queue, end_mark, event_queue): self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ.hasher') self.end_mark = end_mark self.queue = queue self.eq = event_queue # mutex to access _should_cancel and _hashing attributes self.mutex = threading.Lock() self._should_cancel = None self._stopped = True # start stopped self.chunk_size = 2**16 self.hashing = None threading.Thread.__init__(self) def run(self): """Run the thread.""" self._stopped = False while True: if self._stopped: break info, timestamp = self.queue.get() if info is self.end_mark: self._stopped = True self.queue.task_done() break path, mdid = info with self.mutex: self.hashing = path m = "Hasher: got file to hash: path %r mdid %s" self.logger.debug(m, path, mdid) now = time.time() delta = timestamp - now if delta > 0: self.logger.trace("Waiting %f before starting hash", delta) time.sleep(delta) try: result = self._hash(path) except (IOError, OSError), e: m = "Hasher: hash error %s (path %r mdid %s)" self.logger.debug(m, e, path, mdid) reactor.callLater(.1, reactor.callFromThread, self.eq.push, "HQ_HASH_ERROR", mdid=mdid) except StopHashing, e: self.logger.debug(str(e)) else: hashdata, crc, size, stat = result self.logger.debug("Hasher: path hash pushed: path=%r hash=%s" " crc=%s size=%d st_ino=%d st_size=%d" " st_mtime=%r", path, hashdata,crc, size, stat.st_ino, stat.st_size, stat.st_mtime) reactor.callFromThread(self.eq.push, "HQ_HASH_NEW", path=path, hash=hashdata, crc32=crc, size=size, stat=stat) finally: with self.mutex: self.hashing = None self.queue.task_done() def stop(self): """Stop the hasher. Will be effective in the next loop if a hash is in progress. 
""" # clear the queue to push a end_mark, just to unblok if we are waiting # for a new item self.queue.clear() # set the end_mark in case we are waiting a path item = (self.end_mark, NO_TIMESTAMP) self.queue.put(item) self._stopped = True def _hash(self, path): """Actually hashes a file.""" hasher = content_hash_factory() crc = 0 size = 0 try: initial_stat = stat_path(path) with open_file(path, 'rb') as fh: while True: # stop hashing if path_to_cancel == path or _stopped is True with self.mutex: path_to_cancel = self._should_cancel if path_to_cancel == path or self._stopped: raise StopHashing('hashing of %r was cancelled' % path) cont = fh.read(self.chunk_size) if not cont: break hasher.update(cont) crc = crc32(cont, crc) size += len(cont) finally: with self.mutex: self._should_cancel = None return hasher.content_hash(), crc, size, initial_stat def busy(self): """Return whether we are busy.""" with self.mutex: return self.hashing def cancel_if_running(self, path): """Request a cancel/stop of the current hash, if it's == path.""" with self.mutex: if self.hashing == path: self._should_cancel = path class HashQueue(object): """Interface between the real Hasher and the rest of the world.""" def __init__(self, event_queue): self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ') self._stopped = False self._queue = UniqueQueue() self._end_mark = object() self.hasher = _Hasher(self._queue, self._end_mark, event_queue) self.hasher.setDaemon(True) self.hasher.start() self.logger.info("HashQueue: _hasher started") def _timestamp(self): """A timestamp with a small delay into the future.""" return time.time() + HASHQUEUE_DELAY def insert(self, path, mdid): """Insert the path of a file to be hashed.""" if self._stopped: self.logger.warning("HashQueue: already stopped when received " "path %r mdid %s", path, mdid) return self.logger.debug("HashQueue: inserting path %r mdid %s", path, mdid) self.hasher.cancel_if_running(path) item = ((path, mdid), self._timestamp()) self._queue.put(item) def shutdown(self): """Shutdown all resources and clear the queue""" # clear the queue self._queue.clear() # stop the hasher self.hasher.stop() self._stopped = True self.logger.info("HashQueue: _hasher stopped") def empty(self): """Return whether we are empty or not""" return self._queue.empty() and not self.hasher.busy() def __len__(self): """Return the length of the queue (not reliable!)""" return self._queue.qsize() def is_hashing(self, path, mdid): """Return if the path is being hashed or in the queue.""" if self.hasher.hashing == path: return True if (path, mdid) in self._queue: return True return False class UniqueQueue(Queue.Queue): """Variant of Queue that only inserts unique items in the Queue.""" def __init__(self, *args, **kwargs): """create the instance""" Queue.Queue.__init__(self, *args, **kwargs) self.logger = logging.getLogger('ubuntuone.SyncDaemon.HQ.Queue') def _init(self, maxsize): """Override the underlaying data initialization.""" s
elf.queue = OrderedDict()
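# The _hash() loop above streams the file in 64 KiB chunks, updating the
# content hash and CRC together so large files never need to fit in memory.
# A standalone sketch of the same pattern using only the standard library
# (hashlib/zlib stand in for ubuntuone's content_hash_factory/crc32):
import hashlib
import zlib

CHUNK_SIZE = 2 ** 16  # 64 KiB, matching _Hasher.chunk_size

def hash_file_sketch(path):
    """Return (hex digest, crc32, size) computed in a single pass."""
    hasher = hashlib.sha1()  # stand-in for content_hash_factory()
    crc = 0
    size = 0
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(CHUNK_SIZE)
            if not chunk:
                break
            hasher.update(chunk)
            crc = zlib.crc32(chunk, crc)
            size += len(chunk)
    return hasher.hexdigest(), crc & 0xffffffff, size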
html = NOTEBOOK_DIV.render( plot_script = script, plot_div = div, ) return encode_utf8(html) def _use_widgets(plot_objects): from .models.widgets import Widget for o in plot_objects: if isinstance(o, Document): if _use_widgets(o.roots): return True else: if any(isinstance(model, Widget) for model in o.references()): return True return False def file_html(plot_objects, resources, title, js_resources=None, css_resources=None, template=FILE, template_variables={}): '''Return an HTML document that embeds Bokeh PlotObject or Document objects. The data for the plot is stored directly in the returned HTML. This is an alias for standalone_html_page_for_models() which supports customizing the JS/CSS resources independently and customizing the jinja2 template. Args: plot_objects (PlotObject or Document or list) : Bokeh object or objects to render typically a PlotObject or Document resources (Resources) : a resource configuration for BokehJS assets title (str) : a title for the HTML document ``<title>`` tags template (Template, optional) : HTML document template (default: FILE) A Jinja2 Template, see bokeh.templates.FILE for the required template parameters template_variables (dict, optional) : variables to be used in the Jinja2 template. If used, the following variable names will be overwritten: title, js_resources, css_resources, plot_script, plot_div Returns: UTF-8 encoded HTML ''' plot_objects = _check_plot_objects(plot_objects) with _ModelInDocument(plot_objects): (docs_json, render_items) = _standalone_docs_json_and_render_items(plot_objects) custom_models = _extract_custom_models(plot_objects) return _html_page_for_render_items(resources, docs_json, render_items, title, custom_models=custom_models, websocket_url=None, js_resources=js_resources, css_resources=css_resources, template=template, template_variables=template_variables, use_widgets=_use_widgets(plot_objects)) # TODO rename this "standalone"? def autoload_static(plot_object, resources, script_path): ''' Return JavaScript code and a script tag that can be used to embed Bokeh Plots. The data for the plot is stored directly in the returned JavaScript code. Args: plot_object (PlotObject or Document) : resources (Resources) : script_path (str) : Returns: (js, tag) : JavaScript code to be saved at ``script_path`` and a ``<script>`` tag to load it Raises: ValueError ''' if resources.mode == 'inline': raise ValueError("autoload_static() requires non-inline resources") # TODO why is this? if resources.dev: raise ValueError("autoload_static() only works with non-dev resources") plot_object = _check_one_plot_object(plot_object) with _ModelInDocument(plot_object): (docs_json, render_items) = _standalone_docs_json_and_render_items([plot_object]) item = render_items[0] model_id = "" if 'modelid' in item: model_id = item['modelid'] doc_id = "" if 'docid' in item: doc_id = item['docid'] js = AUTOLOAD_JS.render( docs_json = serialize_json(docs_json), # TODO we should load all the JS files, but the code # in AUTOLOAD_JS isn't smart enough to deal with it. js_url = resources.js_files[0], css_files = resources.css_files, elementid = item['elementid'], websocket_url = None ) tag = AUTOLOAD_TAG.render( src_path = script_path, elementid = item['elementid'], modelid = model_id, docid = doc_id, loglevel = resources.log_level ) return encode_utf8(js), encode_utf8(tag) def autoload_server(plot_object, app_path="/", session_id=DEFAULT_SESSION_ID, url="default", loglevel="info"): ''' Return a script tag that can be used to embed Bokeh Plots from a Bokeh Server. 
The data for the plot is stored on the Bokeh Server. Args: plot_object (PlotObject) : the object to render from the session, or None for entire document app_path (str, optional) : the server path to the app we want to load session_id (str, optional) : server session ID url (str, optional) : server root URL (where static resources live, not where a specific app lives) loglevel (str, optional) : "trace", "debug", "info", "warn", "error", "fatal" Returns: tag : a ``<script>`` tag that will execute an autoload script loaded from the Bokeh Server ''' if url == "default": url = DEFAULT_SERVER_HTTP_URL elementid = str(uuid.uuid4()) # empty model_id means render the entire doc from session_id model_id = "" if plot_object is not None: model_id = plot_object._id if not url.endswith("/"): url = url + "/" if not app_path.endswith("/"): app_path = app_path + "/" if app_path.startswith("/"): app_path = app_path[1:] src_path = url + app_path + "autoload.js" + "?bokeh-autoload-element=" + elementid tag = AUTOLOAD_TAG.render( src_path = src_path, elementid = elementid, modelid = model_id, sessionid = session_id, loglevel = loglevel ) return encode_utf8(tag) def _script_for_render_items(docs_json, render_items, websocket_url, custom_models, wrap_script=True): # this avoids emitting the "register custom models" code at all # just to register an empty set if (custom_models is not None) and len(custom_models) == 0: custom_models = None plot_js = _wrap_in_function( DOC_JS.render( custom_models=custom_models, websocket_url=websocket_url, docs_json=serialize_json(docs_json), render_items=serialize_json(render_items) ) ) if wrap_script: return SCRIPT_TAG.render(js_code=plot_js) else: return plot_js def _html_page_for_render_i
tems(resources, docs_json, render_items, title, we
bsocket_url, custom_models, js_resources=None, css_resources=None, template=FILE, template_variables={}, use_widgets=True): if resources: if js_resources: warn('Both resources and js_resources provided. resources will override js_resources.') if css_resources: warn('Both resources and css_resources provided. resources will override css_resources.') js_resources = resources css_resources = resources bokeh_js = '' if js_resources: if not css_resources: warn('No Bokeh CSS Resources provided to template. If required you will need to provide them manually.') js_resources = js_resources.use_widgets(use_widgets) bokeh_js = js_resources.render_js() bokeh_css = '' if css_resources: if not js_resources: warn('No Bokeh JS Resources provided to template. If required you will need to provide them manually.') css_resources = css_resources.use_widgets(use_widgets) bokeh_css = css_resources.render_css() script = _script_for_render_items(docs_json, render_items, websocket_url, custom_models) template_variables_full = template_variables.copy() template_variables_full.update(dict( title = title, bokeh_js = bokeh_js, bokeh_css = bokeh_css, plot_script = script, plot_div = "\n".join(_div_for_render_item(item) for item in render_items) ))
# -*- coding: utf-8 -*-
from test_settings import Settings


class TestCase(Settings):
    def test_sidebar(self):
        # Applies the settings.
        self.do_settings()
        # Clicks 'Genel' (General).
        self.driver.find_element_by_css_selector(
            'li.ng-binding:nth-child(3) > a:nth-child(1) > span:nth-child(2)').click()
        # Clicks 'Ogrenci Iletisim Bilgileri' (Student Contact Information).
        self.driver.find_element_by_css_selector('ul.in:nth-child(2) > li:nth-child(2) > a:nth-child(1)').click()
        self.do_login()
        # Clicks 'Genel' (General).
        self.driver.find_element_by_css_selector(
            'li.ng-binding:nth-child(3) > a:nth-child(1) > span:nth-child(2)').click()
        # Clicks 'Ogrenci Iletisim Bilgileri' (Student Contact Information).
        self.driver.find_element_by_css_selector('ul.in:nth-child(2) > li:nth-child(2) > a:nth-child(1)').click()
        # Sends a value to the residence province field.
        self.driver.find_element_by_css_selector('#ikamet_il').send_keys('Bilecik')
        # Sends a value to the residence district field.
        self.driver.find_element_by_css_selector('#ikamet_ilce').send_keys('Merkez')
        # Sends a value to the residence address field.
        self.driver.find_element_by_css_selector('#ikamet_adresi').send_keys('balim sokak')
        # Sends a value to the postal code field.
        self.driver.find_element_by_css_selector('#posta_kodu').send_keys('11000')
        # Sends a value to the phone number field.
        self.driver.find_element_by_css_selector('#tel_no').send_keys('0534626286816')
        # Clicks 'Kaydet' (Save).
        self.driver.find_element_by_css_selector('button.btn-danger:nth-child(1)').click()
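# The test above types into elements immediately after clicking through the
# menu, which can race against slow page loads. If needed, an explicit wait
# can be layered on top; a sketch using Selenium's stock WebDriverWait helper
# (the helper name is ours, the selector is reused from the test):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_and_send_keys(driver, css_selector, value, timeout=10):
    # Block until the element is present in the DOM, then type into it.
    element = WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, css_selector)))
    element.send_keys(value)

# e.g. wait_and_send_keys(self.driver, '#ikamet_il', 'Bilecik')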
low=self.flow, label="Gender") age = RuleSet.objects.get(flow=self.flow, label="Age") # categories should be in the same order as our rules, should have correct counts result = Value.get_value_summary(ruleset=color)[0] self.assertEquals(3, len(result['categories'])) self.assertFalse(result['open_ended']) self.assertResult(result, 0, "Red", 2) self.assertResult(result, 1, "Blue", 1) self.assertResult(result, 2, "Green", 1) # check our age category as well result = Value.get_value_summary(ruleset=age)[0] self.assertEquals(3, len(result['categories'])) self.assertFalse(result['open_ended']) self.assertResult(result, 0, "Child", 1) self.assertResult(result, 1, "Adult", 2) self.assertResult(result, 2, "Senior", 1) # and our gender categories result = Value.get_value_summary(ruleset=gender)[0] self.assertEquals(2, len(result['categories'])) self.assertFalse(result['open_ended']) self.assertResult(result, 0, "Male", 2) self.assertResult(result, 1, "Female", 2) # now filter the results and only get responses by men result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"])])[0] self.assertResult(result, 0, "Red", 1) self.assertResult(result, 1, "Blue", 0) self.assertResult(result, 2, "Green", 1) # what about men that are adults? result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male"]), dict(ruleset=age.pk, categories=["Adult"])])[0] self.assertResult(result, 0, "Red", 0) self.assertResult(result, 1, "Blue", 0) self.assertResult(result, 2, "Green", 0) # union of all genders result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=gender.pk, categories=["Male", "Female"]), dict(ruleset=age.pk, categories=["Adult"])])[0] self.assertResult(result, 0, "Red", 1) self.assertResult(result, 1, "Blue", 1) self.assertResult(result, 2, "Green", 0) # just women adults by group result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0] self.assertResult(result, 0, "Red", 1) self.assertResult(result, 1, "Blue", 1) self.assertResult(result, 2, "Green", 0) # remove one of the women from the group ladies.update_contacts(self.user, [self.c2], False) # get a new summary result = Value.get_value_summary(ruleset=color, filters=[dict(groups=[ladies.pk]), dict(ruleset=age.pk, categories="Adult")])[0] self.assertResult(result, 0, "Red", 1) self.assertResult(result, 1, "Blue", 0) self.assertResult(result, 2, "Green", 0) # ok, back in she goes ladies.update_contacts(self.user, [self.c2], True) # do another run for contact 1 run5 = self.run_color_gender_flow(self.c1, "blue", "male", "16") # totals should reflect the new value, not the old result = Value.get_value_summary(ruleset=color)[0] self.assertResult(result, 0, "Red", 1) self.assertResult(result, 1, "Blue", 2) self.assertResult(result, 2, "Green", 1) # what if we do a partial run? self.send_me
ssage(self.flow, "red", contact=self.c1, restart_participants=True) # should change our male/female breakdown since c1 now no longer has a gender result = Value.get_value_summary(ruleset=gender)[0] self.assertEquals(2, len(result['
categories'])) self.assertResult(result, 0, "Male", 1) self.assertResult(result, 1, "Female", 2) # back to a full flow run5 = self.run_color_gender_flow(self.c1, "blue", "male", "16") # ok, now segment by gender result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(ruleset=gender.pk, categories=["Male", "Female"])) male_result = result[0] self.assertResult(male_result, 0, "Red", 0) self.assertResult(male_result, 1, "Blue", 1) self.assertResult(male_result, 2, "Green", 1) female_result = result[1] self.assertResult(female_result, 0, "Red", 1) self.assertResult(female_result, 1, "Blue", 1) self.assertResult(female_result, 2, "Green", 0) # segment by gender again, but use the contact field to do so result = Value.get_value_summary(ruleset=color, filters=[], segment=dict(contact_field="Gender", values=["MALE", "Female"])) male_result = result[0] self.assertResult(male_result, 0, "Red", 0) self.assertResult(male_result, 1, "Blue", 1) self.assertResult(male_result, 2, "Green", 1) female_result = result[1] self.assertResult(female_result, 0, "Red", 1) self.assertResult(female_result, 1, "Blue", 1) self.assertResult(female_result, 2, "Green", 0) # add in a filter at the same time result = Value.get_value_summary(ruleset=color, filters=[dict(ruleset=color.pk, categories=["Blue"])], segment=dict(ruleset=gender.pk, categories=["Male", "Female"])) male_result = result[0] self.assertResult(male_result, 0, "Red", 0) self.assertResult(male_result, 1, "Blue", 1) self.assertResult(male_result, 2, "Green", 0) female_result = result[1] self.assertResult(female_result, 0, "Red", 0) self.assertResult(female_result, 1, "Blue", 1) self.assertResult(female_result, 2, "Green", 0) # ok, try segmenting by location instead result = Value.get_value_summary(ruleset=color, segment=dict(location="State")) eastern_result = result[0] self.assertEquals('171591', eastern_result['boundary']) self.assertEquals('Eastern Province', eastern_result['label']) self.assertResult(eastern_result, 0, "Red", 0) self.assertResult(eastern_result, 1, "Blue", 0) self.assertResult(eastern_result, 2, "Green", 0) kigali_result = result[1] self.assertEquals('1708283', kigali_result['boundary']) self.assertEquals('Kigali City', kigali_result['label']) self.assertResult(kigali_result, 0, "Red", 0) self.assertResult(kigali_result, 1, "Blue", 2) self.assertResult(kigali_result, 2, "Green", 0) # updating state location leads to updated data self.c2.set_field(self.user, 'state', "Eastern Province") result = Value.get_value_summary(ruleset=color, segment=dict(location="State")) eastern_result = result[0] self.assertEquals('171591', eastern_result['boundary']) self.assertEquals('Eastern Province', eastern_result['label']) self.assertResult(eastern_result, 0, "Red", 0) self.assertResult(eastern_result, 1, "Blue", 1) self.assertResult(eastern_result, 2, "Green", 0) kigali_result = result[1] self.assertEquals('1708283', kigali_result['boundary']) self.assertEquals('Kigali City', kigali_result['label']) self.assertResult(kigali_result, 0, "Red", 0) self.assertResult(kigali_result, 1, "Blue", 1) self.assertResult(kigali_result, 2, "Green", 0) # segment by district instead result = Value.get_value_summary(ruleset=color, segment=dict(parent="1708283", location="District")) # only on district in kigali self.assertEquals(1, len(result)) kigali_result = result[0] self.assertEquals('60485579', kigali_result['boundary']) self.assertEquals('Kigali', kigali_result['label']) self.assertResult(kigali_result, 0, "Red", 0) 
self.assertResult(kigali_result, 1, "Blue", 2) self.assertResult(kigali_result, 2, "Green", 0) # do a sanity check on our choropleth view self.login(self.admin) response = self.client.get(reverse('flows.ruleset_choropleth', args=
_box_tab"}) a= len(mp_boxes) for box in mp_boxes: if(box.text == "Total Lifetime Grosses"): div_content= box.findNext('div') trs = div_content.find_all('tr') for tr in trs: tds = tr.find_all('td') if len(tds) == 3: if tds[0].text.strip() == "Domestic:": arrData["Total Gross"] = tds[1].text.strip() arrData["% ofTotal"] = tds[2].text.strip() arrData[tds[0].text.strip()+"_Gross"] = tds[1].text.strip() arrData[tds[0].text.strip()+"_Percentage"] = tds[2].text.strip() if(box.text == "Domestic Summary"): div_content = box.findNext('div') DS_tables = div_content.find_all('table', attrs = { 'border': '0' , 'cellspacing':'0', 'cellpadding':'0'}) for DS_table in DS_tables: DS_trs = DS_table.find_all('tr') for DS_tr in DS_trs: DS_tr_title = DS_tr.td.text.strip() if(DS_tr_title == "Opening\xa0Weekend:") or (DS_tr_title == "Opening Weekend:"): DS_tr_content = DS_tr.td.findNext('td') if DS_tr_content: arrData["Opening Weekend"] = DS_tr_content.text.strip() arrData["OpeningWknd"] = DS_tr_content.text.strip() elif "(#" in DS_tr_title: arrData['Movie Ranking'] = DS_tr_title elif "%\xa0of\xa0Total\xa0Gross" in DS_tr_title or "% of Total Gross" in DS_tr_title: DS_tr_content = DS_tr.td.findNext('td') if DS_tr_content:
arrData['Percentage of Total Gross'] = DS_tr_content.text.strip()
elif DS_tr_title == "Widest\xa0Release:" or DS_tr_title == "Widest Release:": DS_tr_content = DS_tr.td.findNext('td') if DS_tr_content: arrData['Widest Release'] = DS_tr_content.text.strip() # 14. elif DS_tr_title == "Close\xa0Date:" or DS_tr_title == "Close Date:": DS_tr_content = DS_tr.td.findNext('td') if DS_tr_content: arrData['Close Date'] = DS_tr_content.text.strip() # 15. elif DS_tr_title == "In\xa0Release:" or DS_tr_title == "In Release:": DS_tr_content = DS_tr.td.findNext('td') if DS_tr_content: arrData['In Release'] = DS_tr_content.text.strip() # 15. if(box.text == "The Players"): #print(box.findNext('div')) pass return arrData def get_movie_foreign(link, arrData): try: eachCountry = {} ColumnHeaders= [] url = "http://www.boxofficemojo.com"+ link + "&page=intl" page = urlopen(url) soup = BeautifulSoup(page, "lxml") contents = soup.find('table', attrs={'border': '3' , 'cellspacing':'0', 'cellpadding':'5', 'align':'center', 'style':'margin-top: 5px;'}) if len(contents) == 1: #print(contents) intl_table = contents.tr.td.table if intl_table: trs = intl_table.find_all("tr") if len(trs) == 3: #print ("no data") temp= 0 else: for row,tr in enumerate(trs): if row == 0: tds= tr.find_all("td") # get each header's text for td in tds: header= td.text.strip() if "/" in header: divided_header = header.split('/') ColumnHeaders.append(divided_header[0]) ColumnHeaders.append(divided_header[1]) else: ColumnHeaders.append(td.text.strip()) if(row < 3): # don't save unncessary data continue tds= tr.find_all("td") for column, td in enumerate(tds): # 11. Country, 12.Dist, 13. Release Date, 14.OW, 15.% of Total, 16.Total gross, 17. as of eachCountry[ColumnHeaders[column]] = td.text.strip() save_to_file(FILE_PATH, arrData, eachCountry) #save_to_json(FILE_PATH, arrData, eachCountry) eachCountry.clear() return arrData except Exception as e: logging.exception(e) return arrData def get_movie_detail(movies_list, link, arrData): if link not in movies_list: movies_list.append(link) url = "http://www.boxofficemojo.com"+ link # 1. URL page = urlopen(url) soup = BeautifulSoup(page, "lxml") contents= soup.find('table', attrs={'border': '0' , 'cellspacing':'1', 'cellpadding':'4' , 'bgcolor':'#dcdcdc', 'width':'95%'}) tabledata = contents.find_all("td") name_table = soup.find('table', attrs={'border': '0' , 'cellspacing':'0', 'cellpadding':'0' , 'width':'100%', 'style':'padding-top: 5px;'}) name = name_table.font.b.getText() # 0. Name # 2. Distributor, 3. Release Date, 4. Genre, 5. Runtime, 6. Rating, 7. Budget, 8. 
TotalGross arrData['Name'] = name arrData['URL'] = url if len(tabledata) == 6: Distributor = tabledata[0].b.getText() ReleaseDate = tabledata[1].b.getText() Genre = tabledata[2].b.getText() Runtime = tabledata[3].b.getText() Rating = tabledata[4].b.getText() Budget = tabledata[5].b.getText() arrData['Distributor'] = Distributor arrData['ReleaseDate'] = ReleaseDate arrData['Genre'] = Genre arrData['Runtime'] = Runtime arrData['Rating'] = Rating arrData['Budget'] = Budget #arrData.extend([name , url , Distributor, ReleaseDate,Genre ,Runtime , Rating,Budget]) #add_empty_data(arrData, 1) # match gap for missing column elif len(tabledata) == 7: TotalGross = tabledata[0].b.getText() Distributor = tabledata[1].b.getText() ReleaseDate = tabledata[2].b.getText() Genre = tabledata[3].b.getText() Runtime = tabledata[4].b.getText() Rating = tabledata[5].b.getText() Budget = tabledata[6].b.getText() arrData['TotalGross'] = TotalGross arrData['Distributor'] = Distributor arrData['ReleaseDate'] = ReleaseDate arrData['Genre'] = Genre arrData['Runtime'] = Runtime arrData['Rating'] = Rating arrData['Budget'] = Budget #arrData.extend([ name , url , Distributor, ReleaseDate,Genre ,Runtime , Rating,Budget ,TotalGross]) #print (result) #print contents2[0] return arrData def get_all_movies(): # Alphabet loop for how movies are indexed including # movies that start with a special character or number index = ["NUM"] + list(string.ascii_uppercase) # List of movie urls movies_list = [] # dict data arrData = {} startTime = time.time() lapTime= 0.0 # if you want to jump directly to somewhere (Set None to be not skipped) JumpTo = 'S' IsJumpTarget = False JumpToPage = 8 write_header(FILE_PATH) logging.debug("running...start at : " + str(time.time())) # Loop through the pages for each letter for le
create the listCompleter function with a list to complete from. """ def listCompleter(text, state): line = readline.get_line_buffer() if not line: return [c + " " for c in ll][state] else: return [c + " " for c in ll if c.startswith(line)][state] self.listCompleter = listCompleter def ensure_json_value(value): if is_model(value): return dict(value) else: return value def ensure_json(value): if isinstance(value, (list, tuple)): return [ensure_json_value(w) for w in value] else: return ensure_json_value(value) class EditModel(object): def __init__(self, model_type, current_value, help_map): self.model_type = model_type self.current_value = current_value self.new_value = {} self.help_map = help_map def get_fields(self): required_details = OrderedDict() non_required_details = OrderedDict() for k, f in sorted(get_fields(self.model_type).iteritems()): if is_required(f): required_details[k] = f else: non_required_details[k] = f details = OrderedDict() for k, f in required_details.iteritems(): details[k] = f for k, f in non_required_details.iteritems(): details[k] = f return details def edit_field(self, field_name): new_field_value = self.ask_field(field_name) # field = get_fields(self.current_value).get(field_name) value = ensure_json(new_field_value) self.new_value[field_name] = value def ask_field(self, field_name): field_type = self.model_type.__dict__.get(field_name, None) if not field_type: print "No field of that name." new_value = ask_detail_for_field( field_name, field_type, None, self.help_map) if is_model(new_value): new_value = new_value.to_json() return new_value def print_current(self): fields = self.get_fields() table = [] i = 1 for k, v in fields.iteritems(): value = getattr(self.current_value, k, None) row = [k, convert_for_print(value)] table.append(row) i = i + 1 print tabulate(table) def print_new(self): print self.new_value def convert_value_to_print(value): f = getattr(value, 'to_json', None) if callable(f): value = value.to_json() return value def convert_for_print(value): if isinstance(value, (list, tuple)): if len(value) > 0: value = (convert_value_to_print(w) for w in value) value = "[" + ", ".join(value) + "]" else: value = "" else: value = convert_value_to_print(value) return value def get_type(model): if type(model) == fields.Integer or model == fields.Integer: return 'Integer' elif type(model) == fields.String or model == fields.String: return 'String' else: return model.__name__ def is_required(field): return next((True for x in field.validators if isinstance(x, Required)), False) def convert_to_proper_base_type(base_type, value): ''' Converts the string input in the appropriate value type. ''' if get_type(base_type) == 'Integer': return int(value) elif get_type(base_type) == 'String': return value elif get_type(base_type) == 'Boolean': return bool(value) else: return value def edit_details_for_type(model_type, old_object, help_map={}): ''' Asks for user input to change an existing model. ''' m = EditModel(model_type, old_object, help_map) print print "Current values:" print m.print_current() print selection = "xxx" print print "Caution: the new value will replace the old value, not be added to it." print while selection: selection = raw_input("field to edit ('enter' to finish): ") if selection: print
m.edit_field(selection) print return m.new_value def ask_details_for_type(model_type, ask_only_required=True, help_map={}): ''' Asks fo
r user input to create an object of a specified type. If the type is registered in a model/builder map, the function associated with this type is used to create the object instead of the auto-generated query. ''' if MODEL_MAP.get(model_type, None): func = MODEL_MAP[model_type] return func() required_details = OrderedDict() non_required_details = OrderedDict() values = {} for k, f in sorted(get_fields(model_type).iteritems()): if is_required(f): required_details[k] = f else: non_required_details[k] = f print print "Enter values for fields below. Enter '?' or '? arg1 [arg2]' for help for each field." print print "Required fields:" print "----------------" print for k, f in required_details.iteritems(): while True: value = ask_detail_for_field(k, f, ask_only_required, help_map) if value: values[k] = value break else: print print "This is a required field, please enter value for {}.".format(k) print if not ask_only_required: print print "Optional fields, press 'Enter' to ignore a field." print "-------------------------------------------------" print for k, f in non_required_details.iteritems(): value = ask_detail_for_field(k, f, ask_only_required, help_map) if value: values[k] = value print obj = model_type(**values) return obj def ask_collection_detail(name, detail_type, ask_only_required=True, help_map={}): result = [] print "Enter details for '{}', multiple entries possible, press enter to continue to next field.".format(name) print while True: cd = ask_detail_for_field( name, detail_type, ask_only_required, help_map) if not cd: break else: result.append(cd) return result def parse_for_help(answer, help_func): if answer.startswith('?'): args = answer.split(' ')[1:] if not help_func: print 'Sorry, no help available for this field.' else: print help_func(*args) print return True else: return False def ask_simple_field(name, field_type, help_map={}): type_name = get_type(field_type) answer = raw_input(" - {} ({}): ".format(name, type_name)) if not answer: return None if parse_for_help(answer, help_map.get(name, None)): return ask_simple_field(name, field_type, help_map) try: value = convert_to_proper_base_type(field_type, answer) except Exception as e: print "Can't convert input: ", e return ask_simple_field(name, field_type, help_map) return value def ask_detail_for_field(name, detail_type, ask_only_required=True, help_map={}): value = None if MODEL_MAP.get(type(detail_type), None): func = MODEL_MAP[type(detail_type)] value = func() return value # collections are a special case if type(detail_type) == booby.fields.Collection: # collection value = ask_collection_detail( name, detail_type.model, ask_only_required, help_map) elif is_model(detail_type): # collection, and model field value = ask_details_for_type(detail_type, ask_only_required, help_map) elif issubclass(type(detail_type), booby.fields.Field): # non-collection, and non-model field value = ask_simple_field(name, type(detail_type), help_map) elif issubclass(detail_type, booby.fields.Field): # collection, and non-model field value = ask_simple_field(name, detail_type, help_map) retu
import urllib2, json, time, sys
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-f", dest="fahrenheit", action="store", default=False,
                  type="string", help="Convert to FAHRENHEIT")
parser.add_option("-e", dest="end", action="store", default=False,
                  type="string", help="END date")
parser.add_option("-s", dest="start", action="store", default=False,
                  type="string", help="START date")
parser.add_option("-t", dest="token", action="store", default=False,
                  type="string", help="Weather Underground TOKEN")
(options, args) = parser.parse_args()

if options.token:
    token = options.token
else:
    parser.print_help()
    sys.exit()

if options.start:
    start = options.start
else:
    parser.print_help()
    sys.exit()

if options.end:
    end = options.end
else:
    parser.print_help()
    sys.exit()

if options.fahrenheit:
    fahrenheit = True
else:
    fahrenheit = False

start = datetime.strptime(start, '%Y-%m-%d')
end = datetime.strptime(end, '%Y-%m-%d')
url = ""

if end < start:
    print "Error: end date " + str(end) + " occurs before start date " + str(start)
    sys.exit()

for dt in rrule(DAILY, dtstart=start, until=end):
    total = 0.0
    temp = 0.0
    count = 0
    wunderground_url = "http://api.wunderground.com/api/" + token + "/history_" + dt.strftime("%Y%m%d") + "/q/NY/New_York_City.json"
    try:
        url = urllib2.urlopen(wunderground_url)
        parsed_json = json.loads(url.read())
    except:
        print "Error reading URL " + wunderground_url
        print "Is your token correct?"
        url.close()
        sys.exit()
    try:
        for mean in parsed_json['history']['observations']:
            if fahrenheit:
                total += float(mean['tempi'])
            else:
                total += float(mean['tempm'])
            count += 1
        temp = (total / count)
        print dt.strftime("%Y-%m-%d") + "," + str(temp)
    except:
        print "Error retrieving temperature records for start date " + str(start) + " end date " + str(end)
    url.close()
    time.sleep(10)
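# Note: if a day returns no observations, count stays 0 and the average above
# divides by zero. A guarded sketch of the same computation (field names taken
# from the script; the helper itself is ours):
def mean_temperature(observations, fahrenheit=False):
    """Average tempi/tempm over a day's observations, or None if empty."""
    key = 'tempi' if fahrenheit else 'tempm'
    values = [float(obs[key]) for obs in observations if obs.get(key)]
    if not values:
        return None
    return sum(values) / len(values)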
import logging


def init_logger():
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    logger = logging.getLogger('redberry')
    logger.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    logger.addHandler(console)
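# A quick usage sketch, assuming init_logger() is called once at startup;
# loggers under the 'redberry' namespace then inherit the console handler:
#
#   init_logger()
#   log = logging.getLogger('redberry.http')  # hypothetical child logger
#   log.debug('request handled')              # -> timestamped console line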
from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch from vine import promise from amqp.abstract_channel import AbstractChannel from amqp.exceptions import AMQPNotImplementedError, RecoverableConnectionError from amqp.serialization import dumps class test_AbstractChannel: class Channel(AbstractChannel): def _setup_listeners(self): pass @pytest.fixture(autouse=True) def setup_conn(self): self.conn = Mock(name='connection') self.conn.channels = {} self.channel_id = 1 self.c = self.Channel(self.conn, self.channel_id) self.method = Mock(name='method') self.content = Mock(name='content') self.content.content_encoding = 'utf-8' self.c._METHODS = {(50, 61): self.method} def test_enter_exit(self): self.c.close = Mock(name='close') with self.c: pass self.c.close.assert_called_with() def test_send_method(self): self.c.send_method((50, 60), 'iB', (30, 0)) self.conn.frame_writer.assert_called_with( 1, self.channel_id, (50, 60), dumps('iB', (30, 0)), None, ) def test_send_method__callback(self): callback = Mock(name='callback') p = promise(callback) self.c.send_method((50, 60), 'iB', (30, 0), callback=p) callback.assert_called_with() def test_send_method__wait(self): self.c.wait = Mock(name='wait') self.c.send_method((50, 60), 'iB', (30, 0), wait=(50, 61)) self.c.wait.assert_called_with((50, 61), returns_tuple=False) def test_send_method__no_connection(self): self.c.connection = None with pytest.raises(RecoverableConnectionError): self.c.send_method((50, 60)) def test_send_method__connection_dropped(self): self.c.connection.frame_writer.side_effect = StopIteration with pytest.raises(RecoverableConnectionError): self.c.send_method((50, 60)) def test_close(self): with pytest.raises(NotImplementedError): self.c.close() def test_wait(self): with patch('amqp.abstract_channel.ensure_promise') as ensure_promise: p = ensure_promise.return_value p.ready = False def on_drain(*args, **kwargs): p.ready = True self.conn.drain_events.side_effect = on_drain p.value = (1,), {'arg': 2} self.c.wait((50, 61), timeout=1) self.conn.drain_events.assert_called_with(timeout=1) prev = self.c._pending[(50, 61)] = Mock(name='p2') p.value = None self.c.wait([(50, 61)]) assert self.c._pending[(50, 61)] is prev def test_dispatch_method__content_encoding(self): self.c.auto_decode = True self.method.args = None self.c.dispatch_method((50, 61), 'payload', s
elf.content) self.content.body.decode.side_effect = KeyError() self.c.dispatch_method((50, 61), 'payload', self.content) def test_dispatch_method__unknown_method(self): with pytest.raises(AMQPNotImplementedError): self.c.
dispatch_method((100, 131), 'payload', self.content) def test_dispatch_method__one_shot(self): self.method.args = None p = self.c._pending[(50, 61)] = Mock(name='oneshot') self.c.dispatch_method((50, 61), 'payload', self.content) p.assert_called_with((50, 61), self.content) def test_dispatch_method__one_shot_no_content(self): self.method.args = None self.method.content = None p = self.c._pending[(50, 61)] = Mock(name='oneshot') self.c.dispatch_method((50, 61), 'payload', self.content) p.assert_called_with((50, 61)) assert not self.c._pending def test_dispatch_method__listeners(self): with patch('amqp.abstract_channel.loads') as loads: loads.return_value = [1, 2, 3], 'foo' p = self.c._callbacks[(50, 61)] = Mock(name='p') self.c.dispatch_method((50, 61), 'payload', self.content) p.assert_called_with(1, 2, 3, self.content) def test_dispatch_method__listeners_and_one_shot(self): with patch('amqp.abstract_channel.loads') as loads: loads.return_value = [1, 2, 3], 'foo' p1 = self.c._callbacks[(50, 61)] = Mock(name='p') p2 = self.c._pending[(50, 61)] = Mock(name='oneshot') self.c.dispatch_method((50, 61), 'payload', self.content) p1.assert_called_with(1, 2, 3, self.content) p2.assert_called_with((50, 61), 1, 2, 3, self.content) assert not self.c._pending assert self.c._callbacks[(50, 61)]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-24 07:34
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('appauth', '0016_userprofile_numq'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='exp_data',
            field=models.TextField(default='{}'),
        ),
    ]
#!/usr/bin/python
# vim: set expandtab tabstop=4 shiftwidth=4:
# -*- coding: utf-8 -*-
# gen_cacert <http://rhizomatik.net/>
# Python functions for generating an X509 CA certificate
#
# Copyright (C) 2010 duy at rhizomatik dot net
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""
gen_cacert
Python functions for generating an X509 CA certificate.
Usage: execute ./gen_cacert -h
@author: duy
@organization: rhizomatik labs
@copyright: author
@license: GNU GPL version 3 or any later version (details at http://www.gnu.org)
@contact: duy at rhizomatik dot net
@dependencies: python (>= version 2.5)
@change log:
@TODO:
 * Emit an error/warning when one of the main parameters contains a space;
   currently the text after the space and all following parameters are
   silently ignored
 * Add a parameter for the certificate serial path
"""
__app__ = "gen_cacert"
__author__ = "duy"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2010 duy"
__date__ = "2010/03/01"
__license__ = " GNU GPL version 3 or any later version (details at http://www.gnu.org)"
__credits__ = ""

from xmpp_foaf_cert import *
import sys
import getopt

DEBUG = True

## ----------------------------------------------------------------------
## administrative functions
## ----------------------------------------------------------------------
def _usage():
    print "Usage: %s options" % __app__
    print """
Options:
  -h, --help                  Print this usage message.
  -d, --debug
  -p, --certificate-path      CA certificate path
  -k, --certificate-key-path  CA private key path
  -n, --commonname            certificate commonName
  -c, --country               certificate countryName
  -o, --organization          certificate organizationName
  -u, --organizationalunit    certificate organizationalUnitName
  -e, --email                 certificate emailAddress
"""

def _version():
    """
    Display a formatted version string for the module
    """
    print """%(__app__)s %(__version__)s
%(__copyright__)s
released %(__date__)s
Thanks to:
%(__credits__)s""" % globals()

def main(argv):
    """
    Create an x509 CA certificate and save it as a PEM file

    @param CN: certificate commonName
    @param C: certificate countryName
    @param O: certificate organizationName
    @param OU: certificate organizationalUnitName
    @param Email: certificate emailAddress
    @type CN: string
    @type C: string
    @type O: string
    @type OU: string
    @type Email: string
    @param cacert_path: CA certificate path
    @param cakey_path: CA private key path
    @type cacert_path: string
    @type cakey_path: string
    """
    short_opts = "hdp:k:n:c:o:u:e:"
    long_opts = ["help", "debug", "certificate-path=", "certificate-key-path=",
                 "commonname=", "country=", "organization=",
                 "organizationalunit=", "email="]
    try:
        opts, args = getopt.getopt(argv, short_opts, long_opts)
    except getopt.GetoptError:
        # fall back to the defaults below instead of failing with an
        # undefined opts variable
        print "The CA certificate will be created with default values"
        opts = []
        # _usage()
        # sys.exit(0)

    # Example default values
    CN = "CA Certificate"
    C = "CR"
    O = "Rhizomatik Labs"
    OU = "Mycelia project"
    Email = "ca@rhizomatik.net"
    cacert_path = '/tmp/xmpp_foaf_cacert.pem'
    cakey_path = '/tmp/xmpp_foaf_cakey.key'

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            _usage()
            sys.exit(0)
        elif opt in ("-p", "--certificate-path"):
            cacert_path = arg
        elif opt in ("-k", "--certificate-key-path"):
            cakey_path = arg
        elif opt in ("-n", "--commonname"):
            CN = arg
        elif opt in ("-c", "--country"):
            C = arg
        elif opt in ("-o", "--organization"):
            O = arg
        elif opt in ("-u", "--organizationalunit"):
            OU = arg
        elif opt in ("-e", "--email"):
            Email = arg

    if DEBUG:
        print "CN: " + CN
        print "C: " + C
        print "O: " + O
        print "OU: " + OU
        print "Email: " + Email

    mkcacert_save(cacert_path, cakey_path, CN, C, O, OU, Email)

if __name__ == "__main__":
    main(sys.argv[1:])
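# Illustrative invocation (not part of the original script); all option
# values below are hypothetical:
#   ./gen_cacert -p /tmp/my_cacert.pem -k /tmp/my_cakey.key \
#       -n "Example CA" -c GB -o "Example Org" -u "PKI" -e ca@example.org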
import six
from unittest import TestCase

from dark.reads import Read, Reads
from dark.score import HigherIsBetterScore
from dark.hsp import HSP, LSP
from dark.alignments import (
    Alignment, bestAlignment, ReadAlignments, ReadsAlignmentsParams,
    ReadsAlignments)


class TestAlignment(TestCase):
    """
    Tests for the dark.alignments.Alignment class
    """

    def testExpectedAtt
rs(self): """ An alignment must have the expected attributes. """ alignment = Alignment(45, 'title') self.assertEqual('title', alignment.subjectTitle) self.assertEqual(45, alignment.subjectLength) def testNoHspsWhenCreated(self): """ An alignment must have no HSPs when it is created. """ alignment = Alignment(45, 'title') self.assertEqual(0, len(alignmen
t.hsps))

    def testAddHsp(self):
        """
        It must be possible to add an HSP to an alignment.
        """
        alignment = Alignment(45, 'title')
        alignment.addHsp(HSP(3))
        self.assertEqual(HSP(3), alignment.hsps[0])


class TestReadAlignments(TestCase):
    """
    Tests for the dark.alignments.ReadAlignments class
    """

    def testRead(self):
        """
        A ReadAlignments instance must store its read.
        """
        read = Read('id', 'ACGT')
        readAlignments = ReadAlignments(read)
        self.assertEqual(read, readAlignments.read)

    def testNoAlignments(self):
        """
        A ReadAlignments instance must be able to have no alignments.
        """
        read = Read('id', 'ACGT')
        readAlignments = ReadAlignments(read)
        self.assertEqual(0, len(readAlignments))

    def testAlignments(self):
        """
        A ReadAlignments instance must store its alignments.
        """
        read = Read('id', 'ACGT')
        alignment1 = Alignment(45, 'title1')
        alignment2 = Alignment(55, 'title2')
        readAlignments = ReadAlignments(read, [alignment1, alignment2])
        self.assertEqual([alignment1, alignment2], readAlignments)


class TestBestAlignmentHSP(TestCase):
    """
    Test the L{dark.alignments.bestAlignment} function when HSPs are used.
    """

    def testOneAlignment(self):
        """
        When one alignment is present that alignment must be returned by
        bestAlignment.
        """
        alignment = Alignment(44, 'Seq 1')
        alignment.addHsp(HSP(10))
        alignment.addHsp(HSP(9))
        alignments = [alignment]
        hit = ReadAlignments(Read('id1', 'aaa'), alignments)
        best = bestAlignment(hit)
        self.assertEqual('Seq 1', best.subjectTitle)
        self.assertEqual(44, best.subjectLength)

    def testThreeAlignments(self):
        """
        When three alignments are present, the one with the highest first HSP
        must be returned by bestAlignment.
        """
        alignment1 = Alignment(33, 'Seq 1')
        alignment1.addHsp(HSP(10))
        alignment1.addHsp(HSP(9))
        alignment2 = Alignment(44, 'Seq 2')
        alignment2.addHsp(HSP(30))
        alignment2.addHsp(HSP(29))
        alignment3 = Alignment(55, 'Seq 3')
        alignment3.addHsp(HSP(20))
        alignment3.addHsp(HSP(19))
        alignments = [alignment1, alignment2, alignment3]
        hit = ReadAlignments(Read('id1', 'aaa'), alignments)
        best = bestAlignment(hit)
        self.assertEqual('Seq 2', best.subjectTitle)
        self.assertEqual(44, best.subjectLength)


class TestBestAlignmentLSP(TestCase):
    """
    Test the L{dark.alignments.bestAlignment} function when LSPs are used.
    """

    def testOneAlignment(self):
        """
        When one alignment is present that alignment must be returned by
        bestAlignment.
        """
        alignment = Alignment(44, 'Seq 1')
        alignment.addHsp(LSP(10))
        alignment.addHsp(LSP(9))
        alignments = [alignment]
        readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
        best = bestAlignment(readAlignments)
        self.assertEqual('Seq 1', best.subjectTitle)
        self.assertEqual(44, best.subjectLength)

    def testThreeAlignments(self):
        """
        When three alignments are present, the one with the lowest first LSP
        must be returned by bestAlignment.
        """
        alignment1 = Alignment(33, 'Seq 1')
        alignment1.addHsp(LSP(10))
        alignment1.addHsp(LSP(9))
        alignment2 = Alignment(44, 'Seq 2')
        alignment2.addHsp(LSP(3))
        alignment2.addHsp(LSP(2))
        alignment3 = Alignment(55, 'Seq 3')
        alignment3.addHsp(LSP(20))
        alignment3.addHsp(LSP(19))
        alignments = [alignment1, alignment2, alignment3]
        readAlignments = ReadAlignments(Read('id0', 'aaa'), alignments)
        best = bestAlignment(readAlignments)
        self.assertEqual('Seq 2', best.subjectTitle)
        self.assertEqual(44, best.subjectLength)


class TestReadsAlignmentsParams(TestCase):
    """
    Test the L{dark.alignments.ReadsAlignmentsParams} class.
    """

    def testExpectedAttrs(self):
        """
        A ReadsAlignmentsParams instance must have the expected attributes.
""" applicationParams = {} params = ReadsAlignmentsParams('application name', applicationParams, False, 'Bit score') self.assertEqual('application name', params.application) self.assertIs(applicationParams, params.applicationParams) self.assertFalse(params.subjectIsNucleotides) self.assertEqual('Bit score', params.scoreTitle) class TestReadsAlignments(TestCase): """ Test the L{dark.alignments.ReadsAlignments} class. """ # NOTE: The ReadsAlignments class is a base class for concrete # implementations, such as BlastReadsAlignments. So it can only be # tested minimally by itself. For full tests see the # TestBlastReadsAlignments and TestBlastReadsAlignmentsFiltering # classes in test/blast/blast_alignments.py def testExpectedAttrs(self): """ A ReadsAlignments instance must have the expected attributes. """ reads = Reads() params = { 'application': 'app name' } readsAlignments = ReadsAlignments(reads, params) self.assertIs(readsAlignments.reads, reads) self.assertEqual('app name', readsAlignments.params['application']) self.assertIs(params, readsAlignments.params) self.assertIs(HigherIsBetterScore, readsAlignments.scoreClass) def testNotIterable(self): """ Iterating an empty ReadsAlignments must result in the empty list. """ reads = Reads() readsAlignments = ReadsAlignments(reads, 'applicationName', None) self.assertEqual([], list(readsAlignments)) def testGetSubjectSequence(self): """ A ReadsAlignments instance will not implement getSubjectSequence. Subclasses are expected to implement it. """ reads = Reads() readsAlignments = ReadsAlignments(reads, 'applicationName', None) error = 'getSubjectSequence must be implemented by a subclass' six.assertRaisesRegex(self, NotImplementedError, error, readsAlignments.getSubjectSequence, 'title')
ode, bbox

    def findBlobs(self, img):
        rects= []
        cnts= self.findContours(img)
        for c in cnts:
            c= c.reshape(-1, 2)
            if len(c) < 4:
                continue
            arcl= cv2.arcLength(c, True)
            approx= cv2.approxPolyDP(c, 0.02 * arcl, True)
            approx= approx.reshape(-1, 2)
            rect= cv2.minAreaRect(approx)
            w, h= rect[1]
            if len(approx) >= 4:
                if (h > 0) and (w > h):
                    ratio = float(w) / h
                    if 2.4 < ratio < 4.2:
                        rects.append(rect)
        return rects

    def ocr(self, rect):
        ang= rect[2]
        w,h= rect[1]
        if ang < -45:
            ang= ang + 90
            w= h
            h= rect[1][0]
        box= cv2.boxPoints(rect)
        box= np.int0(box)
        box= self.warp.order_points(box)
        letters= []
        code= []
        try:
            roic= self.warp.transform(self.edged, box)
            roi= self.warp.transform(self.pre, box)
            roi_orig= self.warp.transform(self.original_image, box)
        except Exception:
            print "some error"
            return code
        (roich, roicw)= roic.shape[:2]
        nh= 143
        if roich > 200:
            nw= (roicw * nh)/roich
            roi= cv2.resize(roi,(nw, nh), interpolation= cv2.INTER_LINEAR)
            roic= cv2.resize(roic,(nw, nh), interpolation= cv2.INTER_LINEAR)
        #~ self.do_skeleton(roi)
        image_rect= self.prepare_for_ocr(roi)
        image_rect2= image_rect.copy()
        if self.vlogger:
            self.vlogger.debug(VisualRecord("candidate", [image_rect], fmt = "jpg"))
        i, cnts, hie_letters= cv2.findContours(image_rect, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if self.vlogger:
            self.vlogger.debug(VisualRecord("candidate after contours", [cv2.drawContours(roi_orig,cnts,-1,(0,255,0),1)], fmt = "jpg"))
        h= roic.shape[0]
        filtered_cnts= []
        for i,b in enumerate(cnts):
            hie_let= hie_letters[0][i]  # [next, previous, first_child, parent]
            if hie_let[3] == -1:
                # if contour has no parent then continue with next
                continue
            c = b.reshape(-1,2)
            if len(b) < 3:  # ??
                continue
            r= cv2.boundingRect(c)
            # pantentes.txt - the letters measure 3.2 cm and the complete plate 29.4 cm
            if r[2] < (image_rect.shape[1] / 10):
                continue
            ratio= float(r[3]) / r[2]
            if not 1.5 <= ratio <= 2.5:
                continue
            letters.append(r)
            filtered_cnts.append(b)
        if len(letters) >= 4:
            for p in enumerate(sorted(letters, key= lambda b:b[0])):
                code.append(self._do_ocr(image_rect2, p[1], p[0]))
        if self.vlogger:
            self.vlogger.debug(VisualRecord("LETTER DETECTION", [cv2.drawContours(image_rect2,filtered_cnts,-1,(0,255,0),1)], fmt = "jpg"))
        return code

    def _do_ocr(self, img, b, i):
        x,y,w,h = b
        l = cv2.copyMakeBorder( img[y:y+h, x:x+w], 5, 5, 5, 5, cv2.BORDER_CONSTANT, value=255)
        if i > 2:
            return self.ocr_engine.read_digit(l)
        return self.ocr_engine.read_text(l)

    def findContours(self, img):
        imgcopy= img.copy()
        if self.bnight:
            i= self.prepare_night(img)
        else:
            i= self.prepare_day(img)
        _,cnts, hie = cv2.findContours(i, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if self.vlogger:
            if self.bnight:
                self.vlogger.debug(VisualRecord("contours", [cv2.drawContours(imgcopy,cnts,-1, (80,255,80),2),i], fmt = "jpg"))
            else:
                self.vlogger.debug(VisualRecord("contours", [cv2.drawContours(imgcopy,cnts,-1, (255,120,120),2),i], fmt = "jpg"))
        return cnts

    ####################################################################################################
    def prepare_night(self, img):
        tinit= timer()
        self.original_image= img
        gray= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gauss_gray= cv2.GaussianBlur(gray, (5, 5), 0)
        max_gray= np.max(gray)
        std_gray= np.std(gray)
        saturated_night= np.uint8(( gray > ( max_gray - 2 * std_gray )) * 255)  # argentina
        self.pre= gauss_gray
        self.edged= cv2.Canny(saturated_night, 10, 200, apertureSize= 5)
        if self.vlogger:
            self.vlogger.debug(VisualRecord("thresholding > (max - 2 * std)", [saturated_night], fmt = "jpg"))
        print "e:%.3f"%(timer()-tinit)
        return self.edged
    ####################################################################################################
    def prepare_day(self, img):
        self.original_image= img
        gray= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gauss_gray= cv2.GaussianBlur(gray, (5, 5), 0)
        self.pre= gauss_gray
        self.edged= cv2.Canny(gauss_gray, 1000, 1700, apertureSize= 5)
        if self.vlogger:
            self.vlogger.debug(VisualRecord("day prepare", [self.pre, self.edged], fmt = "jpg"))
        return self.edged

    ####################################################################################################
    def angle_cos(self, p0, p1, p2):
        d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
        return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )

    def prepare_for_ocr(self, img, scale=True):
        kern= cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
        # http://docs.opencv.org/master/d5/daf/tutorial_py_histogram_equalization.html#gsc.tab=0
        clahe= cv2.createCLAHE(clipLimit=2.0, tileGridSize=(5,5))
        ims= clahe.apply(img)
        ret,th= cv2.threshold(ims, 150, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        th1= cv2.morphologyEx(th, cv2.MORPH_CLOSE, kern)
        th2= self.create_rect(th1)
        if self.vlogger:
            self.vlogger.debug(VisualRecord("prepare_for_ocr", [img, ims, th], fmt = "jpg"))
        return th2

    def create_rect(self, img):
        dims= img.shape
        imgcop= img.copy()
        imgcop[0:4,0:dims[1]]= 255
        imgcop[dims[0]-2:dims[0],0:dims[1]]= 255
        if self.vlogger:
            self.vlogger.debug(VisualRecord("CREATE RECT", [imgcop], fmt = "jpg"))
        return imgcop

if __name__ == "__main__":
    logger.setLevel(logging.DEBUG)
    ch = StreamHandler()
    ch.setLevel(logging.INFO)
    logger.addHandler(ch)
    p = os.path.dirname(os.path.abspath(__file__))
    p = os.path.dirname(os.path.dirname(p))
    fh = FileHandler(p+"/log/debug.html", mode = "w")
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    if len(sys.argv) >= 2:
        path = sys.argv[1]
    else:
        path = '/home/queimadas/patchcap/samples/images/ehy435.jpg'
    s = timer()
    f = PlateDetector.Instance()
    f.set_logger(logger)
    if os.path.exists(path):
        img = cv2.imread(path)
    else:
        print "file does not exist..."
        exit()
    logger.debug(VisualRecord("letters", [img], fmt = "jpg"))
    txt = f.first(img)
    e = timer()
    logger.debug('execution time %s', (e-s))
    print txt
    print 'execution time %s' % (e-s)

    #~ def prepare_laplace_highpass(self, img, scale=True):
        #~ tinit= timer
() #~ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #~ self.gauss_gray = cv2.GaussianBlur(gray, (5, 5), 0) #~ self.lap_gray = cv2.Laplacian(self.gauss_gray, cv2.CV_8U) #~ min_val= np.min(self.gauss_gray) #~ max_val= np.max(self.gauss_gray) #~ dif= max_val - min_val #~ step= dif / 10 #~ print min_val, max_val, dif, step #~ self.edged_gauss = cv2.Canny(self.gauss_gray, min_val + 6*step, max_val - step, apertureSize= 5) #~ self.edged_lap = cv2.Canny(self.lap_gray, np.min(self.lap_gray) , np.max(self.lap_gray), apertureSize=
__all__ = ["wordlists", "roles", "bnc", "processes", "verbs", "uktous", "tagtoclass", "queries", "mergetags"] from corpkit.dictionaries.bnc import _get_bnc from corpkit.dictionaries.process_types import processes from corpkit.dictionaries.process_types import verbs from corpkit.dictionaries.roles import ro
les from corpkit.dictionaries.wordlists import wordlists from corpkit.dictionaries.queries import queries from corpkit.dictionaries.word_transforms import taglemma from corpkit.dictionaries.word_transforms import mergetags from corpkit.dictionaries.word_transforms import usa_convert roles = roles wordlists = wordlists processes = processes bnc = _get_bnc queries = queries tagtoclass = taglemma uktous = usa_convert mergetags = mergetags v
erbs = verbs
from django.contrib import admin from simulation.models import SimulationStage, SimulationStageMatch, SimulationStageMatchResult class SimulationStageAdmin(admin.ModelAdmin): list_display = ["number", "created_at"] list_filter =
["created_at"] class SimulationStageMatchAdmin(admin.Mo
delAdmin): list_display = ["stage", "order", "raund", "cat", "rat", "won", "created_at"] list_filter = ["stage", "created_at"] search_fields = ["cat", "rat"] readonly_fields = ["won", "cat_password", "rat_password", "system_password"] class SimulationStageMatchResultAdmin(admin.ModelAdmin): list_display = ["match", "is_caught", "distance", "is_cancelled", "created_at"] list_filter = ["created_at"] search_fields = ["match"] admin.site.register(SimulationStage, SimulationStageAdmin) admin.site.register(SimulationStageMatch, SimulationStageMatchAdmin) admin.site.register(SimulationStageMatchResult, SimulationStageMatchResultAdmin)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import scipy as sp

# File containing the position data
arq = 'CurvaGiro/pos.dat'
# Axis limits
v = [-10, 1000, 0, 1000]
# x-axis label
xl = r'y meters'
# y-axis label
yl = r'x meters'

x = sp.genfromtxt(arq)
a = plt.plot(x[:, 2], x[:, 1], 'k-')
plt.grid(True, 'both', color='0.8', linestyle='--', linewidth=1)
plt.axis(v)
plt.xlabel(xl)
plt.ylabel(yl)
plt.show()
#!/usr/bin/env python3 import os import re import itertools from functools import reduce from .version import __version__ sep_regex = re.compile(r'[ \-_~!@#%$^&*\(\)\[\]\{\}/\:;"|,./?`]') def get_portable_filename(filename): path, _ = os.path.split(__file__) filename = os.path.join(path, filename) return filename def load_conversion_file(filename): filename = get_portable_filename(filename) with open(filename, encoding='utf-8') as f: l = list(f) l = [i for i in l if i.strip()] l = [i.strip().split() for i in l] return {i[0]: i[1:] for i in l} print('Loading converters...') beginning = load_conversion_file('f2p-beginning.txt') middle = load_conversion_file('f2p-middle.txt') ending = load_conversion_file('f2p-ending.txt') print('Loading persian word list...') with open(get_portable_filename('persian-word-freq.txt'), encoding='utf-8') as f: word_freq = list(f) word_freq = [i.strip() for i in word_freq if i.strip()] word_freq = [i.split() for i in word_freq if not i.startswith('#')] word_freq = {i[0]: int(i[1]) for i in word_freq} print('Loading dictionary...') with open(get_portable_filename('f2p-dict.txt'), encoding='utf-8') as f: dictionary = [i.strip().split(' ', 1) for i in f if i.strip()] dictionary = {k.strip(): v.strip() for k, v in dictionary} def f2p_word_internal(word, original_word): # this function receives the word as separate letters persian = [] for i, letter in enumerate(word): if i == 0: converter = beginning elif i == len(word) - 1: converter = ending else: converter = middle conversions = converter.get(letter) if conversions == None: return [(''.join(original_word), 0.0)] else: conversions = ['' if i == 'nothing' else i for i in conversions] persian.append(conversions) alternatives = itertools.product(*persian) alternatives = [''.join(i) for i in alternatives] alternatives = [(i, word_freq[i]) if i in word_freq else (i, 0) for i in alternatives] if len(alternatives) > 0: max_freq = max(freq for _, freq in alternatives) alternatives = [(w, float(freq / max_freq)) if freq != 0 else (w, 0.0) for w, freq in alternatives] else: alternatives = [(''.join(word), 1.0)] return alternatives def variations(word): """Create variations of the word based on letter combinations like oo, sh, etc.""" if word == 'a': return [['A']] elif len(word) == 1: return [[word[0]]] elif word == 'aa': return [['A']] elif word == 'ee': return [['i']] elif word == 'ei': return [['ei']] elif word in ['oo', 'ou']: return [['u']] elif word == 'kha': return [['kha'], ['kh', 'a']] elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']: return [[word]] elif word in ["'ee", "'ei"]: return [["'i"]] elif word in ["'oo", "'ou"]: return [["'u"]] elif word in ["a'", "e'", "o'", "i'", "u'", "A'"]: return [[word[0] + "'"]] elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]: return [["'"
+ word[1]]] elif len(word) == 2 and word[0] == word[1]: return [[word[0]]]
if word[:2] == 'aa': return [['A'] + i for i in variations(word[2:])] elif word[:2] == 'ee': return [['i'] + i for i in variations(word[2:])] elif word[:2] in ['oo', 'ou']: return [['u'] + i for i in variations(word[2:])] elif word[:3] == 'kha': return \ [['kha'] + i for i in variations(word[3:])] + \ [['kh', 'a'] + i for i in variations(word[3:])] + \ [['k', 'h', 'a'] + i for i in variations(word[3:])] elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']: return \ [[word[:2]] + i for i in variations(word[2:])] + \ [[word[0]] + i for i in variations(word[1:])] elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]: return [[word[:2]] + i for i in variations(word[2:])] elif word[:3] in ["'ee", "'ei"]: return [["'i"] + i for i in variations(word[3:])] elif word[:3] in ["'oo", "'ou"]: return [["'u"] + i for i in variations(word[3:])] elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]: return [[word[:2]] + i for i in variations(word[2:])] elif len(word) >= 2 and word[0] == word[1]: return [[word[0]] + i for i in variations(word[2:])] else: return [[word[0]] + i for i in variations(word[1:])] def f2p_word(word, max_word_size=15, cutoff=3): """Convert a single word from Finglish to Persian. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument. """ original_word = word word = word.lower() c = dictionary.get(word) if c: return [(c, 1.0)] if word == '': return [] elif len(word) > max_word_size: return [(original_word, 1.0)] results = [] for w in variations(word): results.extend(f2p_word_internal(w, original_word)) # sort results based on the confidence value results.sort(key=lambda r: r[1], reverse=True) # return the top three results in order to cut down on the number # of possibilities. return results[:cutoff] def f2p_list(phrase, max_word_size=15, cutoff=3): """Convert a phrase from Finglish to Persian. phrase: The phrase to convert. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument. Returns a list of lists, each sub-list contains a number of possibilities for each word as a pair of (word, confidence) values. """ # split the phrase into words results = [w for w in sep_regex.split(phrase) if w] # return an empty list if no words if results == []: return [] # convert each word separately results = [f2p_word(w, max_word_size, cutoff) for w in results] return results def f2p(phrase, max_word_size=15, cutoff=3): """Convert a Finglish phrase to the most probable Persian phrase. """ results = f2p_list(phrase, max_word_size, cutoff) return ' '.join(i[0][0] for i in results) def main(): print('Finglish to Persian Converter, v{}'.format(__version__)) print('finglish: ', end='') phrase = input() result = f2p(phrase) print(result) if __name__ == '__main__': main()
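# Illustrative usage sketch (not part of the original module): shows the
# shape of f2p_list's return value using the functions defined above; the
# input phrase is hypothetical.
def _demo_f2p_list():
    for candidates in f2p_list('salam donya'):
        # candidates is a list of at most `cutoff` (word, confidence)
        # pairs, sorted by confidence, best candidate first
        word, confidence = candidates[0]
        print(word, confidence)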
put' function. # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3) _sys_raw_input = Any() _sys_eval_input = Any() def __init__(self, **kwargs): super(IPythonKernel, self).__init__(**kwargs) # Initialize the InteractiveShell subclass self.shell = self.shell_class.instance(parent=self, profile_dir=self.profile_dir, user_module=self.user_module, user_ns=self.user_ns, kernel=self, ) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('execute_result') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket self.shell.data_pub.session = self.session self.shell.data_pub.pub_socket = self.iopub_socket # TMP - hack while developing self.shell._reply_content = None self.comm_manager = CommManager(shell=self.shell, parent=self, kernel=self) self.comm_manager.register_target( 'ipython.widget', Widget.handle_comm_opened) self.shell.configurables.append(self.comm_manager) comm_msg_types = ['comm_open', 'comm_msg', 'comm_close'] for msg_type in comm_msg_types: self.shell_handlers[msg_type] = getattr( self.comm_manager, msg_type) # Kernel info fields implementation = 'ipython' implementation_version =
release.version
language_info = { 'name': 'python', 'version': sys.version.split()[0], 'mimetype': 'text/x-python', 'codemirror_mode': {'name': 'ipython', 'version': sys.version_info[0]}, 'pygments_lexer': 'ipython%d' % (3 if PY3 else 2), 'nbconvert_exporter': 'python', 'file_extension': '.py' } @property def banner(self): return self.shell.banner def start(self): self.shell.exit_now = False super(IPythonKernel, self).start() def set_parent(self, ident, parent): """Overridden from parent to tell the display hook and output streams about the parent message. """ super(IPythonKernel, self).set_parent(ident, parent) self.shell.set_parent(parent) def _forward_input(self, allow_stdin=False): """Forward raw_input and getpass to the current frontend. via input_request """ self._allow_stdin = allow_stdin if PY3: self._sys_raw_input = builtin_mod.input builtin_mod.input = self.raw_input else: self._sys_raw_input = builtin_mod.raw_input self._sys_eval_input = builtin_mod.input builtin_mod.raw_input = self.raw_input builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt)) self._save_getpass = getpass.getpass getpass.getpass = self.getpass def _restore_input(self): """Restore raw_input, getpass""" if PY3: builtin_mod.input = self._sys_raw_input else: builtin_mod.raw_input = self._sys_raw_input builtin_mod.input = self._sys_eval_input getpass.getpass = self._save_getpass @property def execution_count(self): return self.shell.execution_count @execution_count.setter def execution_count(self, value): # Ignore the incrememnting done by KernelBase, in favour of our shell's # execution counter. pass def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): shell = self.shell # we'll need this a lot here self._forward_input(allow_stdin) reply_content = {} # FIXME: the shell calls the exception handler itself. shell._reply_content = None try: shell.run_cell(code, store_history=store_history, silent=silent) except: status = u'error' # FIXME: this code right now isn't being used yet by default, # because the run_cell() call above directly fires off exception # reporting. This code, therefore, is only active in the scenario # where runlines itself has an unhandled exception. We need to # uniformize this, for all exception construction to come from a # single location in the codbase. etype, evalue, tb = sys.exc_info() tb_list = traceback.format_exception(etype, evalue, tb) reply_content.update(shell._showtraceback(etype, evalue, tb_list)) else: status = u'ok' finally: self._restore_input() reply_content[u'status'] = status # Return the execution counter so clients can display prompts reply_content['execution_count'] = shell.execution_count - 1 # FIXME - fish exception info out of shell, possibly left there by # runlines. We'll need to clean up this logic later. if shell._reply_content is not None: reply_content.update(shell._reply_content) e_info = dict( engine_uuid=self.ident, engine_id=self.int_id, method='execute') reply_content['engine_info'] = e_info # reset after use shell._reply_content = None if 'traceback' in reply_content: self.log.info( "Exception in execute request:\n%s", '\n'.join(reply_content['traceback'])) # At this point, we can tell whether the main code execution succeeded # or not. 
If it did, we proceed to evaluate user_expressions
        if reply_content['status'] == 'ok':
            reply_content[u'user_expressions'] = \
                shell.user_expressions(user_expressions or {})
        else:
            # If there was an error, don't even try to compute expressions
            reply_content[u'user_expressions'] = {}

        # Payloads should be retrieved regardless of outcome, so we can both
        # recover partial output (that could have been generated early in a
        # block, before an error) and clear the payload system always.
        reply_content[u'payload'] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
        # it to sit in memory until the next execute_request comes in.
        shell.payload_manager.clear_payload()

        return reply_content

    def do_complete(self, code, cursor_pos):
        # FIXME: IPython completers currently assume single line,
        # but completion messages give multi-line context
        # For now, extract line from cell, based on cursor_pos:
        if cursor_pos is None:
            cursor_pos = len(code)
        line, offset = line_at_cursor(code, cursor_pos)
        line_cursor = cursor_pos - offset

        txt, matches = self.shell.complete('', line, line_cursor)
        return {'matches': matches,
                'cursor_end': cursor_pos,
                'cursor_start': cursor_pos - len(txt),
                'metadata': {},
                'status': 'ok'}

    def do_inspect(self, code, cursor_pos, detail_level=0):
        name = token_at_cursor(code, cursor_pos)
        info = self.shell.object_inspect(name)

        reply_content = {'status': 'ok'}
        reply_content['data'] = data = {}
        reply_content['metadata'] = {}
        reply_content['found'] = info['found']
        if info['found']:
            info_text = self.shell.object_inspect_text(
                name,
                detail_level=detail_level,
            )
            data['text/plain'] = info_text

        return reply_content

    def do_history(self, hist_access_type, output, raw, session=None,
                   start=None, stop=None, n=None, pattern=None, unique=False):
        if hist_access_ty
# -*- coding: utf-8 -*- # # phys_pkg.py # # Copyright (C) 2013 Steve Canny scanny@cisco.com # # This module is part of opc-diag and is released under the MIT License: # http://www.opensource.org/licenses/mit-license.php """Interface to a physical OPC package, either a zip archive or directory""" import os import shutil from zipfile import ZIP_DEFLATED, ZipFile class BlobCollection(dict): """ Structures a set of blobs, like a set of files in an OPC package. It can add and retrieve items by URI (relative path, roughly) and can also retrieve items by uri_tail, the trailing portion of the URI. """ class PhysPkg(object): """ Provides read and write services for packages on the filesystem. Suitable for use with O
PC packages in either Zip or expanded directory form. |PhysPkg| objects are iterable, generating a (uri, blob) 2-tuple for each item in the package. """ def __init__(self, blobs, root_uri): super(PhysPkg, self).__init__() self._blobs = blobs self._root_uri = root_uri def __iter__(self): """ Generate a (uri, blob) 2-tuple for each of the items in the package. """ return iter
(self._blobs.items()) @staticmethod def read(path): """ Return a |PhysPkg| instance loaded with contents of OPC package at *path*, where *path* can be either a regular zip package or a directory containing an expanded package. """ if os.path.isdir(path): return DirPhysPkg.read(path) else: return ZipPhysPkg.read(path) @property def root_uri(self): return self._root_uri # pragma: no cover @staticmethod def write_to_dir(blobs, dirpath): """ Write the contents of the |BlobCollection| instance *blobs* to a directory at *dirpath*. If a directory already exists at *dirpath*, it is deleted before being recreated. If a file exists at *dirpath*, |ValueError| is raised, to prevent unintentional overwriting. """ PhysPkg._clear_or_make_dir(dirpath) for uri, blob in blobs.items(): PhysPkg._write_blob_to_dir(dirpath, uri, blob) @staticmethod def write_to_zip(blobs, pkg_zip_path): """ Write "files" in |BlobCollection| instance *blobs* to a zip archive at *pkg_zip_path*. """ zipf = ZipFile(pkg_zip_path, 'w', ZIP_DEFLATED) for uri in sorted(blobs.keys()): blob = blobs[uri] zipf.writestr(uri, blob) zipf.close() @staticmethod def _clear_or_make_dir(dirpath): """ Create a new, empty directory at *dirpath*, removing and recreating any directory found there. Raises |ValueError| if *dirpath* exists but is not a directory. """ # raise if *dirpath* is a file if os.path.exists(dirpath) and not os.path.isdir(dirpath): tmpl = "target path '%s' is not a directory" raise ValueError(tmpl % dirpath) # remove any existing directory tree at *dirpath* if os.path.exists(dirpath): shutil.rmtree(dirpath) # create dir at dirpath, as well as any intermediate-level dirs os.makedirs(dirpath) @staticmethod def _write_blob_to_dir(dirpath, uri, blob): """ Write *blob* to a file under *dirpath*, where the segments of *uri* that precede the filename are created, as required, as intermediate directories. """ # In general, uri will contain forward slashes as segment separators. # This next line converts them to backslashes on Windows. item_relpath = os.path.normpath(uri) fullpath = os.path.join(dirpath, item_relpath) dirpath, filename = os.path.split(fullpath) if not os.path.exists(dirpath): os.makedirs(dirpath) with open(fullpath, 'wb') as f: f.write(blob) class DirPhysPkg(PhysPkg): """ An OPC physical package that has been expanded into individual files in a directory structure that mirrors the pack URI. """ def __init__(self, blobs, root_uri): super(DirPhysPkg, self).__init__(blobs, root_uri) @classmethod def read(cls, pkg_dir): """ Return a |BlobCollection| instance loaded from *pkg_dir*. """ blobs = BlobCollection() pfx_len = len(pkg_dir)+1 for filepath in cls._filepaths_in_dir(pkg_dir): uri = filepath[pfx_len:].replace('\\', '/') with open(filepath, 'rb') as f: blob = f.read() blobs[uri] = blob root_uri = pkg_dir return cls(blobs, root_uri) @staticmethod def _filepaths_in_dir(dirpath): """ Return a sorted list of relative paths, one for each of the files under *dirpath*, recursively visiting all subdirectories. """ filepaths = [] for root, dirnames, filenames in os.walk(dirpath): for filename in filenames: filepath = os.path.join(root, filename) filepaths.append(filepath) return sorted(filepaths) class ZipPhysPkg(PhysPkg): """ An OPC physical package in the typically encountered form, a zip archive. """ def __init__(self, blobs, root_uri): super(ZipPhysPkg, self).__init__(blobs, root_uri) @classmethod def read(cls, pkg_zip_path): """ Return a |BlobCollection| instance loaded from *pkg_zip_path*. 
""" blobs = BlobCollection() zipf = ZipFile(pkg_zip_path, 'r') for name in zipf.namelist(): blobs[name] = zipf.read(name) zipf.close() root_uri = os.path.splitext(pkg_zip_path)[0] return cls(blobs, root_uri)
try: import pkg_resources p
kg_resources.declare_namespace(__name__) except ImportError: import pkgutil __path__ = pkgutil.extend_path(__path__, __name__) from ckanext.geonetwork.ha
rvesters.geonetwork import GeoNetworkHarvester from ckanext.geonetwork.harvesters.utils import GeoNetworkClient
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law
or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================
====== from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf # pylint: disable=g-bad-import-order from isl import augment from isl import test_util from isl import util flags = tf.flags test = tf.test lt = tf.contrib.labeled_tensor FLAGS = flags.FLAGS class CorruptTest(test_util.Base): def setUp(self): super(CorruptTest, self).setUp() self.signal_lt = lt.select(self.input_lt, {'mask': util.slice_1(False)}) rc = lt.ReshapeCoder(['z', 'channel', 'mask'], ['channel']) self.corrupt_coded_lt = augment.corrupt(0.1, 0.05, 0.1, rc.encode(self.signal_lt)) self.corrupt_lt = rc.decode(self.corrupt_coded_lt) def test_name(self): self.assertIn('corrupt', self.corrupt_coded_lt.name) def test(self): self.assertEqual(self.corrupt_lt.axes, self.signal_lt.axes) self.save_images('corrupt', [self.get_images('', self.corrupt_lt)]) self.assert_images_near('corrupt', True) class AugmentTest(test_util.Base): def setUp(self): super(AugmentTest, self).setUp() ap = augment.AugmentParameters(0.1, 0.05, 0.1) self.input_augment_lt, self.target_augment_lt = augment.augment( ap, self.input_lt, self.target_lt) def test_name(self): self.assertIn('augment/input', self.input_augment_lt.name) self.assertIn('augment/target', self.target_augment_lt.name) def test(self): self.assertEqual(self.input_augment_lt.axes, self.input_lt.axes) self.assertEqual(self.target_augment_lt.axes, self.target_lt.axes) self.save_images('augment', [ self.get_images('input_', self.input_augment_lt), self.get_images('target_', self.target_augment_lt) ]) self.assert_images_near('augment', True) if __name__ == '__main__': test.main()
import pytest from webdriverwrapper.exceptions import InfoMessagesException def test_check_info_messages(driver_in
fo_msgs): with pytest.raises(InfoMessagesException) as excinfo: driver_info_msgs.check_infos(expected_info_messages=('some-info',)) def test_check_expected_info_messages(driver_info_msgs): driver_info_msgs.check_infos(expected_info_messages=('some-info', 'another-info')) def test_check_allowed_info_messages(driver_info_msgs): driver_info_msgs.check_infos(allowed_info_messages=('some-info', 'another-info')) def test_check_expected_and_allowed_info_messages(driver_info_msgs
): driver_info_msgs.check_infos(expected_info_messages=('some-info',), allowed_info_messages=('another-info',))
from yowsup.structs import ProtocolEntity, ProtocolTreeNode from .iq import IqProtocolEnti
ty class PingIqProtocolEntity(IqProtocolEntity): ''' Receive <iq type="get" xmlns="urn:xmpp:ping" from="s.whatsapp.net" id="1416174955-ping"> </iq> Send <iq type="get" xmlns="w:p" to="s.whatsapp.net" id="1416174955-ping">
</iq> ''' def __init__(self, _from = None, to = None, _id = None): super(PingIqProtocolEntity, self).__init__("urn:xmpp:ping" if _from else "w:p", _id = _id, _type = "get", _from = _from, to = to)
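# Illustrative sketch (not part of the original file): constructing an
# outgoing ping like the "Send" example in the docstring above; the id
# value is hypothetical.
def _example_outgoing_ping():
    return PingIqProtocolEntity(to="s.whatsapp.net", _id="1416174955-ping")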
#!/usr/bin/env python3 import math, logging, threading, concurrent.futures import numpy import simplespectral from soapypower import threadpool logger = logging.getLogger(__name__) class PSD: """Compute averaged power spectral density using Welch's method""" def __init__(self, bins, sample_rate, fft_window='hann', fft_overlap=0.5, crop_factor=0, log_scale=True, remove_dc=False, detrend=None, lnb_lo=0, max_threads=0, max_queue_size=0): self._bins = bins
self._sample_rate = sample_rate self._fft_window = fft_window self._fft_overlap = fft_overlap self._fft_overlap_bins = math.floor(self._bins * self._fft_overlap) self._crop_factor = crop_factor self._log_scale = log_scale self._remove_dc = remove_dc self._detrend = detrend self._lnb_lo = lnb_lo self._executor = threadpool.ThreadPoolExecutor(
max_workers=max_threads, max_queue_size=max_queue_size, thread_name_prefix='PSD_thread' ) self._base_freq_array = numpy.fft.fftfreq(self._bins, 1 / self._sample_rate) def set_center_freq(self, center_freq): """Set center frequency and clear averaged PSD data""" psd_state = { 'repeats': 0, 'freq_array': self._base_freq_array + self._lnb_lo + center_freq, 'pwr_array': None, 'update_lock': threading.Lock(), 'futures': [], } return psd_state def result(self, psd_state): """Return freqs and averaged PSD for given center frequency""" freq_array = numpy.fft.fftshift(psd_state['freq_array']) pwr_array = numpy.fft.fftshift(psd_state['pwr_array']) if self._crop_factor: crop_bins_half = round((self._crop_factor * self._bins) / 2) freq_array = freq_array[crop_bins_half:-crop_bins_half] pwr_array = pwr_array[crop_bins_half:-crop_bins_half] if psd_state['repeats'] > 1: pwr_array = pwr_array / psd_state['repeats'] if self._log_scale: pwr_array = 10 * numpy.log10(pwr_array) return (freq_array, pwr_array) def wait_for_result(self, psd_state): """Wait for all PSD threads to finish and return result""" if len(psd_state['futures']) > 1: concurrent.futures.wait(psd_state['futures']) elif psd_state['futures']: psd_state['futures'][0].result() return self.result(psd_state) def result_async(self, psd_state): """Return freqs and averaged PSD for given center frequency (asynchronously in another thread)""" return self._executor.submit(self.wait_for_result, psd_state) def _release_future_memory(self, future): """Remove result from future to release memory""" future._result = None def update(self, psd_state, samples_array): """Compute PSD from samples and update average for given center frequency""" freq_array, pwr_array = simplespectral.welch(samples_array, self._sample_rate, nperseg=self._bins, window=self._fft_window, noverlap=self._fft_overlap_bins, detrend=self._detrend) if self._remove_dc: pwr_array[0] = (pwr_array[1] + pwr_array[-1]) / 2 with psd_state['update_lock']: psd_state['repeats'] += 1 if psd_state['pwr_array'] is None: psd_state['pwr_array'] = pwr_array else: psd_state['pwr_array'] += pwr_array def update_async(self, psd_state, samples_array): """Compute PSD from samples and update average for given center frequency (asynchronously in another thread)""" future = self._executor.submit(self.update, psd_state, samples_array) future.add_done_callback(self._release_future_memory) psd_state['futures'].append(future) return future
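# Illustrative usage sketch (not part of the original module): the typical
# synchronous call sequence for PSD. The parameters and the zero-valued
# sample buffer are placeholders; real samples would come from an SDR.
def _example_psd():
    psd = PSD(bins=1024, sample_rate=2.048e6, log_scale=False)
    state = psd.set_center_freq(100e6)
    samples = numpy.zeros(16384, dtype=numpy.complex64)
    psd.update(state, samples)         # compute Welch PSD and accumulate
    freqs, powers = psd.result(state)  # frequency axis and averaged PSD
    return freqs, powers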
internal function for counting total reads
  required params:
  :param data, A dictionary containing seqrun ids as keys and read counts as values
  :param seqrun_list, A list of sequencing runs
  '''
  try:
    data['run_count'] = 0
    if 'total_read' not in data:
      data['total_read']=0

    if len(seqrun_list) >1:
      for run in seqrun_list:
        if data[run] > 0:
          data['run_count'] += 1
          data['total_read'] += data[run]
    #if data['run_count'] == 1:
    #  data['total_read'] = 0
    return data
  except:
    raise


def convert_project_data_gviz_data(input_data, sample_col='sample_igf_id',
                                   read_count_col='attribute_value',
                                   seqrun_col='flowcell_id'):
  '''
  A utility method for converting a project's data availability information to
  the gviz data table format
  https://developers.google.com/chart/interactive/docs/reference#DataTable
  required params:
  :param input_data: A pandas data frame, it should contain following columns
                     sample_igf_id, flowcell_id, attribute_value (R1_READ_COUNT)
  :param sample_col, Column name for sample id, default sample_igf_id
  :param seqrun_col, Column name for sequencing run identifier, default flowcell_id
  :param read_count_col, Column name for sample read counts, default attribute_value
  return
    a dictionary of description
    a list of data dictionary
    a tuple of column_order
  '''
  try:
    if not isinstance(input_data, pd.DataFrame):
      raise AttributeError('Expecting a pandas dataframe and got {0}'.\
                           format(type(input_data)))

    input_data[read_count_col]=input_data[read_count_col].astype(float)  # convert read counts to float
    processed_data=input_data.\
                   pivot_table(values=read_count_col,
                               index=[sample_col, seqrun_col],
                               aggfunc='sum')  # group data by sample id and seq runs
    processed_data.\
    reset_index([sample_col, seqrun_col],
                inplace=True)  # reset index for processed data
    intermediate_data=list()  # define empty intermediate data structure
    seqrun_set=set()  # define empty seqrun set
    for line in processed_data.to_dict(orient='records'):  # reformat processed data to required structure
      tmp_data=dict()
      tmp_data.update({sample_col:line[sample_col],
                       line[seqrun_col]:line[read_count_col]})
      seqrun_set.add(line[seqrun_col])
      intermediate_data.append(tmp_data)

    intermediate_data=pd.DataFrame(intermediate_data)  # convert intermediate data to dataframe
    intermediate_data.fillna(0,inplace=True)  # replace NAN values with zero
    intermediate_data=intermediate_data.\
                      pivot_table(index=sample_col,
                                  aggfunc='sum').\
                      reset_index(sample_col)  # group data by samples id
    intermediate_data=intermediate_data.\
                      apply(lambda line: \
                            _count_total_reads(data=line,
                                               seqrun_list=list(seqrun_set)),
                            axis=1)  # count total reads for multiple seq runs
    multiple_run_data=intermediate_data[intermediate_data['run_count'] > 1]  # check for multi run projects
    if len(multiple_run_data.index)==0 and \
       'total_read' in multiple_run_data.columns:
      intermediate_data.drop('total_read',axis=1,inplace=True)  # drop the total read column if all samples are single run

    if 'run_count' in intermediate_data.columns:
      intermediate_data.drop('run_count',axis=1,inplace=True)  # removing run_count column

    intermediate_data.fillna(0,inplace=True)  # fail safe for missing samples
    description = {sample_col: ("string", "Sample ID")}  # define description
    if len(list(seqrun_set)) >1 and \
       'total_read' in intermediate_data.columns:
      description.update({"total_read":("number", "Total Reads")})  # add total read column for samples with multiple runs
      intermediate_data['total_read']=intermediate_data['total_read'].\
                                      astype(float)  # convert column to number

    for run in list(seqrun_set):
      description.update({run:("number",run)})  # add seqrun 
columns intermediate_data[run]=intermediate_data[run].\ astype(float) # convert column to number column_list=[sample_col] # define column order column_list.extend(list(seqrun_set)) if len(list(seqrun_set)) > 1 and \ 'total_read' in intermediate_data.columns: column_list.append('total_read') # total read is present only for multiple runs intermediate_data=intermediate_data.to_dict(orient='records') # c
onvert data frame to json column_order=tuple(column_list) return description,interme
diate_data,column_order
  except:
    raise


def _modify_seqrun_data(data_series,seqrun_col,flowcell_col,path_col):
  '''
  An internal method for parsing a seqrun dataframe and adding the remote dir path
  required columns: seqrun_igf_id, flowcell_id
  :param seqrun_col, Column name for sequencing run id, default seqrun_igf_id
  :param flowcell_col, Column name for flowcell id, default flowcell_id
  :param path_col, Column name for path, default path
  returns a data series with following columns: flowcell_id, path
  '''
  try:
    if not isinstance(data_series,pd.Series):
      raise AttributeError('Expecting a pandas data series and got {0}'.\
                           format(type(data_series)))

    seqrun_igf_id=data_series[seqrun_col]
    flowcell_id=data_series[flowcell_col]
    seqrun_date=get_seqrun_date_from_igf_id(seqrun_igf_id)
    data_series[path_col]=os.path.join(seqrun_date,flowcell_id)  # adding path to data series
    del data_series[seqrun_col]
    return data_series
  except:
    raise


def add_seqrun_path_info(input_data,output_file,seqrun_col='seqrun_igf_id',
                         flowcell_col='flowcell_id',path_col='path'):
  '''
  A utility method for adding a remote path to a dataframe for each sequencing run of a project
  required params:
  :param input_data, An input dataframe containing the following columns
                     seqrun_igf_id
                     flowcell_id
  :param seqrun_col, Column name for sequencing run id, default seqrun_igf_id
  :param flowcell_col, Column name for flowcell id, default flowcell_id
  :param path_col, Column name for path, default path
  :param output_file, An output filepath for the json data
  '''
  try:
    if not isinstance(input_data,pd.DataFrame):
      raise AttributeError('Expecting a pandas dataframe and got {0}'.\
                           format(type(input_data)))

    input_data.drop_duplicates(inplace=True)  # remove duplicate entries
    input_data=input_data.\
               apply(lambda line: \
                     _modify_seqrun_data(data_series=line,
                                         seqrun_col=seqrun_col,
                                         flowcell_col=flowcell_col,
                                         path_col=path_col),
                     axis=1)  # add remote seqrun path
    input_data=input_data.to_json(orient='records')  # encode output json w
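# Illustrative sketch (not part of the original source): calling
# convert_project_data_gviz_data, defined above, with a minimal dataframe
# that has the documented columns; all values are made up.
def _example_gviz_conversion():
    df = pd.DataFrame([
        {'sample_igf_id': 'S1', 'flowcell_id': 'FC1', 'attribute_value': 1000},
        {'sample_igf_id': 'S1', 'flowcell_id': 'FC2', 'attribute_value': 2000},
    ])
    description, data, column_order = convert_project_data_gviz_data(input_data=df)
    return description, data, column_order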
""" Classes for using robotic or other hardware using Topographica. This module contains several classes for constructing robotics interfaces to Topographica simulations. It includes modules that read input from or send output to robot devices, and a (quasi) real-time simulation object that attempts to maintain a correspondence between simulation time and real time. This module requires the PlayerStage robot interface system (from playerstage.sourceforge.net), and the playerrobot module for high-level communications with Player robots. """ import Image import ImageOps from math import pi,cos,sin import param from topo.base.simulation import E
ventProcessor from imagen.image import GenericImage from playerrobot import Camer
aDevice, PTZDevice class CameraImage(GenericImage): """ An image pattern generator that gets its image from a Player camera device. """ camera = param.ClassSelector(CameraDevice,default=None,doc=""" An instance of playerrobot.CameraDevice to be used to generate images.""") def __init__(self,**params): super(CameraImage,self).__init__(**params) self._image = None def _get_image(self,params): self._decode_image(*self.camera.image) return True def _decode_image(self,fmt,w,h,bpp,fdiv,data): if fmt==1: self._image = Image.new('L',(w,h)) self._image.fromstring(data,'raw') else: # JPALERT: if not grayscale, then assume color. This # should be expanded for other modes. rgb_im = Image.new('RGB',(w,h)) rgb_im.fromstring(data,'raw') self._image = ImageOps.grayscale(rgb_im) class CameraImageQueued(CameraImage): """ A version of CameraImage that gets the image from the camera's image queue, rather than directly from the camera object. Using queues is necessary when running the playerrobot in a separate process without shared memory. When getting an image, this pattern generator will fetch every image in the image queue and use the most recent as the current pattern. """ def _get_image(self,params): im_spec = None if self._image is None: # if we don't have an image then block until we get one im_spec = self.camera.image_queue.get() self.camera.image_queue.task_done() # Make sure we clear the image queue and get the most recent image. while not self.camera.image_queue.empty(): im_spec = self.camera.image_queue.get_nowait() self.camera.image_queue.task_done() if im_spec: # If we got a new image from the queue, then # construct a PIL image from it. self._decode_image(*im_spec) return True else: return False class PTZ(EventProcessor): """ Pan/Tilt/Zoom control. This event processor takes input events on its 'Saccade' input port in the form of (amplitude,direction) saccade commands (as produced by the topo.sheet.saccade.SaccadeController class) and appropriately servoes the attached PTZ object. There is not currently any dynamic zoom control, though the static zoom level can be set as a parameter. """ ptz = param.ClassSelector(PTZDevice,default=None,doc=""" An instance of playerrobot.PTZDevice to be controlled.""") zoom = param.Number(default=120,bounds=(0,None),doc=""" Desired FOV width in degrees.""") speed = param.Number(default=200,bounds=(0,None),doc=""" Desired max pan/tilt speed in deg/sec.""") invert_amplitude = param.Boolean(default=False,doc=""" Invert the sense of the amplitude signal, in order to get the appropriate ipsi-/contralateral sense of saccades.""") dest_ports = ["Saccade"] src_ports = ["State"] def start(self): pass def input_event(self,conn,data): if conn.dest_port == "Saccade": # the data should be (amplitude,direction) amplitude,direction = data self.shift(amplitude,direction) def shift(self,amplitude,direction): self.debug("Executing shift, amplitude=%.2f, direction=%.2f"%(amplitude,direction)) if self.invert_amplitude: amplitude *= -1 # if the amplitude is negative, invert the direction, so up is still up. if amplitude < 0: direction *= -1 angle = direction * pi/180 pan,tilt,zoom = self.ptz.state_deg pan += amplitude * cos(angle) tilt += amplitude * sin(angle) self.ptz.set_ws_deg(pan,tilt,self.zoom,self.speed,self.speed) ## self.ptz.cmd_queue.put_nowait(('set_ws_deg', ## (pan,tilt,self.zoom,self.speed,self.speed)))
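# Worked example (illustrative, not from the original source) of the pan/tilt
# update performed by PTZ.shift for amplitude=10 and direction=90 (degrees):
#   angle = 90 * pi/180
#   pan  += 10 * cos(angle)   # += ~0.0 (no horizontal component)
#   tilt += 10 * sin(angle)   # += 10.0 (a purely upward saccade)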
#!/usr/bin/env python
'''
A/V control for System76 laptop using Unity
'''
import os
from execute import returncode

# check for the existence of /dev/video0, which is currently used for the
# webcam; returns True when the device is absent (webcam off)
webcam = lambda: not os.path.exists('/dev/video0')

def webcam_toggle():
    if webcam():
        returncode('sudo /sbin/modprobe uvcvideo')
    else:
        returncode('sudo /sbin/modprobe -rv uvcvideo')

# use the amixer application to glean the status of the microphone
microphone = lambda: returncode("amixer get Capture | grep Capt | grep off") == 0
microphone_toggle = lambda: returncode("amixer set Capture toggle")

def main():
    print "Mic muted ? {0}, Webcam off ? {1}".format(microphone(), webcam())

if __name__ == '__main__':
    main()
calendar from django.utils.dates import MONTHS, MONTHS_3, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR from django.utils.tzinfo import LocalTimezone from django.utils.translation import ugettext as _ from django.utils.encoding import force_unicode re_formatchars = re.compile(r'(?<!\\)([aAbBcdDfFgGhHiIjlLmMnNOPrsStTUuwWyYzZ])') re_escaped = re.compile(r'\\(.)') class Formatter(object): def format(self, formatstr): pieces = [] for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))): if i % 2: pieces.append(force_unicode(getattr(self, piece)())) elif piece: pieces.append(re_escaped.sub(r'\1', piece)) return u''.join(pieces) class TimeFormat(Formatter): def __init__(self, t): self.data = t def a(self): "'a.m.' or 'p.m.'" if self.data.hour > 11: return _('p.m.') return _('a.m.') def A(self): "'AM' or 'PM'" if self.data.hour > 11: return _('PM') return _('AM') def B(self): "Swatch Internet time" raise NotImplementedError def f(self): """ Time, in 12-hour ho
urs and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension. """ if self.data.minute == 0: return self.g() return u'%s:%s' % (self.g(), self.i()) def g(self): "Hour, 12-hour format without le
ading zeros; i.e. '1' to '12'" if self.data.hour == 0: return 12 if self.data.hour > 12: return self.data.hour - 12 return self.data.hour def G(self): "Hour, 24-hour format without leading zeros; i.e. '0' to '23'" return self.data.hour def h(self): "Hour, 12-hour format; i.e. '01' to '12'" return u'%02d' % self.g() def H(self): "Hour, 24-hour format; i.e. '00' to '23'" return u'%02d' % self.G() def i(self): "Minutes; i.e. '00' to '59'" return u'%02d' % self.data.minute def P(self): """ Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off if they're zero and the strings 'midnight' and 'noon' if appropriate. Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.' Proprietary extension. """ if self.data.minute == 0 and self.data.hour == 0: return _('midnight') if self.data.minute == 0 and self.data.hour == 12: return _('noon') return u'%s %s' % (self.f(), self.a()) def s(self): "Seconds; i.e. '00' to '59'" return u'%02d' % self.data.second def u(self): "Microseconds" return self.data.microsecond class DateFormat(TimeFormat): year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334] def __init__(self, dt): # Accepts either a datetime or date object. self.data = dt self.timezone = getattr(dt, 'tzinfo', None) if hasattr(self.data, 'hour') and not self.timezone: self.timezone = LocalTimezone(dt) def b(self): "Month, textual, 3 letters, lowercase; e.g. 'jan'" return MONTHS_3[self.data.month] def c(self): """ ISO 8601 Format Example : '2008-01-02T10:30:00.000123' """ return self.data.isoformat() def d(self): "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'" return u'%02d' % self.data.day def D(self): "Day of the week, textual, 3 letters; e.g. 'Fri'" return WEEKDAYS_ABBR[self.data.weekday()] def F(self): "Month, textual, long; e.g. 'January'" return MONTHS[self.data.month] def I(self): "'1' if Daylight Savings Time, '0' otherwise." if self.timezone and self.timezone.dst(self.data): return u'1' else: return u'0' def j(self): "Day of the month without leading zeros; i.e. '1' to '31'" return self.data.day def l(self): "Day of the week, textual, long; e.g. 'Friday'" return WEEKDAYS[self.data.weekday()] def L(self): "Boolean for whether it is a leap year; i.e. True or False" return calendar.isleap(self.data.year) def m(self): "Month; i.e. '01' to '12'" return u'%02d' % self.data.month def M(self): "Month, textual, 3 letters; e.g. 'Jan'" return MONTHS_3[self.data.month].title() def n(self): "Month without leading zeros; i.e. '1' to '12'" return self.data.month def N(self): "Month abbreviation in Associated Press style. Proprietary extension." return MONTHS_AP[self.data.month] def O(self): "Difference to Greenwich time in hours; e.g. '+0200'" seconds = self.Z() return u"%+03d%02d" % (seconds // 3600, (seconds // 60) % 60) def r(self): "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'" return self.format('D, j M Y H:i:s O') def S(self): "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'" if self.data.day in (11, 12, 13): # Special case return u'th' last = self.data.day % 10 if last == 1: return u'st' if last == 2: return u'nd' if last == 3: return u'rd' return u'th' def t(self): "Number of days in the given month; i.e. '28' to '31'" return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1] def T(self): "Time zone of this machine; e.g. 
'EST' or 'MDT'" name = self.timezone and self.timezone.tzname(self.data) or None if name is None: name = self.format('O') return unicode(name) def U(self): "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)" if getattr(self.data, 'tzinfo', None): return int(calendar.timegm(self.data.utctimetuple())) else: return int(time.mktime(self.data.timetuple())) def w(self): "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)" return (self.data.weekday() + 1) % 7 def W(self): "ISO-8601 week number of year, weeks starting on Monday" # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt week_number = None jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1 weekday = self.data.weekday() + 1 day_of_year = self.z() if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4: if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)): week_number = 53 else: week_number = 52 else: if calendar.isleap(self.data.year): i = 366 else: i = 365 if (i - day_of_year) < (4 - weekday): week_number = 1 else: j = day_of_year + (7 - weekday) + (jan1_weekday - 1) week_number = j // 7 if jan1_weekday > 4: week_number -= 1 return week_number def y(self): "Year, 2 digits; e.g. '99'" return unicode(self.data.year)[2:] def Y(self): "Year, 4 digits; e.g. '1999'" return self.data.year def z(self): "Day of the year; i.e. '0' to '365'" doy = self.year_days[self.data.month] + self.data.day if self.L() and self.data.month > 2: doy += 1 return doy def Z(self): """ Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for timezones west of UTC is always negative, and for those east of UTC is always positive. """ if not self.timezone: return 0 offset = self.timezone.utcoffset(self.data) # Only days can be negative, so negative offsets have days=-1 and #
rgs += ['-e', '%s=%s' % (k, v)] docker_env = { 'DOCKERFILE_DIR': dockerfile_dir, 'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh' } jobspec = jobset.JobSpec( cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args, environ=docker_env, shortname='distribtest.%s' % (name), timeout_seconds=timeout_seconds, flake_retries=flake_retries, timeout_retries=timeout_retries) return jobspec def create_jobspec(name, cmdline, environ=None, shell=False, flake_retries=0, timeout_retries=0, use_workspace=False, timeout_seconds=10 * 60): """Creates jobspec.""" environ = environ.copy() if use_workspace: environ['WORKSPACE_NAME'] = 'workspace_%s' % name cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh' ] + cmdline jobspec = jobset.JobSpec(cmdline=cmdline, environ=environ, shortname='distribtest.%s' % (name), timeout_seconds=timeout_seconds, flake_retries=flake_retries, timeout_retries=timeout_retries, shell=shell) return jobspec class CSharpDistribTest(object): """Tests C# NuGet package""" def __init__(self, platform, arch, docker_suffix=None, use_dotnet_cli=False, presubmit=False): self.name = 'csharp_%s_%s' % (platform, arch) self.platform = platform self.arch = arch self.docker_suffix = docker_suffix self.labels = ['distribtest', 'csharp', platform, arch] if presubmit: self.labels.append('presubmit') self.script_suffix = '' if docker_suffix: self.name += '_%s' % docker_suffix self.labels.append(docker_suffix) if use_dotnet_cli: self.name += '_dotnetcli' self.script_suffix = '_dotnetcli' self.labels.append('dotnetcli') else: self.labels.append('olddotnet') def pre_build_jobspecs(self): return [] def build_jobspec(self, inner_jobs=None): del inner_jobs # arg unused as there is little opportunity for parallelizing whats inside the distribtests if self.platform == 'linux': return create_docker_jobspec( self.name, 'tools/dockerfile/distribtest/csharp_%s_%s' % (self.docker_suffix, self.arch), 'test/distrib/cs
harp/run_distrib_test%s.sh' % self.script_suffix, copy_rel_path='test/distrib') elif self.platform == 'macos': return create_jobspec(self.name, [ 'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix ], environ={'EXTERNAL_GIT_ROOT': '../../../..'}, use_workspace=True) elif self.platform == 'windows': if self.arch == 'x64':
# Use double leading / as the first occurrence gets removed by msys bash # when invoking the .bat file (side-effect of posix path conversion) environ = { 'MSBUILD_EXTRA_ARGS': '//p:Platform=x64', 'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug' } else: environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'} return create_jobspec(self.name, [ 'test\\distrib\\csharp\\run_distrib_test%s.bat' % self.script_suffix ], environ=environ, use_workspace=True) else: raise Exception("Not supported yet.") def __str__(self): return self.name class PythonDistribTest(object): """Tests Python package""" def __init__(self, platform, arch, docker_suffix, source=False, presubmit=False): self.source = source if source: self.name = 'python_dev_%s_%s_%s' % (platform, arch, docker_suffix) else: self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix) self.platform = platform self.arch = arch self.docker_suffix = docker_suffix self.labels = ['distribtest', 'python', platform, arch, docker_suffix] if presubmit: self.labels.append('presubmit') def pre_build_jobspecs(self): return [] def build_jobspec(self, inner_jobs=None): # TODO(jtattermusch): honor inner_jobs arg for this task. del inner_jobs if not self.platform == 'linux': raise Exception("Not supported yet.") if self.source: return create_docker_jobspec( self.name, 'tools/dockerfile/distribtest/python_dev_%s_%s' % (self.docker_suffix, self.arch), 'test/distrib/python/run_source_distrib_test.sh', copy_rel_path='test/distrib') else: return create_docker_jobspec( self.name, 'tools/dockerfile/distribtest/python_%s_%s' % (self.docker_suffix, self.arch), 'test/distrib/python/run_binary_distrib_test.sh', copy_rel_path='test/distrib') def __str__(self): return self.name class RubyDistribTest(object): """Tests Ruby package""" def __init__(self, platform, arch, docker_suffix, ruby_version=None, source=False, presubmit=False): self.package_type = 'binary' if source: self.package_type = 'source' self.name = 'ruby_%s_%s_%s_version_%s_package_type_%s' % ( platform, arch, docker_suffix, ruby_version or 'unspecified', self.package_type) self.platform = platform self.arch = arch self.docker_suffix = docker_suffix self.ruby_version = ruby_version self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix] if presubmit: self.labels.append('presubmit') def pre_build_jobspecs(self): return [] def build_jobspec(self, inner_jobs=None): # TODO(jtattermusch): honor inner_jobs arg for this task. 
del inner_jobs arch_to_gem_arch = { 'x64': 'x86_64', 'x86': 'x86', } if not self.platform == 'linux': raise Exception("Not supported yet.") dockerfile_name = 'tools/dockerfile/distribtest/ruby_%s_%s' % ( self.docker_suffix, self.arch) if self.ruby_version is not None: dockerfile_name += '_%s' % self.ruby_version return create_docker_jobspec( self.name, dockerfile_name, 'test/distrib/ruby/run_distrib_test.sh %s %s %s' % (arch_to_gem_arch[self.arch], self.platform, self.package_type), copy_rel_path='test/distrib') def __str__(self): return self.name class PHP7DistribTest(object): """Tests PHP7 package""" def __init__(self, platform, arch, docker_suffix=None, presubmit=False): self.name = 'php7_%s_%s_%s' % (platform, arch, docker_suffix) self.platform = platform self.arch = arch self.docker_suffix = docker_suffix self.labels = ['distribtest', 'php', 'php7', platform, arch] if presubmit: self.labels.append('presubmit') if docker_suffix: self.labels.append(docker_suffix) def pre_build_jobspecs(self): return [] def build_jobspec(self, inner_jobs=None): # TODO(jtattermusch): honor inner_
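jobs arg for this task.

# Usage sketch (added for illustration, not from the original file): invoking
# the create_jobspec() helper defined above; the job name and script path are
# illustrative only.
#
#     jobspec = create_jobspec(
#         'mylang_macos_x64',
#         ['test/distrib/mylang/run_distrib_test.sh'],
#         environ={'EXTERNAL_GIT_ROOT': '../../../..'},
#         use_workspace=True)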
pass else: log.error("Unknown scheme: {scheme}", scheme=scheme) if credFactory: wireEncryptedCredentialFactories.append(credFactory) if schemeConfig.get("AllowedOverWireUnencrypted", False): wireUnencryptedCredentialFactories.append(credFactory) # # Setup Resource hierarchy # log.info("Setting up document root at: {root}", root=config.DocumentRoot) # principalCollection = directory.principalCollection if config.EnableCalDAV: log.info("Setting up calendar collection: {cls}", cls=calendarResourceClass) calendarCollection = calendarResourceClass( directory, "/calendars/", newStore, ) if config.EnableCardDAV: log.info("Setting up address book collection: {cls}", cls=addressBookResourceClass) addressBookCollection = addressBookResourceClass( directory, "/addressbooks/", newStore, ) if config.DirectoryAddressBook.Enabled and config.EnableSearchAddressBook: log.info("Setting up directory address book: {cls}", cls=directoryBackedAddressBookResourceClass) directoryBackedAddressBookCollection = directoryBackedAddressBookResourceClass( principalCollections=(principalCollection,), principalDirectory=directory, uri=joinURL("/", config.DirectoryAddressBook.name, "/") ) if _reactor._started: directoryBackedAddressBookCollection.provisionDirectory() else: addSystemEventTrigger("after", "startup", directoryBackedAddressBookCollection.provisionDirectory) else: # remove /directory from previous runs that may have created it directoryPath = os.path.join(config.DocumentRoot, config.DirectoryAddressBook.name) try: FilePath(directoryPath).remove() log.info("Deleted: {path}", path=directoryPath) except (OSError, IOError), e: if e.errno != errno.ENOENT: log.error("Could not delete: {path} : {error}", path=directoryPa
th, error=e) log.info("Setting up root resource: {cls}", cls=rootResourceClass) root = rootResourceClass( config.DocumentRoot, principalCollections=(principalCollection,), ) root.putChild("principals", principalCollection) if config.EnableCalDAV: root.putChild("calendars", calendarCollection) if config.EnableCardDAV: root.putChild('addressbooks', addressBookCollection) if config.DirectoryAddressBook.E
nabled and config.EnableSearchAddressBook: root.putChild(config.DirectoryAddressBook.name, directoryBackedAddressBookCollection) # /.well-known if config.EnableWellKnown: log.info("Setting up .well-known collection resource") wellKnownResource = SimpleResource( principalCollections=(principalCollection,), isdir=True, defaultACL=SimpleResource.allReadACL ) root.putChild(".well-known", wellKnownResource) for enabled, wellknown_name, redirected_to in ( (config.EnableCalDAV, "caldav", "/",), (config.EnableCardDAV, "carddav", "/",), (config.TimezoneService.Enabled, "timezone", "/stdtimezones",), (config.Scheduling.iSchedule.Enabled, "ischedule", "/ischedule"), ): if enabled: if config.EnableSSL: scheme = "https" port = config.SSLPort else: scheme = "http" port = config.HTTPPort wellKnownResource.putChild( wellknown_name, SimpleRedirectResource( principalCollections=(principalCollection,), isdir=False, defaultACL=SimpleResource.allReadACL, scheme=scheme, port=port, path=redirected_to) ) for alias in config.Aliases: url = alias.get("url", None) path = alias.get("path", None) if not url or not path or url[0] != "/": log.error("Invalid alias: URL: {url} Path: {path}", url=url, path=path) continue urlbits = url[1:].split("/") parent = root for urlpiece in urlbits[:-1]: child = parent.getChild(urlpiece) if child is None: child = Resource() parent.putChild(urlpiece, child) parent = child if parent.getChild(urlbits[-1]) is not None: log.error("Invalid alias: URL: {url} Path: {path} already exists", url=url, path=path) continue resource = FileResource(path) parent.putChild(urlbits[-1], resource) log.info("Added alias {url} -> {path}", url=url, path=path) # Need timezone cache before setting up any timezone service log.info("Setting up Timezone Cache") TimezoneCache.create() # Timezone service is optional if config.EnableTimezoneService: log.info("Setting up time zone service resource: {cls}", cls=timezoneServiceResourceClass) timezoneService = timezoneServiceResourceClass( root, ) root.putChild("timezones", timezoneService) # Standard Timezone service is optional if config.TimezoneService.Enabled: log.info("Setting up standard time zone service resource: {cls}", cls=timezoneStdServiceResourceClass) timezoneStdService = timezoneStdServiceResourceClass( root, ) root.putChild("stdtimezones", timezoneStdService) # TODO: we only want the master to do this if _reactor._started: _reactor.callLater(0, timezoneStdService.onStartup) else: addSystemEventTrigger("after", "startup", timezoneStdService.onStartup) # # iSchedule/cross-pod service for podding # if config.Servers.Enabled: log.info("Setting up iSchedule podding inbox resource: {cls}", cls=iScheduleResourceClass) ischedule = iScheduleResourceClass( root, newStore, podding=True ) root.putChild(config.Servers.InboxName, ischedule) log.info("Setting up podding conduit resource: {cls}", cls=conduitResourceClass) conduit = conduitResourceClass( root, newStore, ) root.putChild(config.Servers.ConduitName, conduit) # # iSchedule service (not used for podding) # if config.Scheduling.iSchedule.Enabled: log.info("Setting up iSchedule inbox resource: {cls}", cls=iScheduleResourceClass) ischedule = iScheduleResourceClass( root, newStore, ) root.putChild("ischedule", ischedule) # Do DomainKey resources DKIMUtils.validConfiguration(config) if config.Scheduling.iSchedule.DKIM.Enabled: log.info("Setting up domainkey resource: {res}", res=DomainKeyResource) domain = config.Scheduling.iSchedule.DKIM.Domain if config.Scheduling.iSchedule.DKIM.Domain else config.ServerHostName 
dk = DomainKeyResource( domain, config.Scheduling.iSchedule.DKIM.KeySelector, config.Scheduling.iSchedule.DKIM.PublicKeyFile, ) wellKnownResource.putChild("domainkey", dk) # # WebCal # if config.WebCalendarRoot: log.info("Setting up WebCalendar resource: {res}", res=config.WebCalendarRoot) webCalendar = webCalendarResourceClass( config.WebCalendarRoot, principalCollections=(principalCollection,), ) root.putChild("webcal", webCalendar) # # WebAdmin # if config.EnableWebAdmin: log.info("Setting up WebAdmin resource") webAdmin = webAdminResourceClass(
from __future__ import print_function

from .patchpipette import PatchPipette
"""Models for the util app. """ import cStringIO import gzip import logging from config_models.models import ConfigurationModel from django.db import models from django.utils.text import compress_string from opaque_keys.edx.django.models import CreatorMixin logger = logging.getLogger(__name__) # pylint: disable=invalid-name class RateLimitConfiguration(ConfigurationModel): """Configuration flag to enable/disable rate limiting. Applies to Django Rest Framework views. This is useful for disabling rate limiting for performance tests. When enabled, it will disable rate limiting on any view decorated with the `can_disable_rate_limit` class decorator. """ class Meta(ConfigurationModel.Meta): app_label = "util" def decompress_string(value): """ Helper function to reverse CompressedTextField.get_prep_value. """
try: val = value.encode('utf').decode('base64') zbuf = cStringIO.StringIO(val)
zfile = gzip.GzipFile(fileobj=zbuf) ret = zfile.read() zfile.close() except Exception as e: logger.error('String decompression failed. There may be corrupted data in the database: %s', e) ret = value return ret class CompressedTextField(CreatorMixin, models.TextField): """ TextField that transparently compresses data when saving to the database, and decompresses the data when retrieving it from the database. """ def get_prep_value(self, value): """ Compress the text data. """ if value is not None: if isinstance(value, unicode): value = value.encode('utf8') value = compress_string(value) value = value.encode('base64').decode('utf8') return value def to_python(self, value): """ Decompresses the value from the database. """ if isinstance(value, unicode): value = decompress_string(value) return value
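

# Round-trip sketch (added for illustration; not part of the original module).
# Assumes the Python 2 gzip + base64 scheme implemented above; the sample
# text is arbitrary.
def _compressed_text_round_trip_example():
    field = CompressedTextField()
    original = u'some long text that should shrink when compressed'
    stored = field.get_prep_value(original)   # gzip'd, then base64-encoded
    restored = decompress_string(stored)      # back to the original UTF-8 bytes
    assert restored == original.encode('utf8')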
"AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: f5bigip_ltm_profile_sip short_description: BIG-IP ltm profile sip module description: - Configures a Session Initiation Protocol (SIP) profile. version_added: "2.4" author: - "Gabriel Fortin (@GabrielFortin)" options: alg_enable: description: - Enables or disables the SIP ALG (Application Level Gateway) feature. default: disabled choices: ['disabled', 'enabled'] app_service: description: - Specifies the name of the application service to which the object belongs. community: description: - Specifies the community to which you want to assign the virtual server that you associate with this profile. defaults_from: description: - Specifies the profile that you want to use as the parent profile. default: sip description: description: - User defined description. dialog_aware: description: - Enables or disables the ability for the system to be aware of unauthorized use of the SIP dialog. default: disabled choices: ['disabled', 'enabled'] dialog_establishment_timeout: description: - Indicates the timeout value for dialog establishment in a sip session. default: 10 enable_sip_firewall: description: - Indicates whether to enable SIP firewall functionality or not. default: no choices: ['no', 'yes'] insert_record_route_header: description: - Enables or disables the insertion of a Record-Route header, which indicates the next hop for the following SIP request messages. default: disabled choices: ['disabled', 'enabled'] insert_via_header: description: - Enables or disables the insertion of a Via header, which indicates where the message originated. default: disabled choices: ['disabled', 'enabled'] log_profile: description: - Specify the name of the ALG log profile which controls the logging of ALG . log_publisher: description: - Specify the name of the log publisher which logs translation events. max_media_sessions: description: - Indicates the maximum number of SDP media sessions that the BIG-IP system accepts. default: 6 max_registrations: description: - Indicates the maximum number of registrations, the maximum allowable REGISTER messages can be recorded that the BIG-IP system accepts. default: 100 max_sessions_per_registration: description: - Indicates the maximum number of calls or sessions can be made by a user for a single registration that the BIG-IP system accepts. default: 50 max_size: description: - Specifies the maximum SIP message size that the BIG-IP system accepts. default: 65535 name: description: - Specifies a unique name for the component. required: true partition: description: - Displays the administrative partition within which the component resides. registration_timeout: description: - Indicates the timeout value for a sip registration. default: 3600 rtp_proxy_style: description: - Indicates the style in which the RTP will proxy the data. default: symmetric choices: ['symmetric', 'restricted-by-ip-address', 'any-location'] secure_via_header: description: - Enables or disables the insertion of a Secure Via header, which indicates where the message originated. default: disabled choices: ['disabled', 'enabled'] security: description: - Enables or disables security for the SIP profile. 
default: disabled choices: ['disabled', 'enabled'] sip_session_timeout: description: - Indicates the timeout value for a sip session.
default: 300 state: description: - Specifies the state of the component on the BIG-IP system. default: present choices: ['absent', 'present'] terminate_on_bye: description: - Enables or disables the termination of a conne
ction when a BYE transaction finishes. default: enabled choices: ['disabled', 'enabled'] user_via_header: description: - Enables or disables the insertion of a Via header specified by a system administrator. requirements: - BIG-IP >= 12.0 - ansible-common-f5 - f5-sdk ''' EXAMPLES = ''' - name: Create LTM Profile sip f5bigip_ltm_profile_sip: f5_hostname: 172.16.227.35 f5_username: admin f5_password: admin f5_port: 443 name: my_sip_profile partition: Common description: My sip profile state: present delegate_to: localhost ''' RETURN = ''' # ''' from ansible.module_utils.basic import AnsibleModule from ansible_common_f5.base import F5_ACTIVATION_CHOICES from ansible_common_f5.base import F5_NAMED_OBJ_ARGS from ansible_common_f5.base import F5_POLAR_CHOICES from ansible_common_f5.base import F5_PROVIDER_ARGS from ansible_common_f5.bigip import F5BigIpNamedObject class ModuleParams(object): @property def argument_spec(self): argument_spec = dict( alg_enable=dict(type='str', choices=F5_ACTIVATION_CHOICES), app_service=dict(type='str'), community=dict(type='str'), defaults_from=dict(type='str'), description=dict(type='str'), dialog_aware=dict(type='str', choices=F5_ACTIVATION_CHOICES), dialog_establishment_timeout=dict(type='int'), enable_sip_firewall=dict(type='str', choices=F5_POLAR_CHOICES), insert_record_route_header=dict(type='str', choices=F5_ACTIVATION_CHOICES), insert_via_header=dict(type='str', choices=F5_ACTIVATION_CHOICES), log_profile=dict(type='str'), log_publisher=dict(type='str'), max_media_sessions=dict(type='int'), max_registrations=dict(type='int'), max_sessions_per_registration=dict(type='int'), max_size=dict(type='int'), registration_timeout=dict(type='int'), rtp_proxy_style=dict(type='str', choices=['symmetric', 'restricted-by-ip-address', 'any-location']), secure_via_header=dict(type='str', choices=F5_ACTIVATION_CHOICES), security=dict(type='str', choices=F5_ACTIVATION_CHOICES), sip_session_timeout=dict(type='int'), terminate_on_bye=dict(type='str', choices=F5_ACTIVATION_CHOICES), user_via_header=dict(type='str') ) argument_spec.update(F5_PROVIDER_ARGS) argument_spec.update(F5_NAMED_OBJ_ARGS) return argument_spec @property def supports_check_mode(self): return True class F5BigIpLtmProfileSip(F5BigIpNamedObject): def _set_crud_methods(self): self._methods = { 'create': self._api.tm.ltm.profile.sips.sip.create, 'read': self._api.tm.ltm.profile.sips.sip.load, 'update': self._api.tm.ltm.profile.sips.sip.update, 'delete': self._api.tm.ltm.profile.sips.sip.delete, 'exists': self._api.tm.ltm.profile.sips.sip.exists } def main(): params = ModuleParams() module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode) try: obj = F5BigIpLtmProfileSip(check_mode=module.check_mode, **module.params) result = obj.flush() module.exit_json(**result) except Exception as exc: module.fail_json(msg=str(
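exc)) if __name__ == '__main__': main()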
to wait for the response, in seconds. A value of zero (the default) means wait forever. If the timeout expires before the response is received an exception will be raised." compress: required: False description: - "A boolean flag indicating if the SDK should ask the server to send compressed responses. The default is I(True). Note that this is a hint for the server, and that it may return uncompressed data even when this parameter is set to I(True)." type: bool kerberos: required: False description: - "A boolean flag indicating if Kerberos authentication should be used instead of the default basic authentication." type: bool headers: required: False description: - "A dictionary of HTTP headers to be added to each API call." version_added: "2.4" requirements: - python >= 2.7 - ovirt-engine-sdk-python >= 4.3.0 notes: - "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket, when you no longer need it, otherwise the ticket would be revoked by engine when it expires. For an example of how to achieve that, please take a look at I(examples) section." - "In order to use this module you have to install oVirt/RHV Python SDK. To ensure it's installed with correct version you can create the following task: I(pip: name=ovirt-engine-sdk-python version=4.3.0)" - "Note that in oVirt/RHV 4.1 if you want to use a user which is not administrator you must enable the I(ENGINE_API_FILTER_BY_DEFAULT) variable in engine. In oVirt/RHV 4.2 and later it's enabled by default." ''' EXAMPLES = ''' - block: # Create a vault with `ovirt_password` variable which store your # oVirt/RHV user's password, and include that yaml file with variable: - include_vars: ovirt_password.yml - name: Obtain SSO token with using username/password credentials ovirt_auth: url: https://ovirt.example.com/ovirt-engine/api username: admin@internal ca_file: ca.pem password: "{{ ovirt_password }}" # Previous task generated I(ovirt_auth) fact, which you can later use # in different modules as follows: - ovirt_vm: auth: "{{ ovirt_auth }}"
state: absent name: myvm always: - name: Always revoke the SSO token ovirt_aut
h: state: absent ovirt_auth: "{{ ovirt_auth }}" # When user will set following environment variables: # OVIRT_URL = https://fqdn/ovirt-engine/api # OVIRT_USERNAME = admin@internal # OVIRT_PASSWORD = the_password # User can login the oVirt using environment variable instead of variables # in yaml file. # This is mainly useful when using Ansible Tower or AWX, as it will work # for Red Hat Virtualization credentials type. - name: Obtain SSO token ovirt_auth: state: present ''' RETURN = ''' ovirt_auth: description: Authentication facts, needed to perform authentication to oVirt/RHV. returned: success type: complex contains: token: description: SSO token which is used for connection to oVirt/RHV engine. returned: success type: str sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw" url: description: URL of the oVirt/RHV engine API endpoint. returned: success type: str sample: "https://ovirt.example.com/ovirt-engine/api" ca_file: description: CA file, which is used to verify SSL/TLS connection. returned: success type: str sample: "ca.pem" insecure: description: Flag indicating if insecure connection is used. returned: success type: bool sample: False timeout: description: Number of seconds to wait for response. returned: success type: int sample: 0 compress: description: Flag indicating if compression is used for connection. returned: success type: bool sample: True kerberos: description: Flag indicating if kerberos is used for authentication. returned: success type: bool sample: False headers: description: Dictionary of HTTP headers to be added to each API call. returned: success type: dict ''' import os import traceback try: import ovirtsdk4 as sdk except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ovirt import check_sdk def main(): module = AnsibleModule( argument_spec=dict( url=dict(default=None), hostname=dict(default=None), username=dict(default=None), password=dict(default=None, no_log=True), ca_file=dict(default=None, type='path'), insecure=dict(required=False, type='bool', default=None), timeout=dict(required=False, type='int', default=0), compress=dict(required=False, type='bool', default=True), kerberos=dict(required=False, type='bool', default=False), headers=dict(required=False, type='dict'), state=dict(default='present', choices=['present', 'absent']), token=dict(default=None, no_log=True), ovirt_auth=dict(required=None, type='dict'), ), required_if=[ ('state', 'absent', ['ovirt_auth']), ], supports_check_mode=True, ) check_sdk(module) state = module.params.get('state') if state == 'present': params = module.params elif state == 'absent': params = module.params['ovirt_auth'] def get_required_parameter(param, env_var, required=False): var = params.get(param) or os.environ.get(env_var) if not var and required and state == 'present': module.fail_json(msg="'%s' is a required parameter." 
% param) return var url = get_required_parameter('url', 'OVIRT_URL', required=False) hostname = get_required_parameter('hostname', 'OVIRT_HOSTNAME', required=False) if url is None and hostname is None: module.fail_json(msg="You must specify either 'url' or 'hostname'.") if url is None and hostname is not None: url = 'https://{0}/ovirt-engine/api'.format(hostname) username = get_required_parameter('username', 'OVIRT_USERNAME') password = get_required_parameter('password', 'OVIRT_PASSWORD') token = get_required_parameter('token', 'OVIRT_TOKEN') ca_file = get_required_parameter('ca_file', 'OVIRT_CAFILE') insecure = params.get('insecure') if params.get('insecure') is not None else not bool(ca_file) connection = sdk.Connection( url=url, username=username, password=password, ca_file=ca_file, insecure=insecure, timeout=params.get('timeout'), compress=params.get('compress'), kerberos=params.get('kerberos'), headers=params.get('headers'), token=token, ) try: token = connection.authenticate() module.exit_json( changed=False, ansible_facts=dict( ovirt_auth=dict( token=token, url=url, ca_file=ca_file, insecure=insecure, timeout=params.get('timeout'), compress=params.get('compress'), kerberos=params.get('kerberos'), headers=params.get('headers'), ) if state == 'present' else dict() ) ) except Exception as e: module.fail_json(msg=str(e), exception=traceback.format_exc()) finally: # Close the connection, but don't revoke token connection.close(logo
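ut=state == 'absent') if __name__ == "__main__": main()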
""" Application entry point """ def main(): pass if __name__ == "__main__": # delegates to main_debug during construction
try: import main_debug main_debug.main() except Impor
tError: main()
""" XING OAuth1 backend, docs at: http://psa.matiasaguirre.net/do
cs/backends/xing.html """ from social.backends.oauth import BaseOAuth1 class XingOAuth(BaseOAuth1): """Xing OAuth authentication backend""" name = 'xing' AUTHORIZATION_U
RL = 'https://api.xing.com/v1/authorize' REQUEST_TOKEN_URL = 'https://api.xing.com/v1/request_token' ACCESS_TOKEN_URL = 'https://api.xing.com/v1/access_token' SCOPE_SEPARATOR = '+' EXTRA_DATA = [ ('id', 'id'), ('user_id', 'user_id') ] def get_user_details(self, response): """Return user details from Xing account""" email = response.get('email', '') fullname, first_name, last_name = self.get_user_names( first_name=response['first_name'], last_name=response['last_name'] ) return {'username': first_name + last_name, 'fullname': fullname, 'first_name': first_name, 'last_name': last_name, 'email': email} def user_data(self, access_token, *args, **kwargs): """Return user data provided""" profile = self.get_json( 'https://api.xing.com/v1/users/me.json', auth=self.oauth_auth(access_token) )['users'][0] return { 'user_id': profile['id'], 'id': profile['id'], 'first_name': profile['first_name'], 'last_name': profile['last_name'], 'email': profile['active_email'] }
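
# Configuration sketch (an assumption, not part of the backend itself):
# python-social-auth loads this backend from its dotted path, with the
# consumer key/secret following the SOCIAL_AUTH_<NAME>_ settings convention.
#
#     AUTHENTICATION_BACKENDS = (
#         'social.backends.xing.XingOAuth',
#         'django.contrib.auth.backends.ModelBackend',
#     )
#     SOCIAL_AUTH_XING_KEY = '...'     # placeholder consumer key
#     SOCIAL_AUTH_XING_SECRET = '...'  # placeholder consumer secret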
#!/usr/bin/env python
import pytest

from pyxenon_snippets import slurm_queues_getter_with_props


def test_slurm_queues_getter_with_props():
    slurm_queues_getter_with_props.run_example()
""" byceps.services.shop.article.dbmodels.article ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ from datetime import datetime from decimal import Decimal from typing import Optional from sqlalchemy.ext.hybrid import hybrid_property from .....database import db, generate_uuid from .....util.instances import ReprBuilder from ...shop.transfer.models import ShopID from ..transfer.models import ArticleNumber, ArticleType class Article(db.Model): """An article that can be bought.""" __tablename__ = 'shop_articles' __table_args__ = ( db.UniqueConstraint('shop_id', 'description'), db.CheckConstraint('available_from < available_until'), ) id = db.Column(db.Uuid, default=generate_uuid, primary_key=True) shop
_id = db.Column(db.UnicodeText, db.ForeignKey('shops.id'), index=True, nullable=False) ite
m_number = db.Column(db.UnicodeText, unique=True, nullable=False) _type = db.Column('type', db.UnicodeText, nullable=False) description = db.Column(db.UnicodeText, nullable=False) price = db.Column(db.Numeric(6, 2), nullable=False) tax_rate = db.Column(db.Numeric(3, 3), nullable=False) available_from = db.Column(db.DateTime, nullable=True) available_until = db.Column(db.DateTime, nullable=True) total_quantity = db.Column(db.Integer, nullable=False) quantity = db.Column(db.Integer, db.CheckConstraint('quantity >= 0'), nullable=False) max_quantity_per_order = db.Column(db.Integer, nullable=False) not_directly_orderable = db.Column(db.Boolean, default=False, nullable=False) separate_order_required = db.Column(db.Boolean, default=False, nullable=False) shipping_required = db.Column(db.Boolean, nullable=False) def __init__( self, shop_id: ShopID, item_number: ArticleNumber, type_: ArticleType, description: str, price: Decimal, tax_rate: Decimal, total_quantity: int, max_quantity_per_order: int, shipping_required: bool, *, available_from: Optional[datetime] = None, available_until: Optional[datetime] = None, ) -> None: self.shop_id = shop_id self.item_number = item_number self._type = type_.name self.description = description self.price = price self.tax_rate = tax_rate self.available_from = available_from self.available_until = available_until self.total_quantity = total_quantity self.quantity = total_quantity # Initialize with total quantity. self.max_quantity_per_order = max_quantity_per_order self.shipping_required = shipping_required @hybrid_property def type_(self) -> ArticleType: return ArticleType[self._type] def __repr__(self) -> str: return ReprBuilder(self) \ .add_with_lookup('id') \ .add('shop', self.shop_id) \ .add_with_lookup('item_number') \ .add_with_lookup('description') \ .build()
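# Construction sketch (added for illustration; the shop ID, article number,
# and ArticleType member are hypothetical values):
#
#     article = Article(
#         ShopID('acme'),
#         ArticleNumber('ACME-00001'),
#         ArticleType.other,          # hypothetical enum member
#         'Conference T-Shirt',
#         Decimal('19.99'),           # price
#         Decimal('0.190'),           # tax_rate
#         total_quantity=100,
#         max_quantity_per_order=10,
#         shipping_required=True,
#     )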
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Created on Jan 21, 2020 @author: alfoa, wangc Cross-validated Lasso, using the LARS algorithm. """ #Internal Modules (Lazy Importer)-------------------------------------------------------------------- from numpy import finfo #Internal Modules (Lazy Importer) End---------------------------------------------------------------- #External Modules------------------------------------------------------------------------------------ #External Modules End-------------------------------------------------------------------------------- #Internal Modules------------------------------------------------------------------------------------ from SupervisedLearning.ScikitLearn import ScikitLearnBase from utils import InputData, InputTypes #Internal Modules End-------------------------------------------------------------------------------- class LassoLarsCV(ScikitLearnBase): """ Cross-validated Lasso, using the LARS algorithm """ info = {'problemtype':'regression', 'normalize':False} def __init__(self): """ Constructor that will appropriately initialize a supervised learning object @ In, None @ Out, None """ super().__init__() import sklearn import sklearn.linear_model self.model = sklearn.linear_model.LassoLarsCV @classmethod def getInputSpecification(cls): """ Method to get a reference to a class that specifies the input data for class cls. @ In, cls, the class for which we are retrieving the specification @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls. """ specs = super(LassoLarsCV, cls).getInputSpecification() specs.description = r"""The \xmlNode{LassoLarsCV} (\textit{Cross-validated Lasso model fit with Least Angle Regression}) This model is an augomentation of the LassoLars model with the addition of cross validation tecniques. The optimization objective for Lasso is: \begin{equation} (1 / (2 * n\_samples)) * ||y - Xw||^2\_2 + alpha * ||w||\_1 \end{equation} \zNormalizationNotPerformed{LassoLarsCV} """ specs.addSub(InputData.parameterInputFactory("fit_intercept", contentType=InputTypes.BoolType, descr=r"""Whether the intercept should be estimated or not. If False, the data is assumed to be already centered.""", default=True)) specs.addSub(InputData.parameterInputFactory("max_iter", contentType=InputTypes.IntegerType, descr=r"""The maximum number of iterations.""", default=500)) specs.addSub(InputData.parameterInputFactory("normalize", contentType=InputTypes.BoolType, descr=r"""This parameter is ignored when fit_intercept is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm.""", default=True)) specs.addSub(InputData.parameterInputFactory
("precompute", contentType=InputTypes.StringType, descr=r"""Whether to use a precomputed Gram matrix to speed up calculations. For sparse input this option is always True to preserve sparsity.""", default='auto')) specs.addSub(InputData.parameterInputFactory("max_n_alphas", contentType=InputTypes.IntegerType, descr=r"""The maximum number of points on t
he path used to compute the residuals in the cross-validation""", default=1000)) specs.addSub(InputData.parameterInputFactory("eps", contentType=InputTypes.FloatType, descr=r"""The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization.""", default=finfo(float).eps)) specs.addSub(InputData.parameterInputFactory("positive", contentType=InputTypes.BoolType, descr=r"""When set to True, forces the coefficients to be positive.""", default=False)) specs.addSub(InputData.parameterInputFactory("cv", contentType=InputTypes.IntegerType, descr=r"""Determines the cross-validation splitting strategy. It specifies the number of folds..""", default=None)) specs.addSub(InputData.parameterInputFactory("verbose", contentType=InputTypes.BoolType, descr=r"""Amount of verbosity.""", default=False)) return specs def _handleInput(self, paramInput): """ Function to handle the common parts of the distribution parameter input. @ In, paramInput, ParameterInput, the already parsed input. @ Out, None """ super()._handleInput(paramInput) settings, notFound = paramInput.findNodesAndExtractValues(['fit_intercept','max_iter', 'normalize', 'precompute', 'max_n_alphas','eps','positive','cv', 'verbose']) # notFound must be empty assert(not notFound) self.initializeModel(settings)
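# Input sketch (hypothetical RAVEN XML, using only the parameter nodes
# declared in getInputSpecification above; surrounding ROM boilerplate
# omitted):
#
#     <LassoLarsCV name="lassoCV">
#       <fit_intercept>True</fit_intercept>
#       <max_iter>500</max_iter>
#       <max_n_alphas>1000</max_n_alphas>
#       <cv>5</cv>
#     </LassoLarsCV>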
import numpy as np

from bokeh.io import curdoc, show
from bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Triangle

N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)

source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))

plot = Plot(
    title=None, plot_width=300, plot_height=300,
    min_border=0, toolbar_location=None)

glyph = Triangle(x="x", y="y", size="sizes", line_color="#99d594", line_width=2, fill_color=None)
plot.add_glyph(source, glyph)

xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')

yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')

plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

curdoc().add_root(plot)

show(plot)
#!/usr/bin/env python
import pynotify

'''
No purpose here other than creating a callable library for system notifications
'''


class message:
    def __init__(self, messagex):
        pynotify.init('EventCall')
        m = pynotify.Notification("RSEvent Notification", "%s" % messagex)
        m.show()
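
# Usage sketch: constructing the class shows the notification immediately.
#
#     message("Job finished")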
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.

"""Admin model views for records."""

import json

from flask import flash
from flask_admin.contrib.sqla import ModelView
from flask_babelex import gettext as _
from invenio_admin.filters import FilterConverter
from invenio_db import db
from markupsafe import Markup
from sqlalchemy.exc import SQLAlchemyError

from .api import Record
from .models import RecordMetadata


class RecordMetadataModelView(ModelView):
    """Records admin model view."""

    filter_converter = FilterConverter()
    can_create = False
    can_edit = False
    can_delete = True
    can_view_details = True
    column_list = ('id', 'version_id', 'updated', 'created',)
    column_details_list = ('id', 'version_id', 'updated', 'created', 'json')
    column_labels = dict(
        id=_('UUID'),
        version_id=_('Revision'),
        json=_('JSON'),
    )
    column_formatters = dict(
        version_id=lambda v, c, m, p: m.version_id - 1,
        json=lambda v, c, m, p: Markup("<pre>{0}</pre>".format(
            json.dumps(m.json, indent=2, sort_keys=True)))
    )
    column_filters = ('created', 'updated',)
    column_default_sort = ('updated', True)
    page_size = 25

    def delete_model(self, model):
        """Delete a record."""
        try:
            if model.json is None:
                return True
            record = Record(model.json, model=model)
            record.delete()
            db.session.commit()
        except SQLAlchemyError as e:
            if not self.handle_view_exception(e):
                flash(_('Failed to delete record. %(error)s', error=str(e)),
                      category='error')
            db.session.rollback()
            return False
        return True


record_adminview = dict(
    modelview=RecordMetadataModelView,
    model=RecordMetadata,
    category=_('Records'))
import hashlib as md5 class Palette: def __init__(self, palette={}, colors=[]): self.job_status_palette = { 'Received': '#D9E7F8', 'Checking': '#FAFAFA', 'Staging': '#6190CD', 'Waiting': '#004EFF', 'Matched': '#FEF7AA',
'Running': '#FDEE65', 'Stalled': '#BC5757', 'Completed': '#00FF21', 'Done': '#238802', 'Failed': '#FF0000', 'failed': '#FF0000', 'Killed': '#111111' } self.job_minor_status_palette = { "AncestorDepth Not Found" : '#BAA312', 'Application Finished With Errors' : '#BC2133', 'BK Input Data Not Available' : '#E6D600', 'BK-LFC In
tegrity Check Failed' : '#BC1143', 'Can not get Active and Banned Sites from JobDB' : '#84CBFF', 'Chosen site is not eligible' : '#B4A243', 'Error Sending Staging Request' : '#B4A243', 'Exceeded Maximum Dataset Limit (100)' : '#BA5C9D', 'Exception During Execution' : '#AA240C', 'Execution Complete' : '#338B39', 'Failed to access database' : '#FFE267', 'File Catalog Access Failure' : '#FF8000', 'Illegal Job JDL' : '#D96C00', 'Impossible Site + InputData Requirement' : '#BDA822', 'Impossible Site Requirement' : '#F87500', 'Input Data Not Available' : '#2822A6', 'Input Data Resolution' : '#FFBE94', 'Input Sandbox Download' : '#586CFF', 'Input data contains //' : '#AB7800', 'Input data not correctly specified' : '#6812D6', 'Job Wrapper Initialization' : '#FFFFCC', 'Job has exceeded maximum wall clock time' : '#FF33CC', 'Job has insufficient disk space to continue' : '#33FFCC', 'Job has reached the CPU limit of the queue' : '#AABBCC', 'No Ancestors Found For Input Data' : '#BDA544', 'No candidate sites available' : '#E2FFBC', 'No eligible sites for job' : '#A8D511', 'Parameter not found' : '#FFB80C', 'Pending Requests' : '#52FF4F', 'Received Kill signal' : '#FF312F', 'Socket read timeout exceeded' : '#B400FE', 'Stalled' : '#FF655E', 'Uploading Job Outputs' : '#FE8420', 'Watchdog identified this job as stalled' : '#FFCC99' } self.miscelaneous_pallette = { 'Others': '#666666', 'NoLabels': '#0025AD', 'Total': '#00FFDC', 'Default': '#FDEE65' } self.country_palette = { 'France':'#73C6BC', 'UK':'#DCAF8A', 'Spain':'#C2B0E1', 'Netherlands':'#A9BF8E', 'Germany':'#800000', 'Russia':'#00514A', 'Italy':'#004F00', 'Switzerland':'#433B00', 'Poland':'#528220', 'Hungary':'#825CE2', 'Portugal':'#009182', 'Turkey':'#B85D00' } self.palette = self.country_palette self.palette.update(self.job_status_palette) self.palette.update(self.miscelaneous_pallette) self.palette.update(self.job_minor_status_palette) def setPalette(self, palette): self.palette = palette def setColor(self, label, color): self.palette[label] = color def addPalette(self, palette): self.palette.update(palette) def getColor(self, label): if label in self.palette.keys(): return self.palette[label] else: return self.generateColor(label) def generateColor(self, label): if label == None: label = str(label) myMD5 = md5.md5() myMD5.update(str(label)) hexstring = myMD5.hexdigest() color = "#" + hexstring[:6] return color
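# Usage sketch (added for illustration): labels present in the merged palette
# resolve to their fixed colour; unknown labels get a stable colour built from
# the first six hex digits of the label's MD5 digest.
#
#     p = Palette()
#     p.getColor('Running')        # '#FDEE65', from the job status palette
#     p.getColor('SomeNewLabel')   # deterministic '#xxxxxx' per label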
]"}), 'start': ('django.db.models.fields.DateTimeField', [], {}), 'team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}) }, 'misago.setting': { 'Meta': {'object_name': 'Setting'}, 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'extra': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'field': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.SettingsGroup']", 'to_field': "'key'"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'normalize_to': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'separator': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'setting': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}), 'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'value_default': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'misago.settingsgroup': { 'Meta': {'object_name': 'SettingsGroup'}, 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'misago.signinattempt': { 'Meta': {'object_name': 'SignInAttempt'}, 'date': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}) }, 'misago.thread': { 'Meta':
{'object_name':
'Thread'}, 'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'downvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last': ('django.db.models.fields.DateTimeField', [], {}), 'last_post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Post']"}), 'last_poster': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.User']"}), 'last_poster_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'last_poster_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'last_poster_style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'private_thread_set'", 'symmetrical': 'False', 'to': "orm['misago.User']"}), 'replies': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'replies_deleted': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'replies_moderated': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'replies_reported': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'report_for': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'report_set'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Post']"}), 'score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '30'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}), 'start': ('django.db.models.fields.DateTimeField', [], {}), 'start_post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['misago.Post']"}), 'start_poster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'start_poster_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'start_poster_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}), 'start_poster_style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'upvotes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, 'misago.threadread': { 'Meta': {'object_name': 'ThreadRead'}, 'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Forum']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['misago.Thread']"}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['misago.User']"}) }, 'misago.token': { 'Meta': {'object_name': 'Token'}, 'accessed': ('django.db.models.fields.DateTimeField', [], {}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.CharField', [], {'max_length': '42', 'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'signin_tokens'", 'to': "orm['misago.User']"}) }, 'misago.user': { 'Meta': {'object_name': 'User'}, 'acl_key': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}), 'activation': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'alerts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'alerts_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'allow_pds': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'avatar_ban': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'avatar_ban_reason_admin': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'avatar_ban_reason_user': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'avatar_image': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'avatar_original': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'avatar_temp': ('django.db.models.f
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

from django.conf import settings
from django.db import connection
from django.utils.translation import ugettext as _

from modoboa.lib.exceptions import InternalError


def db_table_exists(table):
    """Check if table exists."""
    return table in connection.introspection.table_names()


def db_type(cname="default"):
    """Return the type of the *default* database

    Supported values : 'postgres', 'mysql', 'sqlite'

    :param str cname: connection name
    :return: a string or None
    """
    if cname not in settings.DATABASES:
        raise InternalError(
            _("Connection to database %s not configured") % cname)
    for t in ["postgres", "mysql", "sqlite"]:
        if settings.DATABASES[cname]["ENGINE"].find(t) != -1:
            return t
    return None
from base import *

class Test (TestBase):
    def __init__ (self):
        TestBase.__init__ (self, __file__)
        self.name = "Broken header entry III"
        self.expected_error = 200
        self.request = "GET / HTTP/1.0\r\n" +\
                       "Entry:value\r\n"
tern_cache = {} def groups_for_host(self, host): if host in self._hosts_cache: return self._hosts_cache[host].get_groups() else: return [] def groups_list(self): if not self._groups_list: groups = {} for g in self.groups: groups[g.name] = [h.name for h in g.get_hosts()] ancestors = g.get_ancestors() for a in ancestors: if a.name not in groups: groups[a.name] = [h.name for h in a.get_hosts()] self._groups_list = groups self._groups_cache = {} return self._groups_list def get_groups(self): return self.groups def get_host(self, hostname): if hostname not in self._hosts_cache: self._hosts_cache[hostname] = self._get_host(hostname) if hostname in C.LOCALHOST: for host in C.LOCALHOST.difference((hostname,)): self._hosts_cache[host] = self._hosts_cache[hostname] return self._hosts_cache[hostname] def _get_host(self, hostname): if hostname in C.LOCALHOST: for host in self.get_group('all').get_hosts(): if host.name in C.LOCALHOST: return host return self._create_implicit_localhost(hostname) matching_host = None for group in self.groups: for host in group.get_hosts(): if hostname == host.name: matching_host = host self._hosts_cache[host.name] = host return matching_host def get_group(self, groupname): if not self._groups_cache: for group in self.groups: self._groups_cache[group.name] = group return self._groups_cache.get(groupname) def get_group_variables(self, groupname, update_cached=False, vault_password=None): if groupname not in self._vars_per_group or update_cached: self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password) return self._vars_per_group[groupname] def _get_group_variables(self, groupname, vault_password=None): group = self.get_group(groupname) if group is None: raise Exception("group not found: %s" % groupname) vars = {} # plugin.get_group_vars retrieves just vars for specific group vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')] for updated in vars_results: if updated is not None: vars = combine_vars(vars, updated) # Read group_vars/ files vars = combine_vars(vars, self.get_group_vars(group)) return vars def get_vars(self, hostname, update_cached=False, vault_password=None): host = self.get_host(hostname) if not host: raise AnsibleError("no vars as host is not in inventory: %s" % hostname) return host.get_vars() def get_host_variables(self, hostname, update_cached=False, vault_password=None): if hostname not in self._vars_per_host or update_cached: self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password) return self._vars_per_host[hostname] def _get_host_variables(self, hostname, vault_password=None): host = self.get_host(hostname) if host is None: raise AnsibleError("no host vars as host is not in inventory: %s" % hostname) vars = {} # plugin.run retrieves all vars (also from groups) for host vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')] for updated in vars_results: if updated is not None: vars = combine_vars(vars, updated) # plugin.get_host_vars retrieves just vars for specific host vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')] for updated in vars_results: if updated is not None: vars = combine_vars(vars, updated) # still need to check InventoryParser per host vars # which actually means InventoryScript per host, # which 
is not performant if self.parser is not None: vars = combine_vars(vars, self.parser.get_host_variables(host)) # Read host_vars/ files vars = combine_vars(vars, self.get_host_vars(host)) return vars def add_group(self, group): if group.name not in self.groups_list(): self.groups.append(group) self._groups_list = None # invalidate internal cache self._groups_cache = {} else: raise AnsibleError("group already in inventory: %s" % group.name) def list_hosts(self, pattern="all"): """ return a list of hostnames for a pattern """ result = [ h for h in self.get_hosts(pattern) ] if len(result) == 0 and pattern in C.LOCALHOST: result = [pattern] return result def list_groups(self): return sorted([ g.name for g in self.groups ], key=lambda x: x) def restrict_to_hosts(self, restriction): """ Restrict list operations to the hosts given in restriction. This is used to batch serial operations in main playbook code, don't use this for other reasons. """ if restriction is None: return elif not isinstance(restriction, list): restriction = [ restriction ] self._restriction = restriction def subset(self, subset_pattern): """ Limits inventory results to a subset of inventory that matches a given pattern, such as to select a given geographic of numeric slice amongst a previous 'hosts' selection that only select roles, or vice versa. Corresponds to --limit parameter to ansible-playbook """ if subset_pattern is None: self._subset = None else: if ';' in subset_pattern or ',' in subset_pattern: display.deprecated("Use ':' instead of ',' or ';' to separate host patterns", version=2.0, removed=True) subset_patterns = self._split_pattern(subset_pattern) results = [] # allow Unix style @filename data for x in subset_patterns: if x.startswith("@"): fd = open(x[1:]) results.extend(fd.read().split("\n")) fd.close() else:
results.append(x) self._subset = results def remov
e_restriction(self): """ Do not restrict list operations """ self._restriction = None def is_file(self): """ did inventory come from a file? """ if not isinstance(self.host_list, basestring): return False return os.path.exists(self.host_list) def basedir(self): """ if inventory came from a file, what's the directory? """ dname = self.host_list if not self.is_file(): dname = None elif os.path.isdir(self.host_list): dname = self.host_list else: dname = os.path.dirname(self.host_list) if dname is None or dname == '' or dname == '.': cwd = os.getcwd() dname = cwd if dname: dname = os.path.abspath(dname) return dname def src(self): """ if inventory came from a file, what's the directory and file name? """ if not self.is_file(): return None return self.host_list def playbook_basedir(self): """ returns the directory of the current playbook """ return self._playbook_basedir def set_playbook_basedir(self, dir_name): """ sets the base directory of the playbook so inven
= None self.audio.terminate() except ImportError: pass class WavFile(AudioSource): def __init__(self, filename_or_fileobject): if isinstance(filename_or_fileobject, str): self.filename = filename_or_fileobject else: self.filename = None self.wav_file = filename_or_fileobject self.stream = None def __enter__(self): if self.filename: self.wav_file = open(self.filename, "rb") self.wav_reader = wave.open(self.wav_file, "rb") self.SAMPLE_WIDTH = self.wav_reader.getsampwidth() self.RATE = self.wav_reader.getframerate() self.CHANNELS = self.wav_reader.getnchannels() assert self.CHANNELS == 1 # audio must be mono self.CHUNK = 4096 self.stream = WavFile.WavStream(self.wav_reader) return self def __exit__(self, exc_type, exc_value, traceback): if self.filename: self.wav_file.close() self.stream = None class WavStream(object): def __init__(self, wav_reader): self.wav_reader = wav_reade
r def read(self, size = -1): if size == -1: return self.wav_reader.readframes(self.wav_reader.getnframes()) return self.wav_reader.readframes(size) class AudioData(object): def __init__(self, rate, data): self.rate = rate self.data = data class Recognizer(AudioSource): def __init__(self,
language = "fr-FR", key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"): self.key = key self.language = language self.energy_threshold = 1500 # minimum audio energy to consider for recording self.pause_threshold = 0.8 # seconds of quiet time before a phrase is considered complete self.quiet_duration = 0.5 # amount of quiet time to keep on both sides of the recording def samples_to_flac(self, source, frame_data): import platform, os with io.BytesIO() as wav_file: with wave.open(wav_file, "wb") as wav_writer: wav_writer.setsampwidth(source.SAMPLE_WIDTH) wav_writer.setnchannels(source.CHANNELS) wav_writer.setframerate(source.RATE) wav_writer.writeframes(frame_data) wav_data = wav_file.getvalue() # determine which converter executable to use system = platform.system() path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored if shutil.which("flac") is not None: # check for installed version first flac_converter = shutil.which("flac") elif system == "Windows" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}: # Windows NT, use the bundled FLAC conversion utility flac_converter = os.path.join(path, "flac-win32.exe") elif system == "Linux" and platform.machine() in {"i386", "x86", "x86_64", "AMD64"}: flac_converter = os.path.join(path, "flac-linux-i386") else: raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC utility") process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) flac_data, stderr = process.communicate(wav_data) return flac_data def record(self, source, duration = None): assert isinstance(source, AudioSource) and source.stream frames = io.BytesIO() seconds_per_buffer = source.CHUNK / source.RATE elapsed_time = 0 while True: # loop for the total number of chunks needed elapsed_time += seconds_per_buffer if duration and elapsed_time > duration: break buffer = source.stream.read(source.CHUNK) if len(buffer) == 0: break frames.write(buffer) frame_data = frames.getvalue() frames.close() return AudioData(source.RATE, self.samples_to_flac(source, frame_data)) def listen(self, source, timeout = None): assert isinstance(source, AudioSource) and source.stream # record audio data as raw samples frames = collections.deque() assert self.pause_threshold >= self.quiet_duration >= 0 seconds_per_buffer = source.CHUNK / source.RATE pause_buffer_count = math.ceil(self.pause_threshold / seconds_per_buffer) # number of buffers of quiet audio before the phrase is complete quiet_buffer_count = math.ceil(self.quiet_duration / seconds_per_buffer) # maximum number of buffers of quiet audio to retain before and after elapsed_time = 0 # store audio input until the phrase starts while True: # handle timeout if specified elapsed_time += seconds_per_buffer if timeout and elapsed_time > timeout: raise TimeoutError("listening timed out") buffer = source.stream.read(source.CHUNK) if len(buffer) == 0: break # reached end of the stream frames.append(buffer) # check if the audio input has stopped being quiet energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal if energy > self.energy_threshold: break if len(frames) > quiet_buffer_count: # ensure we only keep the needed amount of quiet buffers frames.popleft() # read audio input until the phrase ends pause_count = 0 while True: buffer = source.stream.read(source.CHUNK) if len(buffer) == 0: break # reached end of the stream 
frames.append(buffer) # check if the audio input has gone quiet for longer than the pause threshold energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal if energy > self.energy_threshold: pause_count = 0 else: pause_count += 1 if pause_count > pause_buffer_count: # end of the phrase break # obtain frame data for i in range(quiet_buffer_count, pause_buffer_count): frames.pop() # remove extra quiet frames at the end frame_data = b"".join(list(frames)) return AudioData(source.RATE, self.samples_to_flac(source, frame_data)) def recognize(self, audio_data, show_all = False): assert isinstance(audio_data, AudioData) url = "http://www.google.com/speech-api/v2/recognize?client=chromium&lang=%s&key=%s" % (self.language, self.key) self.request = urllib.request.Request(url, data = audio_data.data, headers = {"Content-Type": "audio/x-flac; rate=%s" % audio_data.rate}) # check for invalid key response from the server try: response = urllib.request.urlopen(self.request) except: raise KeyError("Server wouldn't respond (invalid key or quota has been maxed out)") response_text = response.read().decode("utf-8") # ignore any blank blocks actual_result = [] for line in response_text.split("\n"): if not line: continue result = json.loads(line)["result"] if len(result) != 0: actual_result = result[0] # make sure we have a list of transcriptions if "alternative" not in actual_result: raise LookupError("Speech is unintelligible") # return the best guess unless told to do otherwise if not show_all: for prediction in actual_result["alternative"]: if "confidence" in prediction: return prediction["transcript"] raise LookupError("Speech is unintelligible") spoken_text = [] # check to see if Google thinks it's 100% correct default_confidence = 0 if len(actual_result["alternative"])==1: default_confidence = 1 # return all the possibilities for prediction in actual_result["alternative"]: if "confidence" in prediction:
.arg_scopes_map['resnet_v1_152'] = resnet_arg_scope nets_factory.arg_scopes_map['resnet_v1_200'] = resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_50'] = resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_101'] = resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_152'] = resnet_arg_scope nets_factory.arg_scopes_map['resnet_v2_200'] = resnet_arg_scope pass augments = None #from . config import * #if os.path.exists('config.py'): def print_red (txt): print('\033[91m' + txt + '\033[0m') def print_green (txt): print('\033[92m' + txt + '\033[0m') print(augments) flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('db', None, 'training db') flags.DEFINE_string('val_db', None, 'validation db') flags.DEFINE_integer('classes', 2, 'number of classes') flags.DEFINE_string('mixin', None, 'mix-in training db') flags.DEFINE_integer('size', None, '') flags.DEFINE_integer('batch', 1, 'Batch size. ') flags.DEFINE_integer('channels', 3, '') flags.DEFINE_integer('shift', 0, '') flags.DEFINE_integer('stride', 16, '') flags.DEFINE_integer('max_size', 2000, '') flags.DEFINE_boolean('cache', True, '') flags.DEFINE_integer('picpac_dump', 0, '') flags.DEFINE_string('augments', None, 'augment config file') flags.DEFINE_string('backbone', 'resnet_v2_50', 'architecture') flags.DEFINE_string('model', None, 'model directory') flags.DEFINE_string('resume', None, 'resume training from this model') flags.DEFINE_string('finetune', None, '') flags.DEFINE_integer('max_to_keep', 100, '') # optimizer settings flags.DEFINE_float('lr', 0.01, 'Initial learning rate.') flags.DEFINE_float('decay_rate', 0.95, '') flags.DEFINE_float('decay_steps', 500, '') flags.DEFINE_float('weight_decay', 0.00004, '') # flags.DEFINE_integer('epoch_steps', None, '') flags.DEFINE_integer('max_epochs', 20000, '') flags.DEFINE_integer('ckpt_epochs', 10, '') flags.DEFINE_integer('val_epochs', 10, '') flags.DEFINE_boolean('adam', False, '') flags.DEFINE_boolean('vgg', False, '') COLORSPACE = 'BGR' PIXEL_MEANS = tf.constant([[[[127.0, 127.0, 127.0]]]]) PIXEL_MEANS1 = tf.constant([[[[127.0]]]]) VGG_PIXEL_MEANS = tf.constant([[[[103.94, 116.78, 123.68]]]]) def fcn_loss (logits, labels): logits = tf.reshape(logits, (-1, FLAGS.classes)) labels = tf.reshape(labels, (-1,)) # cross-entropy xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) xe = tf.reduce_mean(xe, name='xe') # accuracy acc = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32) acc = tf.reduce_mean(acc, name='acc') # regularization reg = tf.reduce_sum(tf.losses.get_regularization_losses()) reg = tf.identity(reg, name='re') # loss loss = tf.identity(xe + reg, name='lo') return loss, [acc, xe, reg, loss] def setup_finetune (ckpt, exclusions): print("Finetuning %s" % ckpt) # TODO(sguada) variables.filter_variables() variables_to_restore = [] for var in slim.get_model_variables(): excluded = False for exclusion in exclusions: if var.op.name.startswith(exclusion): print("Excluding %s" % var.op.name) excluded = True break if not excluded: variables_to_restore.append(var) if tf.gfile.IsDirectory(ckpt): ckpt = tf.train.latest_checkpoint(ckpt) variables_to_train = [] for scope in exclusions: variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope) variables_to_train.extend(variables) print("Training %d out of %d variables" % (len(variables_to_train), len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))) if len(variables_to_train) < 10: for var in variables_to_train: print(" %s" % var.op.name) return 
slim.assign_from_checkpoint_fn( ckpt, variables_to_restore, ignore_missing_vars=False), variables_to_train def create_picpac_stream (db_path, is_training): assert os.path.exists(db_path) augments = [] if is_training: if FLAGS.augments: with open(FLAGS.augments, 'r') as f: augments = json.loads(f.read()) print("Using augments:") print(json.dumps(augments)) else: augments = [ #{"type": "augment.flip", "horizontal": True, "vertical": False}, {"type": "augment.rotate", "min":-10, "max":10}, {"type": "augment.scale", "min":0.9, "max":1.1}, {"type": "augment.add", "range":20}, ] else: augments = [] config = {"db": db_path, "loop": is_training, "shuffle": is_training, "reshuffle": is_training, "annotate": True, "channels": FLAGS.channels, "stratify": is_training, "dtype": "float32", "batch": FLAGS.batch, "colorspace": COLORSPACE, "cache": FLAGS.cache, "dump": FLAGS.picpac_dump, "transforms": augments + [ {"type": "resize", "max_size": FLAGS.max_size}, {"type": "clip", "round": FLAGS.stride}, {"type": "rasterize"}, ] } if is_training and not FLAGS.mixin is None: print("mixin support is incomplete in new picpac.") assert os.path.exists(FLAGS.mixin) config['mixin'] = FLAGS.mixin config['mixin_group_reset'] = 0 config['mixin_group_delta'] = 1 pass return picpac.ImageStream(config) def main (_): global PIXEL_MEANS global PIXEL_MEANS1 if FLAGS.channels == 1: PIXEL_MEANS = PIXEL_MEANS1
    global COLORSPACE  # reassigned below; without this, create_picpac_stream would still see the module-level value

    logging.basicConfig(filename='train-%s-%s.log' % (FLAGS.backbone, datetime.datetime.now().strftime('%Y%m%d-%H%M%S')),
                        level=logging.DEBUG, format='%(asctime)s %(message)s')

    if FLAGS.model:
        try:
            os.makedirs(FLAGS.model)
        except:
            pass

    if FLAGS.finetune or FLAGS.vgg:
        print_red("finetune, using RGB with vgg pixel means")
        COLORSPACE = 'RGB'
        PIXEL_MEANS = VGG_PIXEL_MEANS

    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
    # ground truth labels
    Y = tf.placeholder(tf.int32, shape=(None, None, None, 1), name="labels")
    is_training = tf.placeholder(tf.bool, name="is_training")

    if not FLAGS.finetune:
        patch_arg_scopes()
    #with \
    #     slim.arg_scope([slim.batch_norm], decay=0.9, epsilon=5e-4):
    network_fn = nets_factory.get_network_fn(FLAGS.backbone, num_classes=None,
                                             weight_decay=FLAGS.weight_decay,
                                             is_training=is_training)

    ft, _ = network_fn(X-PIXEL_MEANS, global_pool=False, output_stride=16)
    FLAGS.stride = 16
    with tf.variable_scope('head'):
        logits = slim.conv2d_transpose(ft, FLAGS.classes, 32, 16)
    logits = tf.identity(logits, name='logits')
    # probability of class 1 -- not very useful if FLAGS.classes > 2
    probs = tf.squeeze(tf.slice(tf.nn.softmax(logits), [0,0,0,1], [-1,-1,-1,1]), 3)

    loss, metrics = fcn_loss(logits, Y)
    metric_names = [x.name[:-2] for x in metrics]

    def format_metrics (avg):
        return ' '.join(['%s=%.3f' % (a, b) for a, b in zip(metric_names, list(avg))])

    init_finetune, variables_to_train = None, None
    if FLAGS.finetune:
        print_red("finetune, using RGB with vgg pixel means")
        init_finetune, variables_to_train = setup_finetune(FLAGS.finetune, ['head'])

    global_step = tf.train.create_global_step()
    LR = tf.train.exponential_decay(FLAGS.lr, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True)
    if FLAGS.adam:
        print("Using Adam optimizer, reducing LR by 100x")
        optimizer = tf.train.AdamOptimizer(LR/100)
    else:
        optimizer = tf.train.MomentumOptimizer(learning_rate=LR, momentum=0.9)

    print(variables_to_train)
    train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step, variables_to_train=variables_to_train)
    saver = tf.train.Saver(max_to_keep=FLAGS.max_to_kee
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 17:52:19 2017

Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
______________________________________________________
"""
from numpy.random import multivariate_normal as npmvn
from numpy import diag


def PartGen(emitT, numPart):
    # Zero-mean 6-D Gaussian; the covariance is diagonal, with each plane
    # (x, y, z) sharing one variance for position and divergence.
    meanPart = [0., 0., 0., 0., 0., 0.]
    covPart = diag([emitT[0], emitT[0], emitT[1], emitT[1], emitT[2], emitT[2]])
    x, xp, y, yp, z, zp = npmvn(meanPart, covPart, numPart).T
    return x, xp, y, yp, z, zp
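# --- Usage sketch (not from the original file; values are illustrative) ---
# Draw particles with PartGen and confirm the sample variances are close to
# the requested per-plane values.
if __name__ == '__main__':
    import numpy as np
    x, xp, y, yp, z, zp = PartGen([1.0, 2.0, 3.0], 100000)
    print(np.var(x), np.var(y), np.var(z))  # expect roughly 1.0, 2.0, 3.0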
create_temporary_bill(legisinfo_id=resid, number=match.group(0), session=self.hansard.session) except Exception, e: print "Related bill search failed for callback %s" % resid print repr(e) return string return u'<bill id="%d" name="%s">%s</bill>' % (bill.id, escape(bill.name), string) elif restype == 'Affiliation': try: pol = Politician.objects.getByParlID(resid) except Politician.DoesNotExist: print "Related politician search failed for callback %s" % resid if getattr(settings, 'PARLIAMENT_LABEL_FAILED_CALLBACK', False): # FIXME migrate away from internalxref? InternalXref.objects.get_or_create(schema='pol_parlid', int_value=resid, target_id=-1) return string if pol == current_politician: return string # When someone mentions her riding, don't link back to her return u'<pol id="%d" name="%s">%s</pol>' % (pol.id, escape(pol.name), string) def get_text(self, cursor): text = u'' for string in cursor.findAll(text=parsetools.r_hasText): if string.parent.name == 'a' and string.parent['class'] == 'WebOption': text += self.process_related_link(string.parent, string, self.t['politician']) else: text += unicode(string) return text def parse(self): super(HansardParser2009, self).parse() # Initialize variables t = ParseTracker() self.t = t member_refs = {} # Get the date c = self.soup.find(text='OFFICIAL REPORT (HANSARD)').findNext('h2') self.date = datetime.datetime.strptime(c.string.strip(), "%A, %B %d, %Y").date() self.hansard.date = self.date self.hansard.save() c = c.findNext(text=r_housemet) match = re.search(r_housemet, c.string) t['timestamp'] = self.houseTime(match.group(1), match.group(2)) t.setNext('timestamp', t['timestamp']) # Move the pointer to the start c = c.next # And start the big loop while c is not None: # It's a string if not hasattr(c, 'na
me'): pass # Heading elif c.name == 'h2': c = c.next if not parsetools.isString(
c): raise ParseException("Expecting string right after h2") t.setNext('heading', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip()))) # Topic elif c.name == 'h3': top = c.find(text=r_letter) #if not parsetools.isString(c): # check if it's an empty header # if c.parent.find(text=r_letter): # raise ParseException("Expecting string right after h3") if top is not None: c = top t['topic_set'] = True t.setNext('topic', parsetools.titleIfNecessary(parsetools.tameWhitespace(c.string.strip()))) elif c.name == 'h4': if c.string == 'APPENDIX': self.saveStatement(t) print "Appendix reached -- we're done!" break # Timestamp elif c.name == 'a' and c.has_key('name') and c['name'].startswith('T'): match = re.search(r'^T(\d\d)(\d\d)$', c['name']) if match: t.setNext('timestamp', parsetools.time_to_datetime( hour=int(match.group(1)), minute=int(match.group(2)), date=self.date)) else: raise ParseException("Couldn't match time %s" % c.attrs['name']) elif c.name == 'b' and c.string: # Something to do with written answers match = r_honorific.search(c.string) if match: # It's a politician asking or answering a question # We don't get a proper link here, so this has to be a name match polname = re.sub(r'\(.+\)', '', match.group(2)).strip() self.saveStatement(t) t['member_title'] = c.string.strip() t['written_question'] = True try: pol = Politician.objects.get_by_name(polname, session=self.hansard.session) t['politician'] = pol t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date) except Politician.DoesNotExist: print "WARNING: No name match for %s" % polname except Politician.MultipleObjectsReturned: print "WARNING: Multiple pols for %s" % polname else: if not c.string.startswith('Question'): print "WARNING: Unexplained boldness: %s" % c.string # div -- the biggie elif c.name == 'div': origdiv = c if c.find('b'): # We think it's a new speaker # Save the current buffer self.saveStatement(t) c = c.find('b') if c.find('a'): # There's a link... c = c.find('a') match = re.search(r'ResourceType=Affiliation&ResourceID=(\d+)', c['href']) if match and c.find(text=r_letter): parlwebid = int(match.group(1)) # We have the parl ID. First, see if we already know this ID. pol = Politician.objects.getByParlID(parlwebid, lookOnline=False) if pol is None: # We don't. Try to do a quick name match first (if flags say so) if not GET_PARLID_ONLINE: who = c.next.string match = re.search(r_honorific, who) if match: polname = re.sub(r'\(.+\)', '', match.group(2)).strip() try: #print "Looking for %s..." % polname, pol = Politician.objects.get_by_name(polname, session=self.hansard.session) #print "found." except Politician.DoesNotExist: pass except Politician.MultipleObjectsReturned: pass if pol is None: # Still no match. Go online... try: pol = Politician.objects.getByParlID(parlwebid, session=self.hansard.session) except Politician.DoesNotExist: print "WARNING: Couldn't find politician for ID %d" % parlwebid if pol is not None: t['member'] = ElectedMember.objects.get_by_pol(politician=pol, date=self.date) t['politician'] = pol c = c.next if not parsetools.isString(c): raise Exception("Expecting string in b for member name") t['member_title'] = c.strip() #print c if t['member_title'].endswith(':'): # Remove colon in e.g. Some hon. members: t['member_titl
# you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import unittest from google.cloud import bigquery from google.cloud.bigquery.job import ExtractJobConfig, DestinationFormat from google.api_core import exceptions from kfp_component.google.bigquery import query CREATE_JOB_MODULE = 'kfp_component.google.bigquery._query' @mock.patch(CREATE_JOB_MODULE + '.display.display') @mock.patch(CREATE_JOB_MODULE + '.gcp_common.dump_file') @mock.patch(CREATE_JOB_MODULE + '.KfpExecutionContext') @mock.patch(CREATE_JOB_MODULE + '.bigquery.Client') class TestQuery(unittest.TestCase): def test_query_succeed(self, mock_client, mock_kfp_context, mock_dump_json, mock_display): mock_kfp_context().__enter__().context_id.return_value = 'ctx1' mock_client().get_job.side_effect = exceptions.NotFound('not found') mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1') mock_client().dataset.return_value = mock_dataset mock_client().get_dataset.side_effect = exceptions.NotFound('not found') mock_response = { 'configuration': { 'query': { 'query': 'SELECT * FROM table_1' } } } mock_client().query.return_value.to_api_repr.return_value = mock_response result = query('SELECT * FROM table_1', 'project-1', 'dataset-1', output_gcs_path='gs://output/path') self.assertEqual(mock_response, result) mock_client().create_dataset.assert_called() expected_job_config = bigquery.QueryJobConfig() expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE expected_job_config.destination = mock_dataset.table('query_ctx1') mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY, job_id = 'query_ctx1') actual_job_config = mock_client().query.call_args_list[0][0][1] self.assertDictEqual( expected_job_config.to_api_repr(), actual_job_config.to_api_repr() ) extract = mock_client().extract_table.call_args_list[0] self.assertEqual(extract[0], (mock_dataset.table('query_ctx1'), 'gs://output/path',)) self.assertEqual(extract[1]["job_config"].destination_format, "CSV",) def test_query_no_output_path(self, mock_client, mock_kfp_context, mock_dump_json, mock_display): mock_kfp_context().__enter__().context_id.return_value =
'ctx1' mock_client().get_job.side_effect = exceptions.NotFound('not found') mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1') mock_client().dataset.return_value = mock_dataset mock_client().get_dataset.return_value = bigquery.Dataset(mock_dataset) mock_response = { 'configuration': { 'query': {
'query': 'SELECT * FROM table_1' } } } mock_client().query.return_value.to_api_repr.return_value = mock_response result = query('SELECT * FROM table_1', 'project-1', 'dataset-1', 'table-1') self.assertEqual(mock_response, result) mock_client().create_dataset.assert_not_called() mock_client().extract_table.assert_not_called() expected_job_config = bigquery.QueryJobConfig() expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE expected_job_config.destination = mock_dataset.table('table-1') mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY, job_id = 'query_ctx1') actual_job_config = mock_client().query.call_args_list[0][0][1] self.assertDictEqual( expected_job_config.to_api_repr(), actual_job_config.to_api_repr() ) def test_query_output_json_format(self, mock_client, mock_kfp_context, mock_dump_json, mock_display): mock_kfp_context().__enter__().context_id.return_value = 'ctx1' mock_client().get_job.side_effect = exceptions.NotFound('not found') mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1') mock_client().dataset.return_value = mock_dataset mock_client().get_dataset.side_effect = exceptions.NotFound('not found') mock_response = { 'configuration': { 'query': { 'query': 'SELECT * FROM table_1' } } } mock_client().query.return_value.to_api_repr.return_value = mock_response result = query('SELECT * FROM table_1', 'project-1', 'dataset-1', output_gcs_path='gs://output/path', output_destination_format="NEWLINE_DELIMITED_JSON") self.assertEqual(mock_response, result) mock_client().create_dataset.assert_called() extract = mock_client().extract_table.call_args_list[0] self.assertEqual(extract[0], (mock_dataset.table('query_ctx1'), 'gs://output/path',)) self.assertEqual(extract[1]["job_config"].destination_format, "NEWLINE_DELIMITED_JSON",)
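# Hedged usage sketch (illustrative, not part of the test suite): the tests
# above exercise the component like this, with placeholder project/dataset
# names and an optional GCS export path:
#
#   result = query('SELECT * FROM table_1', 'project-1', 'dataset-1',
#                  output_gcs_path='gs://output/path',
#                  output_destination_format='NEWLINE_DELIMITED_JSON')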
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range from ansible import errors from ansible import utils import shlex import re import ast class InventoryParser(object): """ Host inventory for ansible. """ def __init__(self, filename=C.DEFAULT_HOST_LIST): with open(filename) as fh: self.lines = fh.readlines() self.groups = {} self.hosts = {} self._parse() def _parse(self): self._parse_base_groups() self._parse_group_children() self._add_allgroup_children() self._parse_group_variables() return self.groups @staticmethod def _parse_value(v): if "#" not in v: try: return ast.literal_eval(v) # Using explicit exceptions. # Likely a string that literal_eval does not like. We wil then just set it. except ValueError: # For some reason this was thought to be malformed. pass except SyntaxError: # Is this a hash with an equals at the end? pass return v # [webservers] # alpha # beta:2345 # gamma sudo=True user=root # delta asdf=jkl favcolor=red def _add_allgroup_children(self): for group in self.groups.values(): if group.depth == 0 and group.name != 'all': self.groups['all'].add_child_group(group) def _parse_base_groups(self): # FIXME: refactor ungrouped = Group(name='ungrouped') all = Group(name='all') all.add_child_group(ungrouped) self.groups = dict(all=all, ungrouped=ungrouped) active_group_name = 'ungrouped' for line in self.lines: line = utils.before_comment(line).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if ":vars" in line or ":children" in line: active_group_name = active_group_name.rsplit(":", 1)[0] if active_group_name not in self.groups: new_group = self.groups[active_group_name] = Group(name=active_group_name) active_group_name = None elif active_group_name not in self.groups: new_group = self.groups[active_group_name] = Group(name=active_group_name) elif line.startswith(";") or line == '': pass elif active_group_name: tokens = shlex.split(line) if len(tokens) == 0: continue hostname = tokens[0] port = C.DEFAULT_REMOTE_PORT # Three cases to check: # 0. A hostname that contains a range pesudo-code and a port # 1. 
A hostname that contains just a port if hostname.count(":") > 1: # Possible an IPv6 address, or maybe a host line with multiple ranges # IPv6 with Port XXX:XXX::XXX.port # FQDN foo.example.com if hostname.count(".") == 1: (hostname, port) = hostname.rsplit(".", 1) elif ("[" in hostname and "]" in hostname and ":" in hostname and (hostname.rindex("]") < hostname.rindex(":")) or ("]" not in hostname and ":" in hostname)): (hostname, port) = hostname.rsplit(":", 1) hostnames = [] if detect_range(hostname): hostnames = expand_hostname_range(hostname) else: hostnames = [hostname] for hn in hostnames: host = None if hn in self.hosts: host = self.hosts[hn]
else: host = Host(name=hn, port=port) self.hosts[hn] = host if len(tokens) > 1: for t in tokens[1:]: if t.startswith('#'): break try: (k,v) = t.split("=", 1) except ValueError, e:
raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e))) host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) # [southeast:children] # atlanta # raleigh def _parse_group_children(self): group = None for line in self.lines: line = line.strip() if line is None or line == '': continue if line.startswith("[") and ":children]" in line: line = line.replace("[","").replace(":children]","") group = self.groups.get(line, None) if group is None: group = self.groups[line] = Group(name=line) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): group = None elif group: kid_group = self.groups.get(line, None) if kid_group is None: raise errors.AnsibleError("child group is not defined: (%s)" % line) else: group.add_child_group(kid_group) # [webservers:vars] # http_port=1234 # maxRequestsPerChild=200 def _parse_group_variables(self): group = None for line in self.lines: line = line.strip() if line.startswith("[") and ":vars]" in line: line = line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: raise errors.AnsibleError("can't add vars to undefined group: %s" % line) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): group = None elif line == '': pass elif group: if "=" not in line: raise errors.AnsibleError("variables assigned to group must be in key=value form") else: (k, v) = [e.strip() for e in line.split("=", 1)] group.set_variable(k, self._parse_value(v)) def get_host_variables(self, host): return {}
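# Hedged usage sketch (hypothetical path and host names): the parser accepts
# the INI syntax shown in the comments above -- [group] headers, host lines
# with key=value pairs, plus [group:children] and [group:vars] sections.
#
#   parser = InventoryParser('/etc/ansible/hosts')
#   for name, group in parser.groups.items():
#       print name, [h.name for h in group.get_hosts()]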
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
#    Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
#    All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import uuid

import sqlalchemy as sa
from sqlalchemy import orm

_engine = None
_session_maker = None

DB_CONNECTION = "sqlite:////tmp/restalchemy-%s.db" % uuid.uuid4()


def get_engine():
    global _engine
    if _engine is None:
        _engine = sa.create_engine(DB_CONNECTION, echo=True)
    return _engine


def get_session():
    # Cache the sessionmaker and return a new session from it; returning the
    # bare sessionmaker (as before) handed callers a factory, not a session.
    global _session_maker
    if _session_maker is None:
        _session_maker = orm.sessionmaker(bind=get_engine())
    return _session_maker()
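# Hedged usage sketch: with the caching above, each call yields a fresh
# session bound to the shared engine.
#
#   session = get_session()
#   session.execute(sa.text("SELECT 1"))
#   session.close()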
e GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import datetime import time from osv import fields, osv from tools.translate import _ import tools from tools import ustr class gap_analysis(osv.Model): _inherit = "gap_analysis" _name = "gap_analysis" def generate_project(self, cr, uid, ids, context=None): project_pool = self.pool.get('project.project') task_pool = self.pool.get('project.task') for gap in self.browse(cr, uid, ids, context=context): partner_id = gap.partner_id and gap.partner_id.id or False notes = gap.note or '' project_vals = { 'name': gap.name, 'description': notes, 'user_id': gap.user_id.id, 'partner_id': partner_id, 'gap_analysis_id': gap.id, } project_id = project_pool.create(cr, uid, project_vals, context=context) for gap_line in gap.gap_lines: if gap_line.to_project and gap_line.keep: time4dev = 0 time4tech = 0 time4fct = 0 time4test = gap_line.testing or 0 if gap_line.effort: if gap_line.effort.unknown: time4dev = gap_line.duration_wk else: time4dev = gap_line.effort.duration for workload in gap_line.workloads: if workload.type.category == "Technical Analysis": time4tech += workload.duration else: time4fct += workload.duration # Create Tasks if time4dev > 0 or time4tech > 0 or time4fct > 0 or time4test > 0: maintask_vals = { 'name': gap_line.functionality.name[0:100], 'code_gap': gap_line.code or "", 'project_id': project_id, 'notes': ustr(gap_line.functionality.description or gap_line.functionality.name), 'partner_id': partner_id, 'gap_category_id': gap_line.category and gap_line.category.id or False, 'user_id': gap.user_functional and gap.user_functional.id or False, 'gap_line_id': gap_line.id, 'to_report': True, 'org_planned_hours': 0, 'planned_hours': 0, 'remaining_hours': 0, } maintask_id = task_pool.create(cr, uid, maintask_vals, context=context) maintask_id = [int(maintask_id)] if time4test > 0: task_vals4test = {
'name': gap_line.functionality.name[0:100] + " [TEST]",
'code_gap': gap_line.code or "", 'project_id': project_id, 'notes': ustr(gap_line.functionality.description or gap_line.functionality.name), 'partner_id': partner_id, 'org_planned_hours': time4test, 'planned_hours': time4test, 'remaining_hours': time4test, 'parent_ids': [(6,0,maintask_id)], 'gap_category_id': gap_line.category and gap_line.category.id or False, 'user_id': gap.user_test and gap.user_test.id or False, 'gap_line_id': gap_line.id, } task_pool.create(cr, uid, task_vals4test, context=context) if time4dev > 0: task_vals4dev = { 'name': gap_line.functionality.name[0:100] + " [DEV]", 'code_gap': gap_line.code or "", 'project_id': project_id, 'notes': ustr(gap_line.functionality.description or gap_line.functionality.name), 'partner_id': partner_id, 'org_planned_hours': time4dev, 'planned_hours': time4dev, 'remaining_hours': time4dev, 'parent_ids': [(6,0,maintask_id)], 'gap_category_id': gap_line.category and gap_line.category.id or False, 'user_id': gap.user_dev and gap.user_dev.id or False, 'gap_line_id': gap_line.id, } task_pool.create(cr, uid, task_vals4dev, context=context) if time4tech > 0: task_vals4tech = { 'name': gap_line.functionality.name[0:100] + " [TECH]", 'code_gap': gap_line.code or "", 'project_id': project_id, 'notes': ustr(gap_line.functionality.description or gap_line.functionality.name), 'partner_id': partner_id, 'org_planned_hours': time4tech, 'planned_hours': time4tech, 'remaining_hours': time4tech, 'parent_ids': [(6,0,maintask_id)], 'gap_category_id': gap_line.category and gap_line.category.id or False, 'user_id': gap.user_technical and gap.user_technical.id or False, 'gap_line_id': gap_line.id, } task_pool.create(cr, uid, task_vals4tech, context=context) if time4fct > 0: task_vals4fct = { 'name': gap_line.functionality.name[0:100] + " [FUNC]", 'code_gap': gap_line.code or "", 'project_id': project_id, 'notes': ustr(gap_line.functionality.description or gap_line.functionality.name), 'partner_id': partner_id, 'org_planned_hours': time4fct, 'planned_hours': time4fct, 'remaining_hours': time4fct, 'parent_ids': [(6,0,maintask_id)], 'gap_category_id': gap_line.functionality.category and gap_line.functionality.category.id or False, 'user_id': gap.user_functional and gap.user_functional.id or False, 'gap_line_id': gap_line.id, } task_pool.create(cr, uid, task_vals4fct, context=context) if project_id: return { 'type': 'ir.actions.act_window', 'name':"Generated Project", 'view_mode': 'form', 'view_id': False, 'view_type': 'form', 'res_model': 'project.pro
#!/usr/bin/env python
"""Contains the Data Model for a cool Resource.
"""

__author__ = "Sanjay Joshi"
__copyright__ = "IBM Copyright 2017"
__credits__ = ["Sanjay Joshi"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Sanjay Joshi"
__email__ = "joshisa@us.ibm.com"
__status__ = "Prototype"

schema = {
    'url': 'corpora/ada_diabetes/concepts',
    'schema': {
        'cloudhost': {
            'type': 'string',
            'default': 'Powered by IBM Bluemix and Python Eve'
        },
        'base16': {
            'type': 'string',
            'default': '######'
        },
        'hex': {
            'type': 'string',
            'default': '##-##-##'
        },
        'organization': {
            'type': 'string',
            'default': 'Doh!MissingOrg'
        }
    },
    'allow_unknown': True
}
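# Hedged wiring sketch (module name assumed): Python Eve picks a resource up
# from the DOMAIN mapping in its settings file, keyed by endpoint name.
#
#   import concepts  # this module, name assumed
#   DOMAIN = {'concepts': concepts.schema}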
import astra def gpu_fp(pg, vg, v): v_id = astra.data2d.create('-vol', vg, v) rt_id = astra.data2d.create('-sino', pg) fp_cfg = astra.astra_dict('FP_CUDA') fp_cfg['VolumeDataId'] = v_id fp_cfg['ProjectionDataId'] = rt_id fp_id = astra.algorithm.create(fp_cfg) astra.algorithm.run(fp_id) out = astra.data2d.get(rt_id) astra.algorithm.delete(fp_id) astra.data2d.delete(rt_id) astra.data2d.delete(v_id) return out def gpu_bp(pg, vg, rt, supersampling=1): v_id = astra.data2d.create('-vol', vg) rt_id = astra.data2d.create('-sino', pg, data=rt) bp_cfg = astra.astra_dict('BP_CUDA') bp_cfg['ReconstructionDataId'] = v_id bp_cfg['ProjectionDataId'] = rt_id bp_id = astra.algorithm.create(bp_cfg) astra.algorithm.run(bp_id) out = astra.data2d.get(v_id) astra.algorithm.delete(bp_id) astra.data2d.delete(rt_id) astra.data2d.delete(v_id) return out def gpu_fbp(pg, vg, rt): rt_id = astra.data2d.create('-sino', pg, data=rt) v_id = astra.data2d.create('-vol', vg) fbp_cfg = astra.astra_dict('FBP_CUDA') fbp_cfg['ReconstructionDataId'] = v_id fbp_cfg['ProjectionDataId'] = rt_id #fbp_cfg['FilterType'] = 'none' fbp_id = astra.algorithm.create(fbp_cfg) astra.algorithm.run(fbp_id, 100) out = astra.data2d.get(v_id) astra.algorithm.delete(fbp_id) astra.data2d.delete(rt_id) astra.data2d.delete(v_id) return out def gpu_sirt(pg, vg, rt, n_iters=100): rt_id = astra.data2d.create('-sino', pg, data=rt) v_id = astra.data2d.create('-vol', vg) sirt_cfg = astra.astra_dict('SIRT_CUDA') sirt_cfg['ReconstructionDataId'] = v_id sirt_cfg['ProjectionDataId'] = rt_id #sirt_cfg['option'] = {} #sirt_cfg['option']['MinConstraint'] = 0 sirt_id = astra.algorithm.create(sir
t_cfg) astra.algorithm.run(sirt_id, n_iters) out = astra.data2d.get(v_id) astra.algorithm.delete(sirt_id) astra.data2d.
delete(rt_id) astra.data2d.delete(v_id) return out
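# Hedged usage sketch (assumes the ASTRA toolbox with CUDA support and a
# 256x256 phantom array; all sizes are illustrative):
#
#   import numpy as np
#   vg = astra.create_vol_geom(256, 256)
#   pg = astra.create_proj_geom('parallel', 1.0, 384,
#                               np.linspace(0, np.pi, 180, False))
#   sino = gpu_fp(pg, vg, phantom)
#   rec = gpu_sirt(pg, vg, sino, n_iters=200)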
# Copyright 2019 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Locates imports that violate cirq's submodule dependencies. Specifically, this test treats the modules as a tree structure where `cirq` is the root, each submodule is a node and each python file is a leaf node. While a node (module) is in the process of being imported, it is not allowed to import nodes for the first time other than its children. If a module was imported earlier by `cirq.__init__`, it may be imported. This is currently only enforced for the first level of submodules under cirq, not sub-submodules. Usage: dev_tools/import_test.py [-h] [--time] [--others] optional arguments: -h, --help show this help message and exit --time print a report of the modules that took the longest to import --others also track packages other than cirq and print when they are imported """ from typing import List import argparse import collections import os.path import subprocess import sys import time parser = argparse.ArgumentParser( description="Locates imports that violate cirq's submodule dependencies." ) parser.add_argument( '--time', action='store_true', help='print a report of the modules that took the longest to import', ) parser.add_argument( '--others', action='store_true', help='also track packages other than cirq and print when they are imported', ) def verify_import_tree(depth: int = 1, track_others: bool = False, timeit: bool = False) -> bool: """Locates imports that violate cirq's submodule dependencies by instrumenting python import machinery then importing cirq. Logs when each submodule (up to the given depth) begins and ends executing during import and prints an error when any import within a submodule causes a neighboring module to be imported for the first time. The indent pattern of the printed output will match the module tree structure if the imports are all valid. Otherwise an error is printed indicating the location of the invalid import. Output for valid imports: Start cirq ... Start cirq.study End cirq.study Start cirq.circuits End cirq.circuits Start cirq.schedules End cirq.schedules ... End cirq Output for an invalid import in `cirq/circuits/circuit.py`: Start cirq ... Start cirq.study End cirq.study Start cirq.circuits ERROR: cirq.circuits.circuit imported cirq.vis Start cirq.vis End cirq.vis ... # Possibly more errors caused by the first. End cirq.circuits Start cirq.schedules End cirq.schedules ... End cirq Invalid import: cirq.circuits.circuit imported cirq.vis Args: depth: How deep in the module tree to verify. If depth is 1, verifies that submodules of cirq like cirq.ops doesn't import cirq.circuit. If depth is 2, verifies that submodules and sub-submodules like cirq.ops.raw_types doesn't import cirq.ops.common_gates or cirq.circuit. track_others: If True, logs where cirq first imports an external package in addition to logging when cirq modules are imported. timeit: Measure the import time of cirq and each submodule and print a report of the worst. 
Includes times for external packages used by cirq if `track_others` is True. Returns: True is no import issues, False otherwise. """ fail_list = [] start_times = {} load_times = {} current_path: List[str] = [] currently_running_paths: List[List[str]] = [[]] import_depth = 0 indent = ' ' * 2 def wrap_module(module): nonlocal import_depth start_times[module.__name__] = time.perf_counter() path = module.__name__.split('.') if path[0] != 'cirq': if len(path) == 1: print(f'{indent * import_depth}Other {module.__name__}') return module currently_running_paths.append(path) if len(path) == len(current_path) + 1 and path[:-1] == current_path: # Move down in tree current_path.append(path[-1]) else: # Jump somewhere else in the tree handle_error(currently_running_paths[-2], path) current_path[:] = path if len(path) <= depth + 1: print(f'{indent * import_depth}Start {module.__name__}') import_depth += 1 return module def after_exec(module): nonlocal import_depth load_times[module.__name__] = time.perf_counter() - start_times[module.__name__] path = module.__name__.split('.') if path[0] != 'cirq': return assert path == currently_running_paths.pop(), 'Unexpected import state' if len(path) <= depth + 1: import_depth -= 1 print(f'{indent * import_depth}End {module.__name__}') if path == current_path: # No submodules were here current_path.pop() elif len(path) == len(current_path) - 1 and
path == current_path[:-1]: # Move up in tree current_path.pop() else: # Jump somewhere else in the tree current_path[:] = path[:-1] def handle_error(import_from, import_to): if import_from[: depth + 1] != import_to[: depth + 1]:
msg = f"{'.'.join(import_from)} imported {'.'.join(import_to)}" fail_list.append(msg) print(f'ERROR: {msg}') # Import wrap_module_executions without importing cirq orig_path = list(sys.path) project_dir = os.path.dirname(os.path.dirname(__file__)) cirq_dir = os.path.join(project_dir, 'cirq') sys.path.append(cirq_dir) # Put cirq/_import.py in the path. from cirq._import import wrap_module_executions # type: ignore sys.path[:] = orig_path # Restore the path. sys.path.append(project_dir) # Ensure the cirq package is in the path. # note that with the cirq.google injection we do change the metapath with wrap_module_executions('' if track_others else 'cirq', wrap_module, after_exec, False): # Import cirq with instrumentation import cirq # pylint: disable=unused-import sys.path[:] = orig_path # Restore the path. if fail_list: print() # Only print the first because later errors are often caused by the # first and not as helpful. print(f'Invalid import: {fail_list[0]}') if timeit: worst_loads = collections.Counter(load_times).most_common(15) print() print('Worst load times:') for name, dt in worst_loads: print(f'{dt:.3f} {name}') return not fail_list FAIL_EXIT_CODE = 65 def test_no_circular_imports(): """Runs the test in a subprocess because cirq has already been imported before in an earlier test but this test needs to control the import process. """ status = subprocess.call([sys.executable, __file__]) if status == FAIL_EXIT_CODE: # coverage: ignore raise Exception('Invalid import. See captured output for details.') elif status != 0: # coverage: ignore raise RuntimeError('Error in subprocess') if __name__ == '__main__': args = parser.parse_args() success = verify_import_tree(track_others=args.others, timeit=args.time) sys.exit(0 if success else FAIL_EXIT_CODE)
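# Example invocations (from the repository root), matching the usage notes in
# the module docstring:
#
#   python dev_tools/import_test.py
#   python dev_tools/import_test.py --time --others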
from flask import jsonify from flask_sqlalchemy import SQLAlchemy from sqlalchemy import Table, Column, Integer, ForeignKey from src.webservice.base import Base from src.webservice._action import Action db = SQLAlchemy() Base.query = db.session.query_property() class Input(Base): __tablename__ = 'tbl_InputPin' id = db.Column(db.Integer, primary_key=True) number = db.Column(db.Integer()) name = db.Column(db.String(50)) parent_id = db.Column(db.Integer, db.ForeignKey('tbl_Arduino.id')) parent = db.relationship('Device') time_between_clicks = db.Column(db.Float(50)) actions = db.relationship("Action", secondary="tbl_InputPin_Action") @staticmethod def get_all_inputs(): inputs = Input.query.outerjoin(Input.actions).all()
output = [] for input in inputs: actions = [] actions_id = [] for action in input.actions: actions.append(action.name) actions_id.append(action.id) input_data = {'id': input.id, 'name': input.name, 'device_name': input.parent.name, 'actions_id': actions_id, 'number': input.number, 'time
_between_clicks': input.time_between_clicks,
                          'actions': actions}
            output.append(input_data)
        db.session.commit()
        return jsonify({'response': output})

    @staticmethod
    def update_input(request):
        data = request.get_json()
        input = db.session.query(Input).filter_by(id=data['id']).first()
        if 'name' in data:
            input.name = data['name']
        if 'time_between_clicks' in data:
            input.time_between_clicks = data['time_between_clicks']
        # check for the key in the payload; the bare string 'actions_id' is
        # always truthy and would raise KeyError when the key is absent
        if 'actions_id' in data:
            actions = Action.get_actions(data['actions_id'])
            input.actions = actions
        db.session.commit()
        return jsonify({'result': 'User has been changed'})
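# Illustrative JSON payload for update_input (field names taken from the
# handler above; values are made up):
#
#   {"id": 1, "name": "kitchen switch",
#    "time_between_clicks": 0.4, "actions_id": [2, 5]}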
from distutils.core import setup setup( name='Chroma', version='0.2.0', author='Seena Burns', author_email='hello@seenaburns.com', url='https://github.com/seenaburns/Chroma', license=open('LICENSE.txt').read(), description='Color handling made simple.', long_description=open('README.rst').read() + '\n\n' + open('HISTORY.rst').read(), packages=['chroma'], classifiers=( 'Development Status :: 5 - Production/Stable', 'Intende
d Audience :: Dev
elopers', 'Natural Language :: English', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7' ), )
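# Hedged usage sketch for the packaged library (API names assumed from the
# project README; the hex value is illustrative):
#
#   import chroma
#   color = chroma.Color('#00FF00')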
self.data)) else: raise ValueError( "must specify beta0 or provide an estimater with the model" ) else: self.beta0 = _conv(beta0) self.delta0 = _conv(delta0) # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit # platforms. # XXX: some other FORTRAN compilers may not agree. self.ifixx = _conv(ifixx, dtype=numpy.int32) self.ifixb = _conv(ifixb, dtype=numpy.int32) self.job = job self.iprint = iprint self.errfile = errfile self.rptfile = rptfile self.ndigit = ndigit self.taufac = taufac self.sstol = sstol
self.partol = partol self.maxit = maxit self.stpb = _conv(stpb) self.stpd = _conv(st
pd) self.sclb = _conv(sclb) self.scld = _conv(scld) self.work = _conv(work) self.iwork = _conv(iwork) self.output = None self._check() def _check(self): """ Check the inputs for consistency, but don't bother checking things that the builtin function odr will check. """ x_s = list(self.data.x.shape) if isinstance(self.data.y, numpy.ndarray): y_s = list(self.data.y.shape) if self.model.implicit: raise odr_error("an implicit model cannot use response data") else: # implicit model with q == self.data.y y_s = [self.data.y, x_s[-1]] if not self.model.implicit: raise odr_error("an explicit model needs response data") self.set_job(fit_type=1) if x_s[-1] != y_s[-1]: raise odr_error("number of observations do not match") n = x_s[-1] if len(x_s) == 2: m = x_s[0] else: m = 1 if len(y_s) == 2: q = y_s[0] else: q = 1 p = len(self.beta0) # permissible output array shapes fcn_perms = [(q, n)] fjacd_perms = [(q, m, n)] fjacb_perms = [(q, p, n)] if q == 1: fcn_perms.append((n,)) fjacd_perms.append((m, n)) fjacb_perms.append((p, n)) if m == 1: fjacd_perms.append((q, n)) if p == 1: fjacb_perms.append((q, n)) if m == q == 1: fjacd_perms.append((n,)) if p == q == 1: fjacb_perms.append((n,)) # try evaluating the supplied functions to make sure they provide # sensible outputs arglist = (self.beta0, self.data.x) if self.model.extra_args is not None: arglist = arglist + self.model.extra_args res = self.model.fcn(*arglist) if res.shape not in fcn_perms: print res.shape print fcn_perms raise odr_error("fcn does not output %s-shaped array" % y_s) if self.model.fjacd is not None: res = self.model.fjacd(*arglist) if res.shape not in fjacd_perms: raise odr_error( "fjacd does not output %s-shaped array" % (q, m, n)) if self.model.fjacb is not None: res = self.model.fjacb(*arglist) if res.shape not in fjacb_perms: raise odr_error( "fjacb does not output %s-shaped array" % (q, p, n)) # check shape of delta0 if self.delta0 is not None and self.delta0.shape != self.data.x.shape: raise odr_error( "delta0 is not a %s-shaped array" % self.data.x.shape) def _gen_work(self): """ Generate a suitable work array if one does not already exist. """ n = self.data.x.shape[-1] p = self.beta0.shape[0] if len(self.data.x.shape) == 2: m = self.data.x.shape[0] else: m = 1 if self.model.implicit: q = self.data.y elif len(self.data.y.shape) == 2: q = self.data.y.shape[0] else: q = 1 if self.data.we is None: ldwe = ld2we = 1 elif len(self.data.we.shape) == 3: ld2we, ldwe = self.data.we.shape[1:] else: # Okay, this isn't precisely right, but for this calculation, # it's fine ldwe = 1 ld2we = self.data.we.shape[1] if self.job % 10 < 2: # ODR not OLS lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) else: # OLS not ODR lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + 5*q + q*(p+m) + ldwe*ld2we*q) if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\ and self.work.dtype.str.endswith('f8'): # the existing array is fine return else: self.work = numpy.zeros((lwork,), float) def set_job(self, fit_type=None, deriv=None, var_calc=None, del_init=None, restart=None): """ Sets the "job" parameter is a hopefully comprehensible way. If an argument is not specified, then the value is left as is. The default value from class initialization is for all of these options set to 0. 
Parameters ---------- fit_type : {0, 1, 2} int 0 -> explicit ODR 1 -> implicit ODR 2 -> ordinary least-squares deriv : {0, 1, 2, 3} int 0 -> forward finite differences 1 -> central finite differences 2 -> user-supplied derivatives (Jacobians) with results checked by ODRPACK 3 -> user-supplied derivatives, no checking var_calc : {0, 1, 2} int 0 -> calculate asymptotic covariance matrix and fit parameter uncertainties (V_B, s_B) using derivatives recomputed at the final solution 1 -> calculate V_B and s_B using derivatives from last iteration 2 -> do not calculate V_B and s_B del_init : {0, 1} int 0 -> initial input variable offsets set to 0 1 -> initial offsets provided by user in variable "work" restart : {0, 1} int 0 -> fit is not a restart 1 -> fit is a restart Notes ----- The permissible values are different from those given on pg. 31 of the ODRPACK User's Guide only in that one cannot specify numbers greater than the last value for each variable. If one does not supply functions to compute the Jacobians, the fitting procedure will change deriv to 0, finite differences, as a default. To initialize the input variable offsets by yourself, set del_init to 1 and put the offsets into the "work" variable correctly. """ if self.job is None: job_l = [0, 0, 0, 0, 0] else: job_l = [self.job / 10000 % 10, self.job / 1000 % 10, self.job / 100 % 10, self.job / 10 % 10, self.job % 10] if fit_type in (0, 1, 2): job_l[4] = fit_type if deriv in (0, 1, 2, 3): job_l[3] = deriv if var_calc in (0, 1, 2): job_l[2] = var_calc if del_init in (0, 1): job_l[1] = del_init if restart in (0, 1): job_l[0] = restart self.job = (job_l[0]*10000 + job_l[1]*1000 + job_l[2]*100 + job_l[3]*10 + job_l[4]) def set_iprint(self, init=None, so_init=None, iter=None, so_iter=None, iter_step=None, final=None, so_final=None): """ Set the iprint parameter for the printing of computation reports. If any of the arguments are specified here, then they are set in the iprint member. If iprint is not set manually or with this method, then ODRPACK defaults to no printing. If no filename is specified wi
# -*- encoding: utf-8 -*- ################################################################################ # # # Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol # # # # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU Affero General Public License as publ
ished by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU Affero General Public License for more details. # # # # You should have received a copy of the GNU Affero General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################ import clv_medicament_template_wkf
# # [The "BSD license"] # Copyright (c) 2012 Terence Parr # Copyright (c) 2012 Sam Harwell # Copyright (c) 2014 Eric Vergnaud # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #/ # A DFA walker that knows how to dump them to serialized strings.#/ from io import StringIO from antlr4.Utils import str_list class DFASerializer(object): def __init__(self, dfa, literalNames=None, symbolicNames=None): self.dfa = dfa self.literalNames = literalNames self.symbolicNames = symbolicNames def __str__(self): return unicode(self) def __unicode__(self): if self.dfa
.s0 is None: return None with StringIO() as buf: for s in self.dfa.sortedStates(): n = 0 if s.edges is not None: n = len(s.edges) for i in range(0, n): t = s.edges[i] if t is not None and t.stateNumber != 0x7FFFFFFF: buf.write(self.getStateString(s)) label = self.getEdgeLabel(i)
buf.write(u"-") buf.write(label) buf.write(u"->") buf.write(self.getStateString(t)) buf.write(u'\n') output = buf.getvalue() if len(output)==0: return None else: return output def getEdgeLabel(self, i): if i==0: return u"EOF" if self.literalNames is not None and i<=len(self.literalNames): return self.literalNames[i-1] elif self.symbolicNames is not None and i<=len(self.symbolicNames): return self.symbolicNames[i-1] else: return unicode(i-1) def getStateString(self, s): n = s.stateNumber baseStateStr = ( u":" if s.isAcceptState else u"") + u"s" + unicode(n) + \ ( u"^" if s.requiresFullContext else u"") if s.isAcceptState: if s.predicates is not None: return baseStateStr + u"=>" + str_list(s.predicates) else: return baseStateStr + u"=>" + unicode(s.prediction) else: return baseStateStr class LexerDFASerializer(DFASerializer): def __init__(self, dfa): super(LexerDFASerializer, self).__init__(dfa, None) def getEdgeLabel(self, i): return u"'" + unichr(i) + u"'"
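# Hedged usage sketch (object and attribute names are illustrative): dump the
# DFAs accumulated by a lexer's interpreter after tokenizing some input.
#
#   for dfa in lexer._interp.decisionToDFA:
#       text = unicode(LexerDFASerializer(dfa))
#       if text is not None:
#           print text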
import requests
import json


class DisqusAPI(object):
    """
    Lightweight solution to make API calls to Disqus:
    More info: https://disqus.com/api/docs
    """

    def __init__(self, api_key, api_secret, version='3.0', formats='json'):
        self.api_key = api_key
        self.api_secret = api_secret
        self.version = version
        self.formats = formats

    def get(self, method, **kwargs):
        """
        Make get requests to retrieve data from Disqus
        """
        endpoint = 'https://disqus.com/api/{version}/{method}.{formats}'
        url = endpoint.format(
            version=self.version,
            method=method.replace('.', '/'),
            formats=self.formats
        )
        kwargs.update({
            'api_key': self.api_key,
            'api_secret': self.api_secret,
        })
        response = requests.get(url, params=kwargs)
        # TODO: support other formats like rss
        if self.formats == 'json':
            return json.loads(response.content.decode())
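# Usage sketch for the wrapper above (the key, secret and forum name are
# placeholder values, not real credentials):
#
#   api = DisqusAPI('PUBLIC_KEY', 'SECRET_KEY')
#   data = api.get('threads.list', forum='my-forum', limit=25)
#   for thread in data['response']:
#       print(thread['title'])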
#!/usr/bin/env python2
# Copyright (c) 2015 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Test multiple rpc user config option rpcauth
#

from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
import base64

try:
    import http.client as httplib
except ImportError:
    import httplib
try:
    import urllib.parse as urlparse
except ImportError:
    import urlparse


class HTTPBasicsTest(AureusTestFramework):
    def setup_nodes(self):
        return start_nodes(4, self.options.tmpdir)

    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain(self.options.tmpdir)
        # Append rpcauth to aureus.conf before initialization
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        with open(os.path.join(self.options.tmpdir+"/node0", "aureus.conf"), 'a') as f:
            f.write(rpcauth + "\n")
            f.write(rpcauth2 + "\n")

    def run_test(self):
        ##################################################
        # Check correctness of the rpcauth config option #
        ##################################################
        url = urlparse.urlparse(self.nodes[0].url)

        # Old authpair
        authpair = url.username + ':' + url.password

        # New authpair generated via share/rpcuser tool
        rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
        password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="

        # Second authpair with different username
        rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
        password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
        authpairnew = "rt:" + password

        headers = {"Authorization": "Basic " + base64.b64encode(authpair)}

        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status == 401, False)
        conn.close()

        # Use new authpair to confirm both work
        headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}

        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status == 401, False)
        conn.close()

        # Wrong login name with rt's password
        authpairnew = "rtwrong:" + password
        headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}

        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status == 401, True)
        conn.close()

        # Wrong password for rt
        authpairnew = "rt:" + password + "wrong"
        headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}

        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status == 401, True)
        conn.close()

        # Correct for rt2
        authpairnew = "rt2:" + password2
        headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}

        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status == 401, False)
        conn.close()

        # Wrong password for rt2
        authpairnew = "rt2:" + password2 + "wrong"
        headers = {"Authorization": "Basic " + base64.b64encode(authpairnew)}

        conn = httplib.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        resp = conn.getresponse()
        assert_equal(resp.status == 401, True)
        conn.close()


if __name__ == '__main__':
    HTTPBasicsTest().main()
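# For reference, the rpcauth lines used above follow the scheme
#   rpcauth=<user>:<salt>$<hmac_sha256(key=salt, msg=password)>
# as produced by the share/rpcuser helper. A minimal sketch of that
# derivation (illustrative, not part of this test):
#
#   import hmac, hashlib
#   def rpcauth_line(user, salt, password):
#       digest = hmac.new(salt.encode(), password.encode(), hashlib.sha256).hexdigest()
#       return "rpcauth=%s:%s$%s" % (user, salt, digest)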
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)

from odoo import fields, models


class ResPartner(models.Model):
    _inherit = "res.partner"

    sale_journal_id = fields.Many2one(
        "account.journal", "Default journal", domain=[("type", "=", "sale")]
    )
    purchase_journal_id = fields.Many2one(
        "account.journal", "Default journal", domain=[("type", "=", "purchase")]
    )
uery_qualities",)) # reset qual b = self.buildRead() # check flags: for x in ( "is_paired", "is_proper_pair", "is_unmapped", "mate_is_unmapped", "is_reverse", "mate_is_reverse", "is_read1", "is_read2", "is_secondary", "is_qcfail", "is_duplicate", "is_supplementary"): setattr(b, x, True) self.assertEqual(getattr(b, x), True) checkFieldEqual(self, a, b, ("flag", x,)) setattr(b, x, False) self.assertEqual(getattr(b, x), False) checkFieldEqual(self, a, b) def testUpdate2(self): '''issue 135: inplace update of sequence and quality score. This does not work as setting the sequence will erase the quality scores. ''' a = self.buildRead() a.query_sequence = a.query_sequence[5:10] self.assertEqual(pysam.toQualityString(a.query_qualities), None) a = self.buildRead() s = pysam.toQualityString(a.query_qualities) a.query_sequence = a.query_sequence[5:10] a.query_qualities = pysam.fromQualityString(s[5:10]) self.assertEqual(pysam.toQualityString(a.query_qualities), s[5:10]) def testLargeRead(self): '''build an example read.''' a = pysam.AlignedSegment() a.query_name = "read_12345" a.query_sequence = "ACGT" * 200 a.flag = 0 a.reference_id = 0 a.reference_start = 20 a.mapping_quality = 20 a.cigartuples = ((0, 4 * 200), ) a.next_reference_id = 0 a.next_reference_start = 200 a.template_length = 167 a.query_qualities = pysam.fromQualityString("1234") * 200 return a def testUpdateTlen(self): '''check if updating tlen works''' a = self.buildRead() oldlen = a.template_length oldlen *= 2 a.template_length = oldlen self.assertEqual(a.template_length, oldlen) def testPositions(self): a = self.buildRead() self.assertEqual(a.get_reference_positions(), [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59]) self.assertEqual(a.get_aligned_pairs(), [(0, 20), (1, 21), (2, 22), (3, 23), (4, 24), (5, 25), (6, 26), (7, 27), (8, 28), (9, 29), (None, 30), (10, 31), (11, 32), (12, 33), (13, 34), (14, 35), (15, 36), (16, 37), (17, 38), (18, 39), (19, None), (20, 40), (21, 41), (22, 42), (23, 43), (24, 44), (25, 45), (26, 46), (27, 47), (28, 48), (29, 49), (30, 50), (31, 51), (32, 52), (33, 53), (34, 54), (35, 55), (36, 56), (37, 57), (38, 58), (39, 59)]) self.assertEqual( a.get_reference_positions(), [x[1] for x in a.get_aligned_pairs() if x[0] is not None and x[1] is not None]) # alen is the length of the aligned read in genome self.assertEqual(a.reference_length, a.get_aligned_pairs()[-1][0] + 1) # aend points to one beyond last aligned base in ref self.assertEqual(a.get_reference_positions()[-1], a.reference_end - 1) def testFullReferencePositions(self): '''see issue 26''' a = self.buildRead() a.cigar = [(4, 30), (0, 20), (1, 3), (0, 47)] self.assertEqual(100, len(a.get_reference_positions(full_length=True))) def testBlocks(self): a = self.buildRead() self.assertEqual(a.get_blocks(), [(20, 30), (31, 40), (40, 60)]) def test_get_aligned_pairs_soft_clipping(self): a = pysam.AlignedSegment() a.query_name = "read_12345" a.query_sequence = "ACGT" * 10 a.flag = 0 a.reference_id = 0 a.reference_start = 20 a.mapping_quality = 20 a.cigartuples = ((4, 2), (0, 35), (4, 3)) a.query_qualities = pysam.fromQualityString("1234") * 10 self.assertEqual(a.get_aligned_pairs(), [(0, None), (1, None)] + [(qpos, refpos) for (qpos, refpos) in zip( range(2, 2 + 35), range(20, 20 + 35))] + [(37, None), (38, None), (39, None)] ) self.assertEqual(a.get_aligned_pairs(True), # [(0, None), (1, None)] + [(qpos, refpos) for (qpos, refpos) in zip( range(2, 2 
+ 35), range(20, 20 + 35))] # [(37, None), (38, None), (39, None)] ) def test_get_aligned_pairs_hard_clipping(self): a = pysam.AlignedSegment() a.query_name = "read_12345" a.query_sequence = "ACGT" * 10 a.flag = 0 a.reference_id = 0 a.reference_start = 20 a.mapping_quality = 20 a.cigartuples = ((5, 2), (0, 35), (5, 3)) a.query_qualities = pysam.fromQualityString("1234") * 10 self.assertEqual(a.get_aligned_pairs(), # No seq, no seq pos [(qpos, refpos) for (qpos, refpos) in zip( range(0, 0 + 35), range(20, 20 + 35))]) self.assertEqual(a.get_aligned_pairs(True), [(qpos, refpos) for (qpos, refpos) in zip( range(0, 0 + 35), range(20, 20 + 35))]) def test_get_aligned_pairs_skip(self): a = pysam.AlignedSegment() a.query_name = "read_12345" a.query_sequence = "ACGT" * 10 a.flag = 0 a.reference_id = 0 a.reference_start = 20
a.mapping_quality = 20 a.cigartuples = ((0, 2), (3, 100), (0, 38)) a.query_qualities = pysam.fromQualityString("1234") * 10 self.assertEqual(a.get_aligned_pairs(), [(0, 20), (1, 21)] + [(None, refpos) for refpos in range(22, 22 + 100)] + [(qpos, refpos) for (qpos, refpos) in zip( range(2, 2 + 38), range(20 + 2 + 100, 20 + 2 + 100 + 38))]) self.assertEqual(a.get_aligned_pairs(True), [(0, 20), (1, 21)] + # [(None, refpos) for refpos in range(21, 21+100)] + [(qpos, refpos) for (qpos, refpos) in zip( range(2, 2 + 38), range(20 + 2 + 100, 20 + 2 + 100 + 38))]) def test_get_aligned_pairs_match_mismatch(self): a = pysam.AlignedSegment() a.query_name = "read_12345" a.query_sequence = "ACGT" * 10 a.flag = 0 a.reference_id = 0 a.reference_start = 20 a.mapping_quality = 20 a.cigartuples = ((7, 20), (8, 20)) a.query_qualities = pysam.fromQualityString("1234") * 10 self.assertEqual(a.get_aligned_pairs(), [(qpos, refpos) for (qpos, refpos) in zip( range(0, 0 + 40), range(20, 20 + 40))]) self.assertEqual(a.get_aligned_pairs(True), [(qpos, refpos) for (qpos, refpos) in zip( range(0, 0 + 40), range(20, 20 + 40))]) def test_get_aligned_pairs_padding(self): a = pysam.AlignedSegment() a.query_name = "read_12345" a.query_sequence = "ACGT" * 10 a.flag = 0 a.reference_id = 0 a.reference_start = 20 a.mapping_quality = 20 a.cigartuples = ((7, 20), (6, 1), (8, 19)) a.query_qualities = pysam.fromQualityString("1234") * 10 def inner(): a.get_aligned_pairs() # padding is not bei
from django import views from django.shortcuts import render, get_object_or_404 from django.views.generic import TemplateView from django.views.generic.edit import CreateView from .models import * from .forms import * import requests import http from django.urls import reverse_lazy from django.views.decorators.csrf import csrf_exempt from django.http import JsonResponse class Template404(TemplateView): template_name = "404.html" class Home(TemplateView): template_name = 'home/home.html' class Topics(views.View): def get(self, request, *args, **kwargs): return render(request, "home/resources/topics.html", {'topics': Topic.objects.all()}) class Resources(views.View): def get(self, request, pk, *args, **kwargs): topic = get_object_or_404(Topic, pk=pk) return render(request, "home/resources/resources.html", {'resources': topic.resource_set.all(), 'topic' : topic}) class BlogPostList(views.View): def get(self, request, *args, **kwargs): posts = BlogPost.objects.all() return render(request, "home/blog/index.html", {'posts': posts}) class BlogPostView(views.View): def get(self, request, pk, *args, **kwargs): post = get_object_or_404(BlogPost, pk=pk) return render(request, "home/blog/blog_post.html", {'post': post}) class Leaderboard(views.View): def get(self, request, *args, **kwargs): users = Users.objects.all() for user in users: connected = False while not connected: try: user_name = user.github_handle response = requests.get('https://api.github.com/search/issues?sort=created&q=author:{}&type:pr&per_page=100'.format(user_name), verify = False).json() pr_count = 0 print(response) for obj in response['items']: if('pull_request' in obj):
if('2018-09-30T00:00:00Z'<obj['created_at']<'2018-10-31T23:59:59Z'): pr_count += 1 user.pr_count = pr_count user.save() connected = True except: pass return render(request, 'home/leaderboard.html', {'users': users}) class RegisterUser(CreateView): form_class = RegisterUserForm template_name = "home/registeruser.html" success_url = reverse_lazy('home:home') @csrf_exempt def GithubEmailCheck(request): github_handle = request.POST.get('github_handle') email = request.POST.get('email') print("Received ", github_handle) users = Users.objects.all() for user in users: if user.github_handle == github_handle: return JsonResponse({'message' : 'Duplicate Github Handle'}) if user.email == email: return JsonResponse({'message' : 'Duplicate Email'}) return JsonResponse({'message' : 'New'}) @csrf_exempt def GithubCheck(request): github_handle = request.POST.get('github_handle') response = requests.get("https://api.github.com/users/{}".format(github_handle), verify = False).json() print("https://api.github.com/users/{}".format(github_handle)) if ('login' in response): print("Found") return JsonResponse({'message' : 'Found'}) else: return JsonResponse({'message' : 'Not Found'})
import sys

r, c = map(int, input().split())
while r and c:
    lines = [input().strip() for i in range(r)]
    rotatedLines = []
    for i in range(c):
        rotatedLines.append("".join([lines[j][i] for j in range(r)]))
    rotatedLines.sort(key=lambda s: s.lower())
    for i in range(r):
        print("".join([rotatedLines[j][i] for j in range(c)]))
    print()
    r, c = map(int, input().split())
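# The loop above sorts the *columns* of the r x c grid case-insensitively:
# it transposes the grid into c column-strings, sorts those, then transposes
# back. Worked example (sketch): for the 2x3 input
#   bAc
#   dFe
# the columns are "bd", "AF", "ce"; sorted case-insensitively they become
# "AF", "bd", "ce", so the printed rows are "Abc" and "Fde".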
import json
import sqlite3


def get_room(id, dbfile):
    ret = None
    con = sqlite3.connect(dbfile)
    for row in con.execute("select json from rooms where id=?", (id,)):
        jsontext = row[0]
        # Outputs the JSON response
        # print("json = " + jsontext)
        d = json.loads(jsontext)
        d['id'] = id
        ret = Room(**d)
        break
    con.close()
    return ret


class Room():
    def __init__(self, id=0, name="A room", description="An empty room",
                 neighbors=None):
        self.id = id
        self.name = name
        self.description = description
        # Avoid a shared mutable default argument: every Room created
        # without neighbors gets its own dict.
        self.neighbors = neighbors if neighbors is not None else {}

    def _neighbor(self, direction):
        if direction in self.neighbors:
            return self.neighbors[direction]
        else:
            return None

    def north(self):
        return self._neighbor('n')

    def south(self):
        return self._neighbor('s')

    def east(self):
        return self._neighbor('e')

    def west(self):
        return self._neighbor('w')
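# Usage sketch: assumes a schema like
#   CREATE TABLE rooms (id INTEGER PRIMARY KEY, json TEXT);
# where the json column holds the Room fields as a JSON object, e.g.
#   {"name": "Hall", "description": "...", "neighbors": {"n": 2, "e": 3}}
#
#   room = get_room(1, "rooms.db")
#   if room is not None:
#       print(room.name, "north ->", room.north())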
# Generated by Django 2.0.5 on 2018-05-10 22:56

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [("studies", "0001_initial")]

    operations = [
        migrations.AlterModelOptions(name="extension", options={"ordering": ("-id",)})
    ]
from markupsafe import escape
import re

from pymongo.objectid import ObjectId
from pymongo.errors import InvalidId

from app.people.people_model import People
from app.board.board_model import BoardTopic, BoardNode

from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options

from lib.filter import none2string, mentions, video, urlink
from lib.utils import html_escape, br_escape

cache_opts = {
    'cache.type': 'file',
    'cache.data_dir': '/tmp/caches/data',
    'cache.lock_dir': '/tmp/caches/lock',
    'cache.regions': 'short_term, long_term',
    #'cache.short_term.type': 'ext:memcached',
    #'cache.short_term.url': '127.0.0.1.11211',
    'cache.short_term.type': 'file',
    'cache.short_term.expire': '1200',
    'cache.long_term.type': 'file',
    'cache.long_term.expire': '3600',
}

cache = CacheManager(**parse_cache_config_options(cache_opts))


@cache.region('short_term', 'cached_people')
def get_cached_people(people_id):
    try:
        people = People.objects.with_id(people_id)
        return people
    except InvalidId, error:
        pass
    return None


def fetch_cached_people(people_id, reflush=False):
    if reflush:
        cache.region_invalidate(get_cached_people, None, 'cached_people', people_id)
    return get_cached_people(people_id)


@cache.region('long_term', 'cached_board_topic')
def get_cached_board_topic(topic_id):
    try:
        topic = BoardTopic.objects.with_id(topic_id)
        if topic is None:
            return None
        if topic.content:
            topic.html_content = urlink(escape(topic.content))
            #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
        else:
            topic.html_content = ''
        return topic
    except Exception, error:
        return None
    return None


def fetch_cached_board_topic(topic_id, reflush=False):
    if reflush:
        cache.region_invalidate(get_cached_board_topic, None, 'cached_board_topic', topic_id)
    return get_cached_board_topic(topic_id)


@cache.region('long_term', 'cached_board_topic_morecontent')
def get_cached_board_topic_morecontent(topic_id):
    try:
        topic = fetch_cached_board_topic(topic_id)
        if topic is None:
            return None
        html_more_content = ''
        if topic.more_content:
            html_more_content = br_escape(urlink(escape(topic.more_content)))
            #urlink((mentions(youku(escape(topic.content)) ) ) , trim_url_limit=30)
        extra_content = ''
        if topic.video_urls:
            video_html = '<p></p>'
            for url in topic.video_urls:
                video_html += video(url)
            extra_content = video_html
        return html_more_content + extra_content
    except Exception, error:
        return None
    return None


def fetch_cached_board_topic_morecontent(topic_id, reflush=False):
    if reflush:
        # Invalidate this function's own region; the original passed
        # get_cached_board_topic here, which never cleared this cache.
        cache.region_invalidate(get_cached_board_topic_morecontent, None,
                                'cached_board_topic_morecontent', topic_id)
    return get_cached_board_topic_morecontent(topic_id)


@cache.region('long_term', 'cached_board_nodelist')
def get_cached_board_nodelist(cache='board_nodelist'):
    try:
        nodelist = BoardNode.get_top_nodes()
        return list(nodelist)
    except InvalidId, error:
        pass
    return None


def fetch_cached_board_nodelist(reflush=False):
    if reflush:
        cache.region_invalidate(get_cached_board_nodelist, None, 'cached_board_nodelist', 'board_nodelist')
    return get_cached_board_nodelist('board_nodelist')
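# Usage sketch: read a topic through the cache, and force a refresh after
# the underlying document changes (region_invalidate drops the namespaced
# key so the next call repopulates it):
#
#   topic = fetch_cached_board_topic(topic_id)
#   # ... update the topic in MongoDB ...
#   topic = fetch_cached_board_topic(topic_id, reflush=True)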
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import unittest from multiprocessing import Process import signal import numpy import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid.layers.io import ListenAndServ from paddle.fluid.layers.io import Recv from paddle.fluid.layers.io import Send import paddle.fluid.layers.ops as ops from paddle.fluid.transpiler.details import program_to_code class TestProgram2Code(unittest.TestCase): def test_print(self): place = fluid.CPUPlace() self.init_serv(place) self.init_client(place, 9123) def init_serv(self, place): main = fluid.Program() with fluid.program_guard(main):
serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False) with serv.do(): out_var = main.global_block().create_var( name="scale_0.tmp_0", persistable=True, dtype="float32", shape=[32, 32]) x = layers.data( shape=[32, 32], dtype='float32', name="X", append_batch_size=False) fluid.initializer.Constant(value=1.0)(x, main.global_block()) ops._scale(x=x, scale=10.0, out=out_var) program_to_code(main) def init_client(self, place, port): main = fluid.Program() with fluid.program_guard(main): x = layers.data( shape=[32, 32], dtype='float32', name='X', append_batch_size=False) fluid.initializer.Constant(value=2.3)(x, main.global_block()) get_var = main.global_block().create_var( name="scale_0.tmp_0", # server side var dtype="float32", persistable=False, shape=[32, 32]) fluid.initializer.Constant(value=2.3)(get_var, main.global_block()) Send("127.0.0.1:%d" % port, [x]) o = Recv("127.0.0.1:%d" % port, [get_var]) program_to_code(main) if __name__ == "__main__": unittest.main()
__authors__ = "" __copyright__ = "(c) 2014, pymal" __license__ = "BSD License" __contact__ = "Name Of Curr
ent Guardian of this file <email@address>" USER_AGENT = 'api-ind
iv-0829BA2B33942A4A5E6338FE05EFB8A1' HOST_NAME = "http://myanimelist.net" DEBUG = False RETRY_NUMBER = 4 RETRY_SLEEP = 1 SHORT_SITE_FORMAT_TIME = '%b %Y' LONG_SITE_FORMAT_TIME = '%b %d, %Y' MALAPPINFO_FORMAT_TIME = "%Y-%m-%d" MALAPPINFO_NONE_TIME = "0000-00-00" MALAPI_FORMAT_TIME = "%Y%m%d" MALAPI_NONE_TIME = "00000000"
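# The format strings above are meant for time.strptime/strftime, e.g. (sketch):
#
#   import time
#   time.strptime('Sep 2014', SHORT_SITE_FORMAT_TIME)      # '%b %Y'
#   time.strptime('Sep 01, 2014', LONG_SITE_FORMAT_TIME)   # '%b %d, %Y'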
from unittest import TestCase import os from opencog.atomspace import AtomSpace, TruthValue, Atom, types from opencog.bindlink import stub_bindlink, bindlink, single_bindlink,\ first_n_bindlink, af_bindlink, \ satisfaction_link, satisfying_set, \ satisfying_element, first_n_satisfying_set, \ execute_atom, evaluate_atom from opencog.type_constructors import * from opencog.utilities import initialize_opencog, finalize_opencog from test_functions import green_count, red_count __author__ = 'Curtis Faith' class BindlinkTest(TestCase): bindlink_atom = None getlink_atom = None atomspace = AtomSpace() starting_size = 0 def setUp(self): print "setUp - atomspace = ", self.atomspace # Clear atoms from previous test self.atomspace.clear() # Initialize Python initialize_opencog(self.atomspace) set_type_ctor_atomspace(self.atomspace) # Define several animals and something of a different type as well InheritanceLink( ConceptNode("Frog"), ConceptNode("animal")) InheritanceLink( ConceptNode("Zebra"), ConceptNode("animal")) InheritanceLink( ConceptNode("Deer"), ConceptNode("animal")) InheritanceLink( ConceptNode("Spaceship"), ConceptNode("machine")) # Define a graph search query self.bindlink_atom = \ BindLink( # The variable node to be grounded. VariableNode("$var"), # The pattern to be grounded. InheritanceLink( VariableNode("$var"), ConceptNode("animal") ), # The grounding to be returned.
VariableNode("$var") # bindlink needs a handle ) # Define a pattern to be grounded self.getlink_atom = \ GetLink( InheritanceLink( VariableNode("$var"), ConceptNode("animal") ) ) # Remember the starting atomspace size. self.starting_size = self.atomspace.size() def tearDown(self): print "tearDown -
atomspace = ", self.atomspace # Can't do this; finalize can be called only once, ever, and # then never again. The second call will never follow through. # Also, cannot create and delete atomspaces here; this will # confuse the PythonEval singletonInstance. # finalize_opencog() # del self.atomspace def test_stub_bindlink(self): # Remember the starting atomspace size. This test should not # change the atomspace. starting_size = self.atomspace.size() # Run bindlink. atom = stub_bindlink(self.atomspace, self.bindlink_atom) self.assertTrue(atom is not None) # Check the ending atomspace size, it should be the same. ending_size = self.atomspace.size() self.assertEquals(ending_size, starting_size) def _check_result_setlink(self, atom, expected_arity): # Check if the atom is a SetLink self.assertTrue(atom is not None) self.assertEquals(atom.type, types.SetLink) # Check the ending atomspace size, it should have added one SetLink. ending_size = self.atomspace.size() self.assertEquals(ending_size, self.starting_size + 1) # The SetLink should have expected_arity items in it. self.assertEquals(atom.arity, expected_arity) def test_bindlink(self): atom = bindlink(self.atomspace, self.bindlink_atom) self._check_result_setlink(atom, 3) def test_single_bindlink(self): atom = single_bindlink(self.atomspace, self.bindlink_atom) self._check_result_setlink(atom, 1) def test_first_n_bindlink(self): atom = first_n_bindlink(self.atomspace, self.bindlink_atom, 5) self._check_result_setlink(atom, 3) def test_af_bindlink(self): atom = af_bindlink(self.atomspace, self.bindlink_atom) # The SetLink is empty. ??? Should it be. self._check_result_setlink(atom, 0) def test_satisfying_set(self): atom = satisfying_set(self.atomspace, self.getlink_atom) self._check_result_setlink(atom, 3) def test_satisfying_element(self): atom = satisfying_element(self.atomspace, self.getlink_atom) self._check_result_setlink(atom, 1) def test_first_n_satisfying_set(self): atom = first_n_satisfying_set(self.atomspace, self.getlink_atom, 5) self._check_result_setlink(atom, 3) def test_satisfy(self): satisfaction_atom = SatisfactionLink( VariableList(), # no variables SequentialAndLink( EvaluationLink( GroundedPredicateNode("py: test_functions.stop_go"), ListLink( ConceptNode("green light") ) ), EvaluationLink( GroundedPredicateNode("py: test_functions.stop_go"), ListLink( ConceptNode("green light") ) ), EvaluationLink( GroundedPredicateNode("py: test_functions.stop_go"), ListLink( ConceptNode("red light") ) ), EvaluationLink( GroundedPredicateNode("py: test_functions.stop_go"), ListLink( ConceptNode("traffic ticket") ) ) ) ) atom = satisfaction_link(self.atomspace, satisfaction_atom) self.assertTrue(atom is not None and atom.mean <= 0.5) self.assertEquals(green_count(), 2) self.assertEquals(red_count(), 1) def test_execute_atom(self): result = execute_atom(self.atomspace, ExecutionOutputLink( GroundedSchemaNode("py: test_functions.add_link"), ListLink( ConceptNode("one"), ConceptNode("two") ) ) ) list_link = ListLink( ConceptNode("one"), ConceptNode("two") ) self.assertEquals(result, list_link) def test_evaluate_atom(self): result = evaluate_atom(self.atomspace, EvaluationLink( GroundedPredicateNode("py: test_functions.bogus_tv"), ListLink( ConceptNode("one"), ConceptNode("two") ) ) ) self.assertEquals(result, TruthValue(0.6, 0.234))
#!/usr/bin/env python
# -*- coding: <encoding name> -*-

__author__ = "i_pogorelko"
__email__ = "i.pogorelko@gmail.com"
__date__ = "2014-11-16"

text = 'Proin eget tortor risus. Cras ultricies ligula sed magna dictum porta.\
Donec rutrum congue leo eget malesuada.'


def percentage_1(text):
    print ''
    print 'input: ', text
    text = text.lower()
    text2 = ''
    for x in text:
        if ord(x) >= ord('a') and ord(x) <= ord('z'):
            text2 = text2 + x
    d = {}
    m = 0
    for j in text2:
        if d.has_key(j):
            d[j] += 1.0
        else:
            d[j] = 1.0
        m += 1
    for key in d:
        d[key] = float("%.1f" % ((d[key]/m)*100))
    print '\noutput: ', d
    return d


def percentage_2(text):
    return percentage_1(text)


percentage_1(text)
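# The same letter-frequency table can be computed with collections.Counter;
# a sketch with the hypothetical name percentage_counter, same rounding as above:
#
#   from collections import Counter
#   def percentage_counter(text):
#       letters = [c for c in text.lower() if 'a' <= c <= 'z']
#       total = float(len(letters))
#       return dict((k, float("%.1f" % (v / total * 100)))
#                   for k, v in Counter(letters).items())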
#parser_testing.py

import os, sys, re, StringIO

sys.path.append('/Users/Jason/Dropbox/JournalMap/scripts/GeoParsers')
#from jmap_geoparser_re import *
from jmap_geoparser import *

#def test_parsing():
test = "blah blah blah 45º 23' 12'', 123º 23' 56'' and blah blah blah 32º21'59''N, 115º 23' 14''W blah blah blah"
coords = coordinateParser.searchString(test)
for coord in coords:
    assert coordinate(coord).calcDD(), "Coordinate Transform Error for " + str(coord)

test = "45.234º, 123.43º"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 45.234, 'longitude': 123.43}

test = "-45º 23' 12'', -123º 23' 56''"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.38667, 'longitude': 123.39889}

test = "32º21'59''N, 115º 23' 14''W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 32.36639, 'longitude': -115.38722}

test = "12 43 56 North, 23 56 12 East"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 12.73222, 'longitude': 23.93667}

test = "52 15 10N, 0 01 54W"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.25278, 'longitude': -0.03167}

test = "52 35 31N, 1 28 05E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.59194, 'longitude': 1.46806}

test = "30° 47' N, 34° 46' E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 30.78333, 'longitude': 34.76667}

'''
test = "AT; 1 spm, CN 3-41, 21°00′ N, 112°30′ E"
for result, start, end in coordinateParser.scanString(test):
    assert coordinate(result).calcDD() == {'latitude': 21.0, 'longitude': 112.5}

test = '27°43.886, 34°15.663'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 27.73143, 'longitude': 34.26105}

test = '49°17’13”N, 13°40’18”E'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 49.28694, 'longitude': 13.67167}

test = '45.9215º; -76.6219º'
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -45.9215, 'longitude': 76.6219}

test = "latitude 32°47′47″ S and longitude 26°50′56″ E"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': -32.79639, 'longitude': 26.84889}

test = "N15°46′ W87°00'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 15.76667, 'longitude': -87.0}

test = "latitude of 35°13', longitude of 4°11'"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 35.21667, 'longitude': 4.18333}

test = "expects to find coordinates: 52 degrees, 42 minutes north, 124 degrees, 50 minutes west"
assert coordinate(coordinateParser.parseString(test)).calcDD() == {'latitude': 52.7, 'longitude': -124.83333}

# Should return an exception, but instead calculates latitude as 6º 10'
#test = "expects to find coordinates: 5°70'N, 73°46'W"  # Minutes greater than 60

#test = "expects not to find: 4.5–5.0 "
'''
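# Reference for the expected values above: degrees/minutes/seconds convert to
# decimal degrees as deg + min/60 + sec/3600, negated for S/W. Sketch
# (dms_to_dd is an illustrative helper, not part of jmap_geoparser):
#
#   def dms_to_dd(deg, minutes=0.0, seconds=0.0, hemisphere='N'):
#       dd = abs(deg) + minutes / 60.0 + seconds / 3600.0
#       if hemisphere in ('S', 'W') or deg < 0:
#           dd = -dd
#       return round(dd, 5)
#
#   dms_to_dd(45, 23, 12)  # 45.38667, matching the magnitude asserted above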
import binascii


def b2h(the_bytes):
    return binascii.hexlify(the_bytes).decode("utf8")


def b2h_rev(the_bytes):
    return binascii.hexlify(bytearray(reversed(the_bytes))).decode("utf8")
# -*- coding: utf-8 -*- __author__ = 'frank' from flask import Flask, request, url_for, render_template, g, session, flash from flask_wtf.csrf import CsrfProtect from flask_debugtoolbar import DebugToolbarExtension from flask.ext.login import LoginManager from flask.ext.moment import Moment from . import filters, permissions from .utils import signout_user from .config import load_config config = load_config() login_manager = LoginManager() login_manager.session_protection = 'strong' login_manager.login_view = 'auth.signin' login_manager.login_message = '请先登陆以获得相应操作权限' # convert python's encoding to utf8 import sys reload(sys) sys.setdefaultencoding('utf8') def register_login_manager(app): """注册login_manager""" login_manager.init_app(app) def register_jinja(app): # inject vars into template context @app.context_processor def inject_vars(): from .models import Category from datetime import date year = date.today().strftime("%Y") return dict(Permission=permissions.Permission, categories=Category.query.all(), year=year) # url generator for pagination def url_for_other_page(page): """Generate url for pagination""" view_args = request.view_args.copy() args = request.args.copy().to_dict() combined_args = dict(view_args.items() + args.items()) combined_args['page'] = page return url_for(request.endpoint, **combined_args) # same effect # args = request.args.copy() # args['page'] = page # return url_for(request.endpoint, **args) app.jinja_env.globals['url_for_other_page'] = url_for_other_page def register_routes(app): from .controllers import admin, site, user, auth app.register_blueprint(site.bp, url_prefix='') app.register_blueprint(admin.bp, url_prefix='/admin') app.register_blueprint(user.bp, url_prefix='/user') app.register_blueprint(auth.bp, url_prefix='/auth') def register_error_handle(app): @app.errorhandler(403) def page_403(error): return render_template('site/403.html'), 403 @app.errorhandler(404) def page_404(error): return render_template('site/404.html'), 404 @app.errorhandler(405) def page_405(error): return render_template('site/405.html'), 405 @app.errorhandler(500) def page_500(error): return render_template('site/500.html'), 500 def register_db(app): from .models import db db.init_app(app) def register_mail(app): from .utils import mail mail.init_app(app) def get_mail_handler(): import logging from logging.handlers import SMTPHandler credentials = (con
fig.SMTP_USER, config.SMTP_PASSWORD) mail_handler = SMTPHandler(config.SMTP_SERVER, config.SMTP_FROM, config.SMTP_ADMIN, 's
f-log', credentials, ()) from logging import Formatter mail_handler.setFormatter(Formatter(''' Message type: %(levelname)s Location: %(pathname)s:%(lineno)d Module: %(module)s Function: %(funcName)s Time: %(asctime)s Message: %(message)s ''')) mail_handler.setLevel(logging.ERROR) return mail_handler def register_logger(app): """send error log to admin by smtp""" if not app.debug: app.logger.addHandler(get_mail_handler()) def register_moment(app): moment = Moment(app) def get_current_user(): """获取当前user,同时进行session有效性的检测 放在utils.py会造成环路引用 """ # 对应utils.py中signin_user方法 # 豆瓣登陆则验证邮箱, 非豆瓣登陆不验证邮箱直接返回空值退出 if 'signin_method' not in session: return None if 'user_id' not in session: return None # else: # for k,v in session.iteritems(): # print k,v from .models import User # 此处是user_id而不是douban_id user = User.query.filter(User.id == session['user_id']).first() if not user: signout_user() return None return user def create_app(): app = Flask(__name__) app.config.from_object(config) # CSRF protect CsrfProtect(app) if app.debug: DebugToolbarExtension(app) register_jinja(app) register_routes(app) register_error_handle(app) register_db(app) register_logger(app) register_login_manager(app) register_moment(app) register_mail(app) app.jinja_env.filters['markdown'] = filters.markdown app.jinja_env.filters['normalize'] = filters.normalize app.jinja_env.filters[ 'engrolename_chinrolename'] = filters.engrolename_chinrolename app.jinja_env.filters['ismyself'] = filters.ismyself @app.before_request def before_request(): g.user = get_current_user() if g.user: if not g.user.is_activated: flash('账户尚未激活,请先登陆' + g.user.email + '查收验证邮件并激活账户') signout_user() if g.user.is_banned: flash('账户已被禁用, 请联系管理员') signout_user() return app app = create_app()
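# Typical entry point for this factory pattern (sketch; a DEBUG attribute on
# the config object is assumed, it is not shown above):
#
#   if __name__ == '__main__':
#       app.run(debug=config.DEBUG)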
from django.http import HttpResponse, HttpRequest from typing import Optional import ujson from django.utils.translation import ugettext as _ from zerver.lib.actions import
do_mute_topic, do_unmute_topic from zerver.lib.request import has_request_variables, REQ from zerver.lib.response import json_success, json_error from zerver.lib.topic_mutes import topic_is_muted from zerver.lib.streams import ( access_stream_by_id, access_stream_by_name, access_stream_for_unmute_topic_by_id, access_stream_for_unmute_topic_by_name, check_for_exactly_one_stream_arg, ) from zerver.lib.validator import check_int from zerver.models import get_stream, Stream, UserProfile def mute_topic(user_profile: UserProfile, stream_id: Optional[int], stream_name: Optional[str], topic_name: str) -> HttpResponse: if stream_name is not None: (stream, recipient, sub) = access_stream_by_name(user_profile, stream_name) else: assert stream_id is not None (stream, recipient, sub) = access_stream_by_id(user_profile, stream_id) if topic_is_muted(user_profile, stream.id, topic_name): return json_error(_("Topic already muted")) do_mute_topic(user_profile, stream, recipient, topic_name) return json_success() def unmute_topic(user_profile: UserProfile, stream_id: Optional[int], stream_name: Optional[str], topic_name: str) -> HttpResponse: error = _("Topic is not muted") if stream_name is not None: stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error) else: assert stream_id is not None stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error) if not topic_is_muted(user_profile, stream.id, topic_name): return json_error(error) do_unmute_topic(user_profile, stream, topic_name) return json_success() @has_request_variables def update_muted_topic(request: HttpRequest, user_profile: UserProfile, stream_id: Optional[int]=REQ(validator=check_int, default=None), stream: Optional[str]=REQ(default=None), topic: str=REQ(), op: str=REQ()) -> HttpResponse: check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream) if op == 'add': return mute_topic( user_profile=user_profile, stream_id=stream_id, stream_name=stream, topic_name=topic, ) elif op == 'remove': return unmute_topic( user_profile=user_profile, stream_id=stream_id, stream_name=stream, topic_name=topic, )
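# Client-side sketch of exercising update_muted_topic (the route and payload
# shape here are illustrative assumptions; the actual URL wiring lives in the
# project's urls.py, which is not shown):
#
#   PATCH /json/users/me/subscriptions/muted_topics
#       op=add&stream_id=42&topic=lunch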
try: if ask_vault_pass: vault_pass = getpass.getpass(prompt="Vault password: ") if ask_vault_pass and confirm_vault: vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ") if vault_pass != vault_pass2: raise errors.AnsibleError("Passwords do not match") if ask_new_vault_pass: new_vault_pass = getpass.getpass(prompt="New Vault password: ") if ask_new_vault_pass and confirm_new: new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") if new_vault_pass != new_vault_pass2: raise errors.AnsibleError("Passwords do not match") except EOFError: pass # enforce no newline chars at the end of passwords if vault_pass: vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip() if new_vault_pass: new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip() return vault_pass, new_vault_pass def ask_passwords(self): ''' prompt for connection and become passwords if needed ''' op = self.options sshpass = None becomepass = None become_prompt = '' try: if op.ask_pass: sshpass = getpass.getpass(prompt="SSH password: ") become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper() if sshpass: sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') else: become_prompt = "%s password: " % op.become_method.upper() if op.become_ask_pass: becomepass = getpass.getpass(prompt=become_prompt) if op.ask_pass and becomepass == '': becomepass = sshpass if becomepass: becomepass = to_bytes(becomepass) except EOFError: pass return (sshpass, becomepass) def normalize_become_options(self): ''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER if self.options.become: pass elif self.options.sudo: self.options.
become = True self.options.become_method = 'sudo' elif self.options.su: self.options.become = True self.options.become_method = 'su' def validate_conflicts(self, vault_opts=False, runas_opts=False): ''' check for conflicting options ''' op = self.options if vault_opts: # Check for vault related conflicts if (op.ask_vault_pass and op.vault_password_file): self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") if runas_opts: # Check for privilege escalation conflicts if (op.su or op.su_user or op.ask_su_pass) and \ (op.sudo or op.sudo_user or op.ask_sudo_pass) or \ (op.su or op.su_user or op.ask_su_pass) and \ (op.become or op.become_user or op.become_ask_pass) or \ (op.sudo or op.sudo_user or op.ask_sudo_pass) and \ (op.become or op.become_user or op.become_ask_pass): self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " "and su arguments ('-su', '--su-user', and '--ask-su-pass') " "and become arguments ('--become', '--become-user', and '--ask-become-pass')" " are exclusive of each other") @staticmethod def expand_tilde(option, opt, value, parser): setattr(parser.values, option.dest, os.path.expanduser(value)) @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None, fork_opts=False): ''' create an options parser for most ansible scripts ''' #FIXME: implemente epilog parsing #OptionParser.format_epilog = lambda self, formatter: self.epilog # base opts parser = SortedOptParser(usage, version=CLI.version("%prog")) parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count", help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") if runtask_opts: parser.add_option('-i', '--inventory-file', dest='inventory', help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST, default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('--list-hosts', dest='listhosts', action='store_true', help='outputs a list of matching hosts; does not execute anything else') parser.add_option('-M', '--module-path', dest='module_path', help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None, action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) if fork_opts: parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', help='further limit selected hosts to an additional pattern') if vault_opts: parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file', help="vault password file", action="callback", callback=CLI.expand_tilde, type=str) if subset_opts: parser.add_option('-t', '--tags', dest='tags', default='all', help="only run plays and tasks tagged with these values") parser.add_option('--skip-tags', dest='skip_tags', help="only run plays and tasks whose tags do not match these values") if output_opts: parser.add_option('-o', '--one-line', 
dest='one_line', action='store_true', help='condense output') parser.add_option('-t', '--tree', dest='tree', default=None, help='log output to this directory') if runas_opts: # priv user defaults to root later on to enable detecting when this option was given here parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, help='desired sudo user (default=root) (deprecated, use become)') parser.add_option('-S', '--su', default=C.DEFAULT
# -*- coding: utf-8 -*- import datetime from django.conf import settings from django.test import TestCase, override_settings from django.utils import timezone from django_dynamic_fixture import G from apps.events.models import AttendanceEvent, Event class EventOrderedByRegistrationTestCase(TestCase): def setUp(self): self.FEATURED_TIMEDELTA_SETTINGS = settings # Override settings so that the tests will work even if we update the default delta self.FEATURED_TIMEDELTA_SETTINGS.OW4_SETTINGS['events']['OW4_EVENTS_FEATURED_DAYS_FUTURE'] = 7 self.FEATURED_TIMEDELTA_SETTINGS.OW4_SETTINGS['events']['OW4_EVENTS_FEATURED_DAYS_PAST'] = 7 def test_registration_no_push_forward(self): """ Tests that an AttendanceEvent with registration date far in the future is sorted by its event end date, like any other event. """ today = timezone.now() month_ahead = today + datetime.timedelta(days=30) month_ahead_plus_five = month_ahead + datetime.timedelta(days=5) normal_event = G(Event, event_start=month_ahead, event_end=month_ahead) pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five) G(AttendanceEvent, registration_start=month_ahead_plus_five, registration_end=month_ahead_plus_five, event=pushed_event) expected_order = [normal_event, pushed_event] with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS): self.assertEqual(list(Event.by_registration.all()), expected_order) def test_registration_start_pushed_forward(self): """
Tests that an AttendanceEvent with registration date within the "featured delta" (+/- 7 days from today) will be pushed ahead in the event
list, thus sorted by registration start rather than event end. """ today = timezone.now() three_days_ahead = today + datetime.timedelta(days=3) month_ahead = today + datetime.timedelta(days=30) month_ahead_plus_five = month_ahead + datetime.timedelta(days=5) normal_event = G(Event, event_start=month_ahead, event_end=month_ahead) pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five) G(AttendanceEvent, registration_start=three_days_ahead, registration_end=three_days_ahead, event=pushed_event) expected_order = [pushed_event, normal_event] with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS): self.assertEqual(list(Event.by_registration.all()), expected_order) def test_registration_past_push_forward(self): """ Tests that an AttendanceEvent with a registration date in the past, outside the "featured delta" (+/- 7 days) will be sorted by the event's end date. """ today = timezone.now() month_ahead = today + datetime.timedelta(days=30) month_ahead_plus_five = month_ahead + datetime.timedelta(days=5) month_back = today - datetime.timedelta(days=30) normal_event = G(Event, event_start=month_ahead, event_end=month_ahead) pushed_event = G(Event, event_start=month_ahead_plus_five, event_end=month_ahead_plus_five) G(AttendanceEvent, registration_start=month_back, registration_end=month_back, event=pushed_event) expected_order = [normal_event, pushed_event] with override_settings(settings=self.FEATURED_TIMEDELTA_SETTINGS): self.assertEqual(list(Event.by_registration.all()), expected_order)
from __future__ import absolute_import from django import forms from django.contrib import messages from django.core.urlresolvers import reverse from sentry.models import Project, Team from sentry.web.forms.add_project import AddProjectForm from sentry.web.frontend.base import OrganizationView from sentry.utils.http import absolute_uri ERR_NO_TEAMS = 'You cannot create a new project because there are no teams to assign it to.' class AddProjectWithTeamForm(AddProjectForm): team = forms.ChoiceField( choices=(), required=True, help_text='The team controls who has access to this project.', ) class Meta: fields = ('name', 'team') model = Project def __init__(self, user, team_list, *args, **kwargs): super(AddProjectWithTeamForm, self).__init__(*args, **kwargs) self.team_list = team_list if len(self.team_list) == 1: del self.fields['team'] else: self.fields['team'].choices = ( (t.slug, t.name) for t in team_list ) self.fields['team'].widget.choices = self.fields['team'].choices def clean_team(self): value = self.cleaned_data['team'] for team in self.team_list: if value == team.slug: return team return None def save(self, actor, ip_address): team = self.cleaned_data.get('team', self.team_list[0]) return super(AddProjectWithTeamForm, self).save(actor, team, ip_address) class CreateProjectView(OrganizationView): # TODO(dcramer): I'm 95% certain the access is incorrect here as it would # be probably validating against global org access, and all we care about is # team admin required_scope = 'team:write' def get_form(self, request, organization, team_list):
return AddProjectWithTeamForm(request.user, team_list, request.POST or None, initial={ 'team': request.GET.get('team'), }) def handle(self, request, organization): team_list = [ t for t in Team.objects.get_for_user( organization=organization, user=request.user, ) if request.access.has_team_scope(t, self.required_scope) ] if not team_list: messages.error(request, ERR_NO_TEAMS) return self.redirect(reverse('sentry-organization-home', args=[organization.slug])) form = self.get_form(request, organization, team_list) if form.is_valid(): project = form.save(request.user, request.META['REMOTE_ADDR']) return self.redirect(absolute_uri('/{}/{}/settings/install/'.format( organization.slug, project.slug, ))) context = { 'form': form, } return self.respond('sentry/create-project.html', context)
import os
os.environ["PYSDL2_DLL_PATH"] = os.getcwd()

import sdl2
import win32gui


def get_windows_bytitle(title_text, exact=False):
    """ Gets window by title text. [Windows Only] """
    def _window_callback(hwnd, all_windows):
        all_windows.append((hwnd, win32gui.GetWindowText(hwnd)))
    windows = []
    win32gui.EnumWindows(_window_callback, windows)
    if exact:
        return [hwnd for hwnd, title in windows if title_text == title]
    else:
        return [hwnd for hwnd, title in windows if title_text in title]


sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)

# This will return a handle to an open 'Notepad.exe' window.
window_handle = get_windows_bytitle("Untitled", False)

# Create a window so that the hint below can be set
a = sdl2.SDL_CreateWindow("test window",
                          sdl2.SDL_WINDOWPOS_UNDEFINED, sdl2.SDL_WINDOWPOS_UNDEFINED,
                          200, 200, 0)

# Set hint as recommended by SDL documentation:
# https://wiki.libsdl.org/SDL_CreateWindowFrom#Remarks
result = sdl2.SDL_SetHint(sdl2.SDL_HINT_VIDEO_WINDOW_SHARE_PIXEL_FORMAT, hex(id(a)))
print(sdl2.SDL_GetError())

np_window = sdl2.SDL_CreateWindowFrom(window_handle[0])
print(sdl2.SDL_GetError())

np_sur = sdl2.SDL_GetWindowSurface(np_window)
print(sdl2.SDL_GetError())

save_sur = sdl2.SDL_CreateRGBSurface(0, np_sur[0].w, np_sur[0].h, 32, 0, 0, 0, 0)
print(sdl2.SDL_GetError())

r = sdl2.SDL_BlitSurface(np_sur, None, save_sur, None)
print(sdl2.SDL_GetError())

result = sdl2.SDL_SaveBMP(save_sur, 'test.bmp')
print(sdl2.SDL_GetError())

sdl2.SDL_FreeSurface(save_sur)
print(sdl2.SDL_GetError())
are using the service. # # Special thanks : # thanks person :5ynl0rd,kiddies aka peneter,ne0 d4rk fl00der,oghie,parc0mx,me0nkz,suryal0e,zee_eichel # mirwan aka cassaprogy,shadow_maker,suddent_death,aip,r3d3,dawflin,n1nj4,hakz, # leXel,s3my0n,MaXe,Andre Corleone ,Shamus,and all my friend . # thanks communty : Tecon-crew<[url]http://tecon-crew.org[/url]> # Void-labs <[url]http://void-labs.org[/url]> # Makassar ethical hacker<[url]http://makassarhacker.com/>[/url] # Intern0t <[url]http://forum.intern0t.net/>[/url] # Deadc0de <[url]http://forum.deadc0de.or.id/>[/url] #----------------------------------------------- import shodan,sys,time,base64,os from time import sleep from shodan import WebAPI __author__='amltbXlyb21hbnRpY2Rldmls' __email__ ='PHJvbWFudGljZGV2aWwuamltbXlAZ21haWwuY29tPg==' __api__ ='Z4xjUqqsaQbFgYrnn3EBuoJsSC0VZTyI'#request youre api key and paste in here _lolz_ = WebAPI(__api__) def tayping(title): try: for i in title: print "\b%s"%i, sys.stdout.flush() time.sleep(0.005) except ImportError: print "Some Error", def check(): try: checking = "[C]Checking module..." tayping(checking) sleep(2) import shodan except ImportError: error ="\n[!]You must install Shodan Module in here :\n[url]http://pypi.python.org/packages/source/s/shodan/...[/url]" tayping(error_module) except KeyboardInterrupt: print "\n[*]Exiting program...\n" sys.exit(1) else : succes="\n[*]Shodan module is available..." tayping(succes) sleep(2) try: api_check="\n[C]Checking Api key.." tayping(api_check) sleep(2) check_Api = len(__api__) if check_Api==0: error_api= "\n[!] Api key is not available\n[!]You must request Api key in here :[url]http://www.shodanhq.com/api_doc[/url]\n\n\n\n" tayping(error_api) sleep(2) elif check_Api != 0: succces = "\n[*]Api key is available\n\n\n\n" tayping(succces) sleep(3) except KeyboardInterrupt: print "\n[*] Exiting program...\n" sys.exit(0) def clear(): if sys.platform in ('linux-i386', 'linux2', 'darwin'): SysCls = 'clear' elif sys.platform == 'win32' or sys.platform == 'dos' or sys.platform[0:5] == 'ms-dos': SysCls = 'cls' else: SysCls = 'unknown' return SysCls def title(): __Auth__= base64.b64decode(__author__) __Eml__ = base64.b64decode(__email__) title=''' ////////////////////////////////////////////////////// ___________ .__ .__ __ \_ _____/___ _________ | | ____ |__|_/ |_ | __)_ \ \/ /\____ \ | | / _ \ | |\ __\\ | \ > < | |_> >| |__( <_> )| | | | /_______ //__/\_ \| __/ |____/ \____/ |__| |__| \/ \/|__|/ Toolkit Coder by : %s Contach : %s ////////////////////////////////////////////////////// '''%(__Auth__,__Eml__) tayping(title) def expoitdb(): try: searching_Exploit= raw_input('[+]Search a exploit :') print '[!]You search [%s] Exploit'% searching_Exploit wtf = _lolz_.exploitdb.search(searching_Exploit) more = wtf['total'] print '[!]Found [%s] exploit with result [%s]'%(more,searching_Exploit) try: display =raw_input('[!]See all list exploit found?(y/n)') if display =='y': ds = wtf['matches'] for i in ds : print'%s: %s' % (i['id'],i['description']) except Exception,err: print'[%s]'%err try: display_exploit=raw_input('[+]Select type exploit?(y/n)') if display_exploit =='y': print'choois types : remote, webapps, dos, local, shellcode ?' 
rock =raw_input('') print 'youre chooise [%s] exploits'%rock r = wtf['matches'] for i in r: if rock ==i['type']: print'%s: %s' % (i['id'],i['description']) except Exception,err: print'[%s]'%err try: view_exploit=raw_input('[+]Select exploit to view ?(y/n)') if view_exploit =='y': print'[+]Enter exploit id to view :' v = raw_input('') lols=wtf['matches'] for i in lols: if v == str(i['id']): File_exploit = _lolz_.exploitdb.download(i['id']) print 'Filename: %s'% File_exploit['filename'] print 'Content-type: %s' % File_exploit['content-type'] print File_exploit['data'] download_exploit= raw_input('[+]download the exploit ?(y/n)') if download_exploit=='y': dwnload = open(File_exploit['filename'], 'w') dwnload.write(File_exploit['data']) dwnload.close() print'%s successfully download' % File_exploit['filename'] except Exception,err: print'[%s]'%err try_again=raw_input('[+]Do you want to try again ?(y/n):') while try_again=='y': os.system(clear()) title() expoitdb() try_again=raw_input('[+]Do you want to try again ?(y/n):') main() except KeyboardInterrupt, IOError: print '\nYou pressed Ctrl+C or exited...' main() sys.exit(1) def metasploit(): try: module_search=raw_input('[!]Search module metasploit :') print'[!]We will search metasploit module' m_m = _lolz_.msf.search(module_search) result = m_m[
'total'] print 'Modules found: %s'%result result2 = m_m['matches'] for i in result2: print '%s: %s' % (i['type'], i['name']) download =raw_input('[+]Download module : (y/n)') if download =='y': file = _lolz_.msf.download(i['fullname']) print 'Filename: %s' % file['filename']
print 'Content-type: %s' % file['content-type'] print file['data'] try_again = raw_input('[+]Do you want to try again ?(y/n)') while try_again =='y': os.system(clear()) title() metasploit() try_again = raw_input('[+]Do you want to try again ?(y/n)') main() except Exception,err: print'[%s]'%err def host(): try: input_host = raw_input('[+]Input host :') host_result = _lolz_.host(input_host) ip =host_result['ip'] country=host_result.get('country', None) city =host_result.get('city', None) host_name =host_result['hostnames'] data =host_result['data'] resulting =""" Ip addres = %s Country = %s City = %s """%(ip,country,city,) tayping(resulting) for i in data : print """ Port = %s Banner = %s"""%(i['port'],i['banner']) try_again = raw_input('[!]try again ?(y/n)') while try_again =='y': host() try_again = raw_input('[!]try again ?(y/n)') except Exception,err: print'[%s]'%err main() def exit(): teks_exit='\nExiting..\nThanks for use this tools' tayping(teks_exit) sleep(2) sys.exit() def main(): try: os.system(clear()) title() menu = {'1':expoitdb, '2':metasploit, '3':host, '4':exit,} while True: print """ Input your chooise: 1) Search exploit
# importing wxPython library, see the reference here :
# http://www.wxpython.org/docs/api/wx-module.html
# and an excellent step by step tutorial there :
# http://zetcode.com/wxpython
import wx

from Controller import *


# main function
def main():
    # each wx application must have a wx.App object
    app = wx.App()
    controller = Controller(title="BLANK_PY2WX")
    # entering the endless loop that catches all the events
    app.MainLoop()


if __name__ == '__main__':
    main()
import sys

from fabric.api import *
from fabric.contrib import *
from fabric.contrib.project import rsync_project

from defaults import fab
from config import ssh, sudoers

import {%= name %}


@task
def prepare_vm():
    sudoers.setup_sudoers_on_vm()


@task(default=True)
def system():
    print 'start here'
# -*- coding: utf-8 -*- #---------------------------------------------------------------------------- # Menu for quickly adding waypoints when on move #---------------------------------------------------------------------------- # Copyright 2007-2008, Oliver White # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or #
(at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #--------------------------------------------------------------------------- from modules.base_module import RanaModule import cairo from time import time from math import pi def getModule(*args, **kwargs): return ClickMenu(*args, **kwargs) class ClickMenu(RanaModule): """Overlay info on the map""" def __init__(self, *args, **kwargs): RanaModule.__init__(self, *args, **kwargs) self.lastWaypoint = "(none)" self.lastWaypointAddTime = 0 self.messageLingerTime = 2 def handleMessage(self, message, messageType, args): if message == "addWaypoint": m = self.m.get("waypoints", None) if m is not None: self.lastWaypoint = m.newWaypoint() self.lastWaypointAddTime = time() def drawMapOverlay(self, cr): """Draw an overlay on top of the map, showing various information about position etc.""" # waypoints will be handled in another way, so this is disabled for the time being # (x,y,w,h) = self.get('viewport') # # dt = time() - self.lastWaypointAddTime # if(dt > 0 and dt < self.messageLingerTime): # self.drawNewWaypoint(cr, x+0.5*w, y+0.5*h, w*0.3) # else: # m = self.m.get('clickHandler', None) # if(m != None): # m.registerXYWH(x+0.25*w,y+0.25*h,w*0.5,h*0.5, "clickMenu:addWaypoint") def drawNewWaypoint(self, cr, x, y, size): text = self.lastWaypoint cr.set_font_size(200) extents = cr.text_extents(text) (w, h) = (extents[2], extents[3]) cr.set_source_rgb(0, 0, 0.5) cr.arc(x, y, size, 0, 2 * pi) cr.fill() x1 = x - 0.5 * w y1 = y + 0.5 * h border = 20 cr.set_source_rgb(1, 1, 1) cr.move_to(x1, y1) cr.show_text(text) cr.fill()
field = None new_class._meta.parents[base] = field else: # .. and abstract ones. for field in parent_fields: new_class.add_to_class(field.name, copy.deepcopy(field)) # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base._meta.parents) # Inherit managers from the abstract base classes. new_class.copy_managers(base._meta.abstract_managers) # Proxy models inherit the non-abstract managers from their base, # unless they have redefined any of them. if is_proxy: new_class.copy_managers(original_base._meta.concrete_managers) # Inherit virtual fields (like GenericForeignKey) from the parent # class for field in base._meta.virtual_fields: if base._meta.abstract and field.name in field_names: raise FieldError('Local field %r in class %r clashes '\ 'with field of similar name from '\ 'abstract base class %r' % \ (field.name, name, base.__name__)) new_class.add_to_class(field.name, copy.deepcopy(field)) if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() register_models(new_class._meta.app_label, new_class) # Because of the way imports happen (recursively), this may or may not be # the first time this model has tried to register with the framework. There # should only be one class for each model, so we always return the # registered version. return get_model(new_class._meta.app_label, name, seed_cache=False, only_installed=False) def copy_managers(cls, base_managers): # This is in-place sorting of an Options attribute, but that's fine. base_managers.sort() for _, mgr_name, manager in base_managers: val = getattr(cls, mgr_name, None) if not val or val is manager: new_manager = manager._copy_to_model(cls) cls.add_to_class(mgr_name, new_manager) def add_to_class(cls, name, value): if hasattr(value, 'contribute_to_class'): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """ Creates some methods once self._meta has been populated. """ opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False) # defer creating accessors on the foreign class until we are # certain it has been created def make_foreign_order_accessors(field, model, cls): setattr( field.rel.to, 'get_%s_order' % cls.__name__.lower(), curry(method_get_order, cls) ) setattr( field.rel.to, 'set_%s_order' % cls.__name__.lower(), curry(method_set_order, cls) ) add_lazy_relation( cls, opts.order_with_respect_to, opts.order_with_respect_to.rel.to, make_foreign_order_accessors ) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields])) if hasattr(cls, 'get_absolute_url'): cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url), cls.get_absolute_url) signals.class_prepared.send(sender=cls) class ModelState(object): """ A class for storing instance state """ def __init__(self, db=None): self.db = db # If true, uniqueness validation checks will consider this a new, as-yet-unsaved object. # Necessary for correct validation of new instances of objects with explicit (non-auto) PKs. # This impacts validation only; it has no effect on the actual save.
self.adding = True class Model(six.with_metaclass(ModelBase)): _deferred = False def __init__(self, *args, **kwargs): signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. args_len = len(args) if args_len > len(self._meta.fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") fields_iter = iter(self._meta.fields) if not kwargs: # The ordering of the zip calls matters - zip throws StopIteration # when an iter throws it. So if the first iter t
hrows it, the second # is *not* consumed. We rely on this, so don't change the order
# without changing the logic. for val, field in zip(args, fields_iter): setattr(self, field.attname, val) else: # Slower, kwargs-ready version. for val, field in zip(args, fields_iter): setattr(self, field.attname, val) kwargs.pop(field.name, None) # Maintain compatibility with existing calls. if isinstance(field.rel, ManyToOneRel): kwargs.pop(field.attname, None) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. for field in fields_iter: is_related_object = False # This slightly odd construct is so that we can access any # data-descriptor object (DeferredAttribute) without triggering its # __get__ method. if (field.attname not in kwargs and isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)): # This field will be populated on request. continue if kwargs: if isinstance(field.rel, ManyToOneRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: # Object instance was passed in. Special case: You can # pass in "None" for related objects if it's allowed. if rel_obj is None and field.null: val = None else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # ge
, "original_filename": os.path.split(self.filename)[1], "time": time_str, }, "Signal": { "quantity": quantity_str, "signal_type": "", }, } return metadict def _build_original_metadata(self,): """Builds a metadata dictionnary from the header""" original_metadata_dict = {} Ntot = (self._N_data_object+1)*(self._N_data_channels+1) #Iteration over Number of data objects for i in range(self._N_data_object): #Iteration over the Number of Data channels for j in range(self._N_data_channels): #Creating a dictionary key for each object k = (i+1)*(j+1) key = "Object_{:d}_Channel_{:d}".format(i,j) original_metadata_dict.update({key:{}}) #We load one full object header a = self._list_sur_file_content[k-1] #Save it as original metadata dictionary headerdict = {"H"+l.lstrip('_'):a[l] for l in a if l not in \ ("_62_points",'_61_Private_zone')} original_metadata_dict[key].update({"Header" : headerdict}) #The second dictionary might contain custom mountainsmap params parsedict = {} #Check if it is the case and append it to #original metadata if yes valid_comment = self._check_comments(a["_60_Comment"],'$','=') if valid_comment: parsedict = self._MS_parse(a["_60_Comment"],'$','=') parsedict = {l.lstrip('_'):m for l,m in parsedict.items()} original_metadata_dict[key].update({"Parsed" : parsedict}) return original_metadata_dict def _set_metadata_and_original_metadata(self,unpacked_dict): """Run successively _build_metadata and _build_original_metadata and set signal dictionary with results""" self.signal_dict['metadata'] = self._build_metadata(unpacked_dict) self.signal_dict['original_metadata'] = self._build_original_metadata() def _check_comments(self,commentsstr,prefix,delimiter): """Check if comment string is parsable into metadata dictionary. Some specific lines (empty or starting with @@) will be ignored, but any non-ignored line must conform to being a title line (beginning with the TITLESTART indicator) or being parsable (starting with Prefix and containing the key data delimiter). At the end, the comment is considered parsable if it contains minimum 1 parsable line and no non-ignorable non-parsable non-title line. Parameters ---------- commentstr: string containing comments prefix: string (or char) character assumed to start each line. '$' if a .sur file. delimiter: string that delimits the keyword from value. always '=' Returns ------- valid: boolean """ #Titlestart markers start with Prefix ($) followed by underscore TITLESTART = '{:s}_'.format(prefix) #We start by assuming that the comment string is valid #but contains 0 valid (= parsable) lines valid = True N_valid_lines = 0 for line in commentsstr.splitlines(): #Here we ignore any empty line or line starting with @@ ignore = False if not line.strip() or line.startswith('@@'): ignore = True #If the line must not be ignored if not ignore: #If line starts with a titlestart marker we it counts as valid if line.startswith(TITLESTART): N_valid_lines += 1 # if it does not we check that it has the delimiter and # starts with prefix else: #We check that line contains delimiter and prefix #if it does the count of valid line is increased if delimiter in line and line.startswith(prefix): N_valid_lines += 1 #Otherwise the whole comment string is thrown out else: valid = False #finally, it total number of valid line is 0 we throw out this comments if N_valid_lines ==0: valid = False #return falsiness of the string. return valid def _MS_parse(self, strMS, prefix, delimiter): """ Parses a string containing metadata information. 
The string can be read from the comment section of a .sur file, or, alternatively, a file containing them with similar formatting. Parameters ---------- strMS: string containing metadata prefix: string (or char) character assumed to start each line. '$' if a .sur file. delimiter: string that delimits the keyword from value. always '=' Returns ------- dictMS: dictionary in the correct hyperspy metadata format """ #dictMS is created as an empty dictionary dictMS = {} #Title lines start with an underscore TITLESTART = '{:s}_'.format(prefix) for line in strMS.splitlines(): #Here we ignore any empty line or line starting with @@ ignore = False if not line.strip() or line.startswith('@@'): ignore = True #If the line must not be ignored if not ignore: if line.startswith(TITLESTART): #We strip keys from whitespace at the end and beginning keyMain = line[len(TITLESTART):].strip() dictMS[keyMain] = {} elif line.startswith(prefix): key, *liValue = line.split(delimiter) #Key is also stripped from beginning or end whitespace key = key[len(prefix):].strip() strValue = liValue[0] if len(liValue) > 0 else "" # remove whitespace at the beginning of value strValue = strValue.strip() liValue = strValue.split(' ') try: if key == "Grating": dictMS[keyMain][key] = liValue[0] # we don't want to eval this one else: dictMS[keyMain][key] = eval(liValue[0]) except: dictMS[keyMain][key] = liValue[0] if len(liValue) > 1: dictMS[keyMain][key+'_units'] = liValue[1] return dictMS ### Post processing def post_process_RGB(self, signal): signal = signal.transpose() max_data = np.nanmax(signal.data) if max_data <= 256: signal.change_dtype('uint8') signal.change_dtype('rgb8') elif max_data <= 65536:
signal.change_dtype('uint16') signal.change_dtype('rgb16') else: warnings.warn("""RGB-announced data could not be converted to uint8 or uint16 datatype""") return signal ### pack/unpack binary quantities def _get_int16(self, file, default=None, signed=True): """Read a 16-bit int with a use
r-definable default value if no file is given""" if file is None : return default b = file.read(2) if sys.byteorder == 'big' : return struct.unpack('>h', b)[0] else : return struct.unpack('<h', b)[0] def _set_int16(self, file, val): file.write(struct.pack('<h', val)) def _get_str(self, file, size, default=None, encoding='latin-1'): """Read a str of defined size in bytes with a user-definable default value if no file is given""" if file is None : return default read_str = file.read(size).decode(encoding) return read_str.strip(' \t\n')
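# A hedged usage sketch of the comment parser above. The reader class name is
# not visible in this excerpt, so `reader` stands in for an instance of it;
# the '$' prefix and '=' delimiter follow the docstrings.
#
# sample = "$_OPERATOR\n$Name = John\n$Grating = 1800 gr/mm"
# if reader._check_comments(sample, '$', '='):
#     parsed = reader._MS_parse(sample, '$', '=')
#     # parsed == {'OPERATOR': {'Name': 'John',
#     #                         'Grating': '1800', 'Grating_units': 'gr/mm'}}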
from .base_executor import ScriptExecutor from judgeenv import env class RubyExecutor(ScriptExecu
tor):
ext = '.rb' name = 'RUBY' address_grace = 65536 fs = ['.*\.(?:so|rb$)', '/etc/localtime$', '/dev/urandom$', '/proc/self', '/usr/lib/ruby/gems/'] test_program = 'puts gets' @classmethod def get_command(cls): return env['runtime'].get(cls.name.lower())
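# Hedged note: get_command() implies that judgeenv's env['runtime'] maps the
# lowercased executor name to an interpreter path. An illustrative entry
# (the path itself is an assumption):
#
# env['runtime'] = {'ruby': '/usr/bin/ruby'}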
import attr from navmazing import NavigateToAttribute from navmazing import NavigateToSibling from cfme.common import Taggable from cfme.common import TagPageView from cfme.containers.provider import ContainerObjectAllBaseView from cfme.containers.provider import ContainerObjectDetailsBaseView from cfme.containers.provider import GetRandomInstancesMixin from cfme.containers.provider import Labelable from cfme.containers.provider import LoggingableView from cfme.modeling.base import BaseCollection from cfme.modeling.base import BaseEntity from cfme.utils.appliance.implementations.ui import CFMENavigateStep from cfme.utils.appliance.implementations.ui import navigator from cfme.utils.providers import get_crud_by_name class ServiceView(ContainerObjectAllBaseView, LoggingableView): """Container Services view""" @property def in_service(self): """Determine if the Service page is currently open""" return ( self.logged_in_as_current_user and self.navigation.currently_selected == ['Compute', 'Containers', 'Container Services'] ) class ServiceAllView(ServiceView): """Container Services All view""" SUMMARY_TEXT = "Container Services" @property def is_displayed(self): return self.in_service and super().is_displayed class ServiceDetailsView(ContainerObjectDetailsBaseView): """Container Services Details view""" SUMMARY_TEXT = "Container Services" @attr.s class Service(BaseEntity, Taggable, Labelable): PLURAL = 'Container Services' all_view = ServiceAllView details_view = ServiceDetailsView name = attr.ib() project_name = attr.ib() provider = attr.ib() @attr.s class ServiceCollection(GetRandomInstancesMixin, BaseCollection): """Collection object for :py:class:`Service`.""" ENTITY = Service def all(self): # container_services table has ems_id, join with ext_management_systems on id for provider name # Then join with container_projects on the id for the project service_table = self.appliance.db.client['container_services'] ems_table = self.appliance.db.client['ext_management_systems'] project_table = self.appliance.db.client['container_projects'] service_query = ( self.appliance.db.client.session .query(service_table.name, project_table.name, ems_table.name) .join(ems_table, service_table.ems_id == ems_table.id) .join(project_table, service_table.container_project_id == project_table.id)) provider = None # filtered if self.filters.get('provider'): provider = self.filters.get('provider') service_query = service_query.filter(ems_table.name == provider.name) services = [] for name, project_name, ems_name in service_query.all(): services.append(self.instantiate(name=name, project_name=project_name, provider=provider or get_crud_by_name(ems_name))) return services @navigator.register(ServiceCollection, 'All') class All(CFMENavigateStep): prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn') VIEW = ServiceAllView def step(self, *args, **kwargs): self.prerequisite_view.navigation.select('Compute', 'Containers', 'Container Services') def resetter(self, *args, **kwargs): # Reset view and selection self.view.toolbar.view_selector.select("List View") self.view.paginator.reset_selection() @navigator.register(Service, 'Details') class Details(CFMENavigateStep): prerequisite = NavigateToAttribute('parent', 'All') VIEW = ServiceDetailsView def step(self, *args, **kwargs): search_visible = self.prerequisite_view.entities.search.is_displayed self.prerequisite_view.entities.get_entity(name=self.obj.name, project_name=self.obj.project_name, surf_pages=not search_visible,
use_search=search_visible).click() @navigator.register(Service, 'EditT
ags') class EditTags(CFMENavigateStep): VIEW = TagPageView prerequisite = NavigateToSibling('Details') def step(self, *args, **kwargs): self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
import sys import gzip import logging from csv import DictReader from datetime import datetime from . import app, connect_db from ..constants import DEFAULT_GENOME_BUILD, BENIGN, UNCERTAIN, UNKNOWN, PATHOGENIC from ..extensions import mongo from ..backend import build_variant_doc, get_variant_category, update_variant_task, create_variant_task, run_variant_tasks from ..services.notifier import UpdateNotifier logging.basicConfig(format="%(levelname)s (%(name)s %(lineno)s): %(message)s") logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) def iter_variants(filename): with gzip.open(filename, 'rt') as ifp: for row in DictReader(ifp, dialect='excel-tab'): yield row def did_variant_category_change(old_doc, new_doc): old_category = get_variant_category(old_doc) new_category = get_variant_category(new_doc) return old_category != new_category def iter_variant_updates(db, variants): for variant in variants: new_doc = build_variant_doc(DEFAULT_GENOME_BUILD, **variant) doc_id = new_doc['_id'] old_doc = db.variants.find_one({ '_id': doc_id }) if did_variant_category_change(old_doc, new_doc): yield (old_doc, new_doc) def main(clinvar_filename): db = connect_db() notifier = UpdateNotifier(db, app.config) started_at = datetime.utcnow() task_list = [] variant_iterator = iter_variants(clinvar_filename) for i, (old_doc, new_doc) in enumerate(iter_variant_updates(db, variant_iterator)): if i % 10000 == 0: logger.debug('Processed {} variants'.format(i)) if old_doc: # Variant is already known, either: # - someone subscribed before it was added to clinvar, or # - it was already in clinvar, and we might have new annotations task = update_variant_task(db, old_doc, new_doc) else: # Add clinvar annotations with empty subscriber data task = create_variant_task(db, new_doc) task_list.append(task) results = run_variant_tasks(db, task_list, notifier=notifier) logger.debug('Variants updated. Results: {}'.format(results)) db.updates.insert_one({ 'started_at': started_at, 'finished_at': datetime.utcnow(), 'inserted_count': results['inserted'], 'modified_count': results['modified'], 'notified_count': results['notified'], }) def parse_args(): import argparse parser = argparse.ArgumentParser(description='Update ClinVar data') parser.add_argument(
'clinvar_filename', metavar='CLINVAR_ALLELES_TSV_GZ', type=str, help='clinvar_alleles.single.b*.tsv.gz from github.com/
macarthur-lab/clinvar pipeline') return parser.parse_args() if __name__ == '__main__': args = parse_args() main(args.clinvar_filename)
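# A minimal sketch of the input iter_variants() expects: a gzipped,
# tab-separated table with a header row. The column names below are
# illustrative, not the real clinvar_alleles schema.
#
# with gzip.open('example.tsv.gz', 'wt') as f:
#     f.write('chrom\tpos\tref\talt\n1\t12345\tA\tG\n')
# for row in iter_variants('example.tsv.gz'):
#     print(row)  # {'chrom': '1', 'pos': '12345', 'ref': 'A', 'alt': 'G'}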
r, phi = self.ldf.calculate_core_distance_and_angle(x, y, core_x, core_y) p_ground = self.ldf.calculate_ldf_value(r, phi, size, zenith, azimuth) num_particles = self.simulate_particles_for_density( p_ground * detector.get_area()) return num_particles class BaseLdf(object): """Base LDF class No particles! Always returns a particle density of 0. """ def calculate_ldf_value(self, r, n_electrons=None, s=None): return 0. def calculate_core_distance(self, x, y, x0, y0, theta, phi): """Calculate core distance The core distance is the distance of the detector to the shower core, measured *on the shower front*. For derivations, see logbook. :param x,y: detector pos
ition in m. :param x0,y0: shower core position in m. :param theta,phi: shower axis direction in radians.
:return: distance from detector to the shower core in shower front plane in m. """ x = x - x0 y = y - y0 return sqrt(x ** 2 + y ** 2 - (x * cos(phi) + y * sin(phi)) ** 2 * sin(theta) ** 2) class NkgLdf(BaseLdf): """The Nishimura-Kamata-Greisen function""" # shower parameters # Age parameter and Moliere radius from Thoudam2012 sec 5.6. _n_electrons = 10 ** 4.8 _s = 1.7 _r0 = 30. def __init__(self, n_electrons=None, s=None): """NKG LDF setup :param n_electrons: Shower size (number of electrons). :param s: Shower age parameter. """ if n_electrons is not None: self._n_electrons = n_electrons if s is not None: self._s = s self._cache_c_s_value() def _cache_c_s_value(self): """Store the c_s value The c_s value does not change if s and r0 are fixed. """ self._c_s = self._c(self._s) def calculate_ldf_value(self, r, n_electrons=None, s=None): """Calculate the LDF value :param r: core distance in m. :param n_electrons: number of electrons in the shower. :param s: shower age parameter. :return: particle density in m ** -2. """ if n_electrons is None: n_electrons = self._n_electrons if s is None: s = self._s return self.ldf_value(r, n_electrons, s) def ldf_value(self, r, n_electrons, s): """Calculate the LDF value Given a core distance, shower size, and shower age. As given in Fokkema2012 eq 7.2. :param r: core distance in m. :param n_electrons: number of electrons in the shower. :param s: shower age parameter. :return: particle density in m ** -2. """ if s == self._s: c_s = self._c_s else: c_s = self._c(s) r0 = self._r0 return (n_electrons * c_s * (r / r0) ** (s - 2) * (1 + r / r0) ** (s - 4.5)) def _c(self, s): """Part of the LDF As given in Fokkema2012 eq 7.3. :param s: shower age parameter. :return: c(s) """ r0 = self._r0 return (gamma(4.5 - s) / (2 * pi * r0 ** 2 * gamma(s) * gamma(4.5 - 2 * s))) class KascadeLdf(NkgLdf): """The KASCADE modified NKG function""" # shower parameters # Values from Fokkema2012 sec 7.1. _n_electrons = 10 ** 4.8 _s = 0.94 # Shape parameter _r0 = 40. _alpha = 1.5 _beta = 3.6 def ldf_value(self, r, n_electrons, s): """Calculate the LDF value Given a core distance, shower size, and shower age. As given in Fokkema2012 eq 7.4. :param r: core distance in m. :param n_electrons: number of electrons in the shower. :param s: shower shape parameter. :return: particle density in m ** -2. """ if s == self._s: c_s = self._c_s else: c_s = self._c(s) r0 = self._r0 alpha = self._alpha beta = self._beta return (n_electrons * c_s * (r / r0) ** (s - alpha) * (1 + r / r0) ** (s - beta)) def _c(self, s): """Part of the LDF As given in Fokkema2012 eq 7.5. :param s: shower shape parameter. :return: c(s) """ r0 = self._r0 beta = self._beta alpha = self._alpha return (gamma(beta - s) / (2 * pi * r0 ** 2 * gamma(s - alpha + 2) * gamma(alpha + beta - 2 * s - 2))) class EllipsLdf(KascadeLdf): """The NKG function modified for leptons and azimuthal asymmetry""" # shower parameters # Values from Montanus, paper to follow. _n_electrons = 10 ** 4.8 _s1 = -.5 # Shape parameter _s2 = -2.6 # Shape parameter _r0 = 30. _zenith = 0. _azimuth = 0. def __init__(self, n_electrons=None, zenith=None, azimuth=None, s1=None, s2=None): if n_electrons is not None: self._n_electrons = n_electrons if zenith is not None: self._zenith = zenith if azimuth is not None: self._azimuth = azimuth if s1 is not None: self._s1 = s1 if s2 is not None: self._s2 = s2 self._cache_c_s_value() def _cache_c_s_value(self): """Store the c_s value The c_s value does not change if s1, s2 and r0 are fixed. 
""" self._c_s = self._c(self._s1, self._s2) def calculate_ldf_value(self, r, phi, n_electrons=None, zenith=None, azimuth=None): """Calculate the LDF value for a given core distance and polar angle :param r: core distance in m. :param phi: polar angle in rad. :param n_electrons: number of electrons in the shower. :return: particle density in m ** -2. """ if n_electrons is None: n_electrons = self._n_electrons if zenith is None: zenith = self._zenith if azimuth is None: azimuth = self._azimuth return self.ldf_value(r, phi, n_electrons, zenith, azimuth, self._s1, self._s2) def ldf_value(self, r, phi, n_electrons, zenith, azimuth, s1, s2): """Calculate the LDF value Given a core distance, core polar angle, zenith angle, azimuth angle, shower size and three shape parameters (r0, s1, s2) . As given by Montanus, paper to follow. .. warning:: The value 11.24 in the expression: muoncorr is only valid for: s1 = -.5, s2 = - 2.6 and r0 = 30. :param r: core distance in m. :param phi: polar angle in rad. :param n_electrons: number of electrons in the shower. :param zenith: zenith angle in rad. :param azimuth: azimuth angle in rad. :param s1: shower shape parameter. :param s2: shower shape parameter. :return: particle density in m ** -2. """ if s1 == self._s1 and s2 == self._s2: c_s = self._c_s else: c_s = self._c(s1, s2) r0 = self._r0 zenith = self._zenith azimuth = self._azimuth relcos = cos(phi - azimuth) ell = sqrt(1 - sin(zenith) * sin(zenith) * relcos * relcos) shift = -0.0575 * sin(2 * zenith) * r * relcos k = shift + r * ell term1 = k / r0 term2 = 1 + k / r0 muoncorr = 1 + k / (11.24 * r0) # See warning in docstring. with warnings.catch_warnings(record=True): p = (n_electrons * c_s * cos(zenith) * term1 ** s1 * term2 ** s2 * muoncorr) return p def _c(self, s1, s2): """Normalization of the LDF As given in Montanus, paper to follow. :param s1: shower shape parameter. :param s2: shower shape parameter. :return: c(s1,s2) """ r0 = self._r0 return (gamma(-s2) / (2 * pi * r0 ** 2 * gamma(s1 + 2) * gamma(-s1 - s2 - 2)))
:vartype time: float """ def __init__(self, left, right, node, source, dest, time): self.left = left self.right = right self.node = node self.source = source self.dest = dest self.time = time class Variant(SimpleContainer): """ A variant represents the observed variation among the samples for a given site. A variant consists of (a) a reference to the :class:`.Site` instance in question; (b) the **alleles** that may be observed at the samples for this site; and (c) the **genotypes** mapping sample IDs to the observed alleles. Each element in the ``alleles`` tuple is a string, representing the actual observed state for a given sample. The first element of this tuple is guaranteed to be the same as the site's ``ancestral_state`` value. The list of alleles is also guaranteed not to contain any duplicates. However, allelic values may be listed that are not referred to by any samples. For example, if we have a site that is fixed for the derived state (i.e., we have a mutation over the tree root), all genotypes will be 1, but the alleles list will be equal to ``('0', '1')``. Other than the ancestral state being the first allele, the alleles are listed in no particular order, and the ordering should not be relied upon. The ``genotypes`` represent the observed allelic states for each sample, such that ``var.alleles[var.genotypes[j]]`` gives the string allele for sample ID ``j``. Thus, the elements of the genotypes array are indexes into the ``alleles`` list. The genotypes are provided in this way via a numpy array to enable efficient calculations. Modifying the attributes in this class will have **no effect** on the underlying tree sequence data. :ivar site: The site object for this variant. :vartype site: :class:`.Site` :ivar alleles: A tuple of the allelic values that may be observed at the samples at the current site. The first element of this tuple is always the site's ancestral state. :vartype alleles: tuple(str) :ivar genotypes: An array of indexes into the list ``alleles``, giving the state of each sample at the current site. :vartype genotypes: numpy.ndarray """ def __init__(self, site, alleles, genotypes): self.site = site self.alleles = alleles self.genotypes = genotypes # Deprecated aliases to avoid breaking existing code. self.position = site.position self.index = site.id class Edgeset(SimpleContainer): def __init__(self, left, right, parent, children): self.left = left self.right = right self.parent = parent self.children = children def __repr__(self): return "{{left={:.3f}, right={:.3f}, parent={}, children={}}}".format( self.left, self.right, self.parent, self.children) class Provenance(SimpleContainer): def __init__(self, id_=None, timestamp=None, record=None): self.id = id_ self.timestamp = timestamp self.record = record def add_deprecated_mutation_attrs(site, mutation): """ Add in attributes for the older deprecated way of defining mutations. These attributes will be removed in future releases and are deliberately undocumented in version 0.5.0. """ mutation.position = site.position mutation.index = site.id return mutation class SparseTree(object): """ A SparseTree is a single tree in a :class:`.TreeSequence`. The SparseTree implementation differs from most tree implementations by using **integer node IDs** to refer to nodes rather than objects. Thus, when we wish to find the parent of the node with ID '0', we use ``tree.parent(0)``, which returns another integer.
If '0' does not have a parent in the current tree (e.g., if it is a root), then the special value :const:`.NULL_NODE` (:math:`-1`) is returned. The children of a node are found using the :meth:`.children` method. To obtain information about a particular node, one may either use ``tree.tree_sequence.node(u)`` to obtain the corresponding :class:`Node` instance, or use the :meth:`.time` or :meth:`.population` shorthands. Tree traversals in various orders are possible using the :meth:`.SparseTree.nodes` iterator. Sparse trees are not intended to be instantiated directly, and are obtained as part of a :class:`.TreeSequence` using the :meth:`.trees` method. """ def __init__(self, ll_sparse_tree, tree_sequence): self._ll_sparse_tree = ll_sparse_tree self._tree_sequence = tree_sequence @property def tree_sequence(self): """ Returns the tree sequence that this tree is from. :return: The parent tree sequence for this tree. :rtype: :class:`.TreeSequence` """ return self._tree_sequence def get_branch_length(self, u): # Deprecated alias for branch_length return self.branch_length(u) def branch_length(self, u): """ Returns the length of the branch (in generations) joining the specified node to its parent. This is equivalent to >>> tree.time(tree.parent(u)) - tree.time(u) Note that this is not related to the value returned by :attr:`.length`, which describes the length of the interval covered by the tree in genomic coordinates. :param int u: The node of interest. :return: The branch length from u to its parent. :rtype: float """ return self.time(self.get_parent(u)) - self.time(u) def get_total_branch_length(self): # Deprecated alias for total_branch_length return self.total_branch_length @property def total_branch_length(self): """ Returns the sum of all the branch lengths in this tree (in units of generations). Thi
s is equivalent to >>> sum( >>> tree.branch_length(u) for u in tree.nodes() >>> if u not in self.roots) :return: The sum of all the b
ranch lengths in this tree. :rtype: float """ return sum( self.get_branch_length(u) for u in self.nodes() if u not in self.roots) def get_mrca(self, u, v): # Deprecated alias for mrca return self.mrca(u, v) def mrca(self, u, v): """ Returns the most recent common ancestor of the specified nodes. :param int u: The first node. :param int v: The second node. :return: The most recent common ancestor of u and v. :rtype: int """ return self._ll_sparse_tree.get_mrca(u, v) def get_tmrca(self, u, v): # Deprecated alias for tmrca return self.tmrca(u, v) def tmrca(self, u, v): """ Returns the time of the most recent common ancestor of the specified nodes. This is equivalent to:: >>> tree.time(tree.mrca(u, v)) :param int u: The first node. :param int v: The second node. :return: The time of the most recent common ancestor of u and v. :rtype: float """ return self.get_time(self.get_mrca(u, v)) def get_parent(self, u): # Deprecated alias for parent return self.parent(u) def parent(self, u): """ Returns the parent of the specified node. Returns the :const:`.NULL_NODE` if u is the root or is not a node in the current tree. :param int u: The node of interest. :return: The parent of u. :rtype: int """ return self._ll_sparse_tree.get_parent(u) # Quintuply linked tree structure. def left_child(self, u): return self._ll_sparse_tree.get_left_child(u) def right_child(self, u): return self._ll_sparse_tree.get_right_child(u) def left_sib(self, u): return self._ll_sparse_tree.get_left_sib(u) def right_sib(self, u): return self._ll_sparse_tree.get_right_sib(u) # TODO do we also have right_root? @property def left_r
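# A hedged illustration of the Variant mapping documented above: genotypes
# index into alleles, so the observed string state of sample j is
# var.alleles[var.genotypes[j]]. `ts` is assumed to be a TreeSequence obtained
# elsewhere (e.g. from a simulation); names here are illustrative.
#
# for var in ts.variants():
#     states = [var.alleles[g] for g in var.genotypes]
#     print(var.site.position, states)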
import pymongo from flask import g from flas
k import current_app as app def get_db(): if not hasattr(g, 'conn'): g.conn = pymongo.MongoClient( app.config['MONGODB_HOST'], int(app.config['MONGODB_PORT']) ) if not hasattr(g, 'db'): g.db = g.conn[app.config['MONGODB_DB']] return g.db # todo # @app.teardown
_appcontext # def teardown_db(exception): # conn = getattr(g, 'conn', None) # if conn is not None: # conn.close()
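# A minimal usage sketch, assuming a Flask app configured with MONGODB_HOST,
# MONGODB_PORT and MONGODB_DB. get_db() must run inside an application
# context so that `g` and `current_app` are bound; the collection name is
# illustrative.
#
# with app.app_context():
#     db = get_db()
#     db.things.insert_one({'hello': 'world'})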