#    language governing permissions and
#    limitations under the License.

import socket
import sys
import time

import eventlet
eventlet.monkey_patch()

from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall

from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as n_constants
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
    import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm


LOG = logging.getLogger(__name__)


class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):

    # Set RPC API version to 1.0 by default.
    # history
    #   1.1 Support Security Group RPC
    target = oslo_messaging.Target(version='1.1')

    def __init__(self, context, agent, sg_agent):
        super(SriovNicSwitchRpcCallbacks, self).__init__()
        self.context = context
        self.agent = agent
        self.sg_agent = sg_agent

    def port_update(self, context, **kwargs):
        LOG.debug("port_update received")
        port = kwargs.get('port')

        # Put the port mac address in the updated_devices set.
        # Do not store port details, as if they're used for processing
        # notifications there is no guarantee the notifications are
        # processed in the same order as the relevant API requests.
        mac = port['mac_address']
        pci_slot = None
        if port.get('binding:profile'):
            pci_slot = port['binding:profile'].get('pci_slot')

        if pci_slot:
            self.agent.updated_devices.add((mac, pci_slot))
            LOG.debug("port_update RPC received for port: %(id)s with MAC "
                      "%(mac)s and PCI slot %(pci_slot)s",
                      {'id': port['id'], 'mac': mac, 'pci_slot': pci_slot})
        else:
            LOG.debug("No PCI Slot for port %(id)s with MAC %(mac)s; "
                      "skipping", {'id': port['id'], 'mac': mac,
                                   'pci_slot': pci_slot})


class SriovNicSwitchAgent(object):
    def __init__(self, physical_devices_mappings, exclude_devices,
                 polling_interval):

        self.polling_interval = polling_interval
        self.conf = cfg.CONF
        self.setup_eswitch_mgr(physical_devices_mappings,
                               exclude_devices)
        configurations = {'device_mappings': physical_devices_mappings}
        self.agent_state = {
            'binary': 'neutron-sriov-nic-agent',
            'host': self.conf.host,
            'topic': n_constants.L2_AGENT_TOPIC,
            'configurations': configurations,
            'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH,
            'start_flag': True}

        # Stores port update notifications for processing in the main loop
        self.updated_devices = set()
        self.mac_to_port_id_mapping = {}

        self.context = context.get_admin_context_without_session()
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
                                                     self.sg_plugin_rpc)
        self._setup_rpc()
        self.ext_manager = self._create_agent_extension_manager(
            self.connection)
        # The initialization is complete; we can start receiving messages
        self.connection.consume_in_threads()
        # Initialize iteration counter
        self.iter_num = 0

    def _setup_rpc(self):
        self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
        LOG.info(_LI("RPC agent_id: %s"), self.agent_id)

        self.topic = topics.AGENT
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # RPC network init
        # Handle updates from service
        self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
                                                     self.sg_agent)]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers,
                                                     start_listening=False)

        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _report_state(self):
        try:
            devices = len(self.eswitch_mgr.get_assigned_devices_info())
            self.agent_state.get('configurations')['devices'] = devices
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))

    def _create_agent_extension_manager(self, connection):
        ext_manager.register_opts(self.conf)
        mgr = ext_manager.AgentExtensionsManager(self.conf)
        mgr.initialize(connection, 'sriov')
        return mgr

    def setup_eswitch_mgr(self, device_mappings, exclude_devices=None):
        # avoid a mutable default argument; fall back to an empty dict
        exclude_devices = exclude_devices or {}
        self.eswitch_mgr = esm.ESwitchManager()
        self.eswitch_mgr.discover_devices(device_mappings, exclude_devices)

    def scan_devices(self, registered_devices, updated_devices):
        curr_devices = self.eswitch_mgr.get_assigned_devices_info()
        device_info = {}
        device_info['current'] = curr_devices
        device_info['added'] = curr_devices - registered_devices
        # we don't want to process updates for devices that don't exist
        device_info['updated'] = updated_devices & curr_devices
        # we need to clean up after devices are removed
        device_info['removed'] = registered_devices - curr_devices
        return device_info

    def _device_info_has_changes(self, device_info):
        return (device_info.get('added')
                or device_info.get('updated')
                or device_info.get('removed'))

    def process_network_devices(self, device_info):
        resync_a = False
        resync_b = False

        self.sg_agent.prepare_devices_filter(device_info.get('added'))

        if device_info.get('updated'):
            self.sg_agent.refresh_firewall()
        # Updated devices are processed the same as new ones, as their
        # admin_state_up may have changed. The set union prevents duplicating
        # work when a device is new and updated in the same polling iteration.
        devices_added_updated = (set(device_info.get('added'))
                                 | set(device_info.get('updated')))
        if devices_added_updated:
            resync_a = self.treat_devices_added_updated(devices_added_updated)

        if device_info.get('removed'):
            resync_b = self.treat_devices_removed(device_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)

    def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
                                                       spoofcheck)
            except Exception:
                LOG.warning(_LW("Failed to set spoofcheck for device %s"),
                            device)
            LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
                     {"device": device, "spoofcheck": spoofcheck})
from conn import Connection
import dispatch
import socket


class Acceptor(Connection):
    def __init__(self, port):
        self.dispatcher = dispatch.Dispatch(1)
        self.dispatcher.start()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.sock.bind(("127.0.0.1", port))
        self.sock.listen(1024)

    def handleRead(self):
        cli, addr = self.sock.accept()
        # cli.setblocking(0)
        self.dispatcher.dispatch(cli)
from flask import Flask
# flask.ext.* import paths are deprecated and removed in modern Flask;
# flask_script is the same package under its canonical name.
from flask_script import Manager

app = Flask(__name__)
manager = Manager(app)


@app.route('/')
def index():
    return '<h1>Hello World!</h1>'


@app.route('/user/<name>')
def user(name):
    return '<h1>Hello, {name}!</h1>'.format(name=name)


if __name__ == '__main__':
    manager.run()
#
# Copyright (c) 2017 Sugimoto Takaaki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import urllib
import json
from collections import OrderedDict

# dictionary of api url
d = OrderedDict()
d['btc'] = 'https://api.cryptonator.com/api/ticker/btc-usd'
d['ltc'] = 'https://api.cryptonator.com/api/ticker/ltc-usd'
d['doge'] = 'https://api.cryptonator.com/api/ticker/doge-usd'
d['xrp'] = 'https://api.cryptonator.com/api/ticker/xrp-usd'
d['eth'] = 'https://api.cryptonator.com/api/ticker/eth-usd'
d['mona'] = 'https://api.cryptonator.com/api/ticker/mona-usd'

outputString = ""
for url in d.values():
    sock = urllib.urlopen(url)
    jsonString = sock.read()
    sock.close()
    jsonCurrency = json.loads(jsonString)
    price = jsonCurrency['ticker']['price']
    outputString = outputString + price + " "

print outputString
            is_admin=False)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Simple_tenant_usage'])

    def _test_verify_index(self, start, stop):
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage?start=%s&end=%s' %
            (start.isoformat(), stop.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.admin_context,
            init_only=('os-simple-tenant-usage',)))

        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        usages = res_dict['tenant_usages']
        for i in xrange(TENANTS):
            self.assertEqual(int(usages[i]['total_hours']),
                             SERVERS * HOURS)
            self.assertEqual(int(usages[i]['total_local_gb_usage']),
                             SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
            self.assertEqual(int(usages[i]['total_memory_mb_usage']),
                             SERVERS * MEMORY_MB * HOURS)
            self.assertEqual(int(usages[i]['total_vcpus_usage']),
                             SERVERS * VCPUS * HOURS)
            self.assertFalse(usages[i].get('server_usages'))

    def test_verify_index(self):
        self._test_verify_index(START, STOP)

    def test_verify_index_future_end_time(self):
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_index(START, future)

    def test_verify_show(self):
        self._test_verify_show(START, STOP)

    def test_verify_show_future_end_time(self):
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_show(START, future)

    def _get_tenant_usages(self, detailed=''):
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage?'
            'detailed=%s&start=%s&end=%s' %
            (detailed, START.isoformat(), STOP.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.admin_context,
            init_only=('os-simple-tenant-usage',)))

        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        return res_dict['tenant_usages']

    def test_verify_detailed_index(self):
        usages = self._get_tenant_usages('1')
        for i in xrange(TENANTS):
            servers = usages[i]['server_usages']
            for j in xrange(SERVERS):
                self.assertEqual(int(servers[j]['hours']), HOURS)

    def test_verify_simple_index(self):
        usages = self._get_tenant_usages(detailed='0')
        for i in xrange(TENANTS):
            self.assertIsNone(usages[i].get('server_usages'))

    def test_verify_simple_index_empty_param(self):
        # NOTE(lzyeval): 'detailed=&start=..&end=..'
        usages = self._get_tenant_usages()
        for i in xrange(TENANTS):
            self.assertIsNone(usages[i].get('server_usages'))

    def _test_verify_show(self, start, stop):
        tenant_id = 0
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage/'
            'faketenant_%s?start=%s&end=%s' %
            (tenant_id, start.isoformat(), stop.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context,
            init_only=('os-simple-tenant-usage',)))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)

        usage = res_dict['tenant_usage']
        servers = usage['server_usages']
        self.assertEqual(len(usage['server_usages']), SERVERS)
        uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
                 (x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
        for j in xrange(SERVERS):
            delta = STOP - START
            uptime = delta.days * 24 * 3600 + delta.seconds
            self.assertEqual(int(servers[j]['uptime']), uptime)
            self.assertEqual(int(servers[j]['hours']), HOURS)
            self.assertIn(servers[j]['instance_id'], uuids)

    def test_verify_show_cant_view_other_tenant(self):
        req = webob.Request.blank(
            '/v2/faketenant_1/os-simple-tenant-usage/'
            'faketenant_0?start=%s&end=%s' %
            (START.isoformat(), STOP.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        rules = {
            "compute_extension:simple_tenant_usage:show":
                common_policy.parse_rule([
                    ["role:admin"], ["project_id:%(project_id)s"]
                ])
        }
        common_policy.set_rules(common_policy.Rules(rules))

        try:
            res = req.get_response(fakes.wsgi_app(
                fake_auth_context=self.alt_user_context,
                init_only=('os-simple-tenant-usage',)))
            self.assertEqual(res.status_int, 403)
        finally:
            policy.reset()

    def test_get_tenants_usage_with_bad_start_date(self):
        future = NOW + datetime.timedelta(hours=HOURS)
        tenant_id = 0
        req = webob.Request.blank(
            '/v2/faketenant_0/os-simple-tenant-usage/'
            'faketenant_%s?start=%s&end=%s' %
            (tenant_id, future.isoformat(), NOW.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context,
            init_only=('os-simple-tenant-usage',)))
        self.assertEqual(res.status_int, 400)


class SimpleTenantUsageSerializerTest(test.TestCase):
    def _verify_server_usage(self, raw_usage, tree):
        self.assertEqual('server_usage', tree.tag)

        # Figure out what fields we expect
        not_seen = set(raw_usage.keys())

        for child in tree:
            self.assertIn(child.tag, not_seen)
            not_seen.remove(child.tag)
            self.assertEqual(str(raw_usage[child.tag]), child.text)

        self.assertEqual(len(not_seen), 0)

    def _verify_tenant_usage(self, raw_usage, tree):
        self.assertEqual('tenant_usage', tree.tag)

        # Figure out what fields we expect
        not_seen = set(raw_usage.keys())

        for child in tree:
            self.assertIn(child.tag, not_seen)
            not_seen.remove(child.tag)
            if child.tag == 'server_usages':
                for idx, gr_child in enumerate(child):
                    self._verify_server_usage(
                        raw_usage['server_usages'][idx], gr_child)
            else:
                self.assertEqual(str(raw_usage[child.tag]),
                                 child.text)

        self.assertEqual(len(not_seen), 0)

    def test_serializer_show(self):
        serializer = simple_tenant_usage.SimpleTenantUsageTemplate()
        today = timeutils.utcnow()
        yesterday = today - datetime.timedelta(days=1)
        raw_usage = dict(
            tenant_id='tenant',
            total_local_gb_usage=789,
            total_vcpus_usage=456,
            total_memory_mb_usage=123,
            total_hours=24,
            start=yesterday,
            stop=today,
            server_usages=[dict(
                instance_id='00000000-0000-0000-0000-0000000000000000',
                name='test',
                hours=24,
                memory_mb=1024,
                local_gb=50,
                vcpus=1,
                tenant_id='tenant',
                flavor='m1.small',
                started_at=yesterday,
                ended_at=today,
                state='term
#!/usr/bin/env python3

"""
This housekeeping script reads a GFF3 file and writes a new one, adding a
'gene' row for any RNA feature which doesn't have one.  The coordinates of
the RNA will be copied.

The initial use-case here was a GFF file dumped from WebApollo which had this
issue.  In this particular use case, the orphan mRNAs have ID attributes but
no Parent, though this is corrected.

INPUT EXAMPLE:

###
ChromosomeII_BmicrotiR1	IGS	mRNA	1467897	1468187	.	+	.	Name=ChromosomeII_BmicrotiR1:1467871-1468187;ID=101D714C468A44840D49A6FAAD27AFE5
ChromosomeII_BmicrotiR1	IGS	exon	1467897	1468187	.	+	.	Name=DE1443B2DABA5DEDBDEBE79EB433EEB8;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=DE1443B2DABA5DEDBDEBE79EB433EEB8
ChromosomeII_BmicrotiR1	IGS	CDS	1467897	1468187	.	+	0	Name=101D714C468A44840D49A6FAAD27AFE5-CDS;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=101D714C468A44840D49A6FAAD27AFE5-CDS

Author: Joshua Orvis
"""

import argparse

from biocode import gff


def main():
    parser = argparse.ArgumentParser(
        description='Adds gene features for RNAs which lack them')

    parser.add_argument('-i', '--input', type=str, required=True,
                        help='Path to the input GFF3 file')
    ## output file to be written
    parser.add_argument('-o', '--output', type=str, required=True,
                        help='Output GFF3 file to write')
    args = parser.parse_args()

    infile = open(args.input)
    ofh = open(args.output, 'wt')

    for line in infile:
        if line.startswith('#'):
            ofh.write(line)
            continue

        line = line.rstrip()
        cols = line.split("\t")

        if len(cols) != 9:
            ofh.write("{0}\n".format(line))
            continue

        id = gff.column_9_value(cols[8], 'ID')
        parent = gff.column_9_value(cols[8], 'Parent')

        if cols[2].endswith('RNA') and parent is None:
            # emit a synthesized gene row, then reparent the RNA to it
            gene_cols = list(cols)
            gene_cols[2] = 'gene'
            gene_cols[8] = gff.set_column_9_value(gene_cols[8], 'ID',
                                                  "{0}.gene".format(id))
            ofh.write("{0}\n".format("\t".join(gene_cols)))

            cols[8] = gff.set_column_9_value(cols[8], 'Parent',
                                             "{0}.gene".format(id))
            ofh.write("{0}\n".format("\t".join(cols)))
        else:
            ofh.write("{0}\n".format(line))


if __name__ == '__main__':
    main()
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.stats import rankdata, tiecorrect


class TestTieCorrect(object):

    def test_empty(self):
        """An empty array requires no correction, should return 1.0."""
        ranks = np.array([], dtype=np.float64)
        c = tiecorrect(ranks)
        assert_equal(c, 1.0)

    def test_one(self):
        """A single element requires no correction, should return 1.0."""
        ranks = np.array([1.0], dtype=np.float64)
        c = tiecorrect(ranks)
        assert_equal(c, 1.0)

    def test_no_correction(self):
        """Arrays with no ties require no correction."""
        ranks = np.arange(2.0)
        c = tiecorrect(ranks)
        assert_equal(c, 1.0)
        ranks = np.arange(3.0)
        c = tiecorrect(ranks)
        assert_equal(c, 1.0)

    def test_basic(self):
        """Check a few basic examples of the tie correction factor."""
        # One tie of two elements
        ranks = np.array([1.0, 2.5, 2.5])
        c = tiecorrect(ranks)
        T = 2.0
        N = ranks.size
        expected = 1.0 - (T**3 - T) / (N**3 - N)
        assert_equal(c, expected)

        # One tie of two elements (same as above, but tie is not at the end)
        ranks = np.array([1.5, 1.5, 3.0])
        c = tiecorrect(ranks)
        T = 2.0
        N = ranks.size
        expected = 1.0 - (T**3 - T) / (N**3 - N)
        assert_equal(c, expected)

        # One tie of three elements
        ranks = np.array([1.0, 3.0, 3.0, 3.0])
        c = tiecorrect(ranks)
        T = 3.0
        N = ranks.size
        expected = 1.0 - (T**3 - T) / (N**3 - N)
        assert_equal(c, expected)

        # Two ties, lengths 2 and 3.
        ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
        c = tiecorrect(ranks)
        T1 = 2.0
        T2 = 3.0
        N = ranks.size
        expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
        assert_equal(c, expected)

    def test_overflow(self):
        ntie, k = 2000, 5
        a = np.repeat(np.arange(k), ntie)
        n = a.size  # ntie * k
        out = tiecorrect(rankdata(a))
        assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))


class TestRankData(object):

    def test_empty(self):
        """stats.rankdata([]) should return an empty array."""
        a = np.array([], dtype=int)
        r = rankdata(a)
        assert_array_equal(r, np.array([], dtype=np.float64))
        r = rankdata([])
        assert_array_equal(r, np.array([], dtype=np.float64))

    def test_one(self):
        """Check stats.rankdata with an array of length 1."""
        data = [100]
        a = np.array(data, dtype=int)
        r = rankdata(a)
        assert_array_equal(r, np.array([1.0], dtype=np.float64))
        r = rankdata(data)
        assert_array_equal(r, np.array([1.0], dtype=np.float64))

    def test_basic(self):
        """Basic tests of stats.rankdata."""
        data = [100, 10, 50]
        expected = np.array([3.0, 1.0, 2.0], dtype=np.float64)
        a = np.array(data, dtype=int)
        r = rankdata(a)
        assert_array_equal(r, expected)
        r = rankdata(data)
        assert_array_equal(r, expected)

        data = [40, 10, 30, 10, 50]
        expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64)
        a = np.array(data, dtype=int)
        r = rankdata(a)
        assert_array_equal(r, expected)
        r = rankdata(data)
        assert_array_equal(r, expected)

        data = [20, 20, 20, 10, 10, 10]
        expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64)
        a = np.array(data, dtype=int)
        r = rankdata(a)
        assert_array_equal(r, expected)
        r = rankdata(data)
        assert_array_equal(r, expected)
        # The docstring states explicitly that the argument is flattened.
        a2d = a.reshape(2, 3)
        r = rankdata(a2d)
        assert_array_equal(r, expected)

    def test_rankdata_object_string(self):
        min_rank = lambda a: [1 + sum(i < j for i in a) for j in a]
        max_rank = lambda a: [sum(i <= j for i in a) for j in a]
        ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)])

        def average_rank(a):
            return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))]

        def dense_rank(a):
            b = np.unique(a)
            return [1 + sum(i < j for i in b) for j in a]

        rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank,
                     average=average_rank, dense=dense_rank)

        def check_ranks(a):
            for method in 'min', 'max', 'dense', 'ordinal', 'average':
                out = rankdata(a, method=method)
                assert_array_equal(out, rankf[method](a))

        val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz']
        check_ranks(np.random.choice(val, 200))
        check_ranks(np.random.choice(val, 200).astype('object'))

        val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object')
        check_ranks(np.random.choice(val, 200).astype('object'))

    def test_large_int(self):
        data = np.array([2**60, 2**60+1], dtype=np.uint64)
        r = rankdata(data)
        assert_array_equal(r, [1.0, 2.0])

        data = np.array([2**60, 2**60+1], dtype=np.int64)
        r = rankdata(data)
        assert_array_equal(r, [1.0, 2.0])

        data = np.array([2**60, -2**60+1], dtype=np.int64)
        r = rankdata(data)
        assert_array_equal(r, [2.0, 1.0])

    def test_big_tie(self):
        for n in [10000, 100000, 1000000]:
            data = np.ones(n, dtype=int)
            r = rankdata(data)
            expected_rank = 0.5 * (n + 1)
            assert_array_equal(r, expected_rank * data,
                               "test failed with n=%d" % n)


_cases = (
    # values, method, expected
    ([], 'average', []),
    ([], 'min', []),
    ([], 'max', []),
    ([], 'dense', []),
    ([], 'ordinal', []),
    #
    ([100], 'average', [1.0]),
    ([100], 'min', [1.0]),
    ([100], 'max', [1.0]),
    ([100], 'dense', [1.0]),
    ([100], 'ordinal', [1.0]),
    #
    ([100, 100, 100], 'average', [2.0, 2.0, 2.0]),
    ([100, 100, 100], 'min', [1.0, 1.0, 1.0]),
    ([100, 100, 100], 'max', [3.0, 3.0, 3.0]),
    ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]),
    ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]),
    #
    ([100, 300, 200], 'average', [1.0, 3.0, 2.0]),
    ([100, 300, 200], 'min', [1.0, 3.0, 2.0]),
    ([100, 300, 200], 'max', [1.0, 3.0, 2.0]),
    ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]),
    ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]),
    #
    ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]),
    ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]),
    ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]),
    ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]),
    ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]),
    #
    ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]),
    ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]),
    ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]),
    ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]),
    ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]),
    #
    ([10] * 30, 'ordinal', np.arange(1.0, 31.0)),
)


def test_cases():
    for values, method, expected in _cases:
        r = rankdata(values, method=method)
        assert_array_equal(r, expected)
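# Usage sketch (not part of the SciPy test suite above): a minimal
# standalone illustration of the behavior those tests pin down, assuming
# only numpy and scipy are installed.
import numpy as np
from scipy.stats import rankdata, tiecorrect

data = [100, 200, 300, 200]
print(rankdata(data))                # [1.  2.5 4.  2.5]  (default method='average')
print(rankdata(data, method='min'))  # [1. 2. 4. 2.]

# Tie correction factor 1 - sum(T**3 - T) / (N**3 - N): one tie group of
# size T=2 among N=4 values gives 1 - 6/60 = 0.9.
print(tiecorrect(rankdata(data)))    # 0.9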
import logging

from ...engines.light import SimEngineLight
from ...errors import SimEngineError

l = logging.getLogger(name=__name__)


class SimEnginePropagatorBase(SimEngineLight):  # pylint:disable=abstract-method
    def __init__(self, stack_pointer_tracker=None, project=None):
        super().__init__()

        # Used in the VEX engine
        self._project = project
        self.base_state = None
        self._load_callback = None

        # Used in the AIL engine
        self._stack_pointer_tracker = stack_pointer_tracker

    def process(self, state, *args, **kwargs):
        self.project = kwargs.pop('project', None)
        self.base_state = kwargs.pop('base_state', None)
        self._load_callback = kwargs.pop('load_callback', None)

        try:
            self._process(state, None, block=kwargs.pop('block', None))
        except SimEngineError as ex:
            if kwargs.pop('fail_fast', False) is True:
                raise ex
            l.error(ex, exc_info=True)

        return self.state
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError


class TestProductionOrder(unittest.TestCase):
	def check_planned_qty(self):
		set_perpetual_inventory(0)

		planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item",
			"warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0

		pro_doc = frappe.copy_doc(test_records[0])
		pro_doc.insert()
		pro_doc.submit()

		# add raw materials to stores
		test_stock_entry.make_stock_entry(item_code="_Test Item",
			target="Stores - _TC", qty=100, incoming_rate=100)
		test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
			target="Stores - _TC", qty=100, incoming_rate=100)

		# from stores to wip
		s = frappe.get_doc(make_stock_entry(pro_doc.name,
			"Material Transfer for Manufacture", 4))
		for d in s.get("items"):
			d.s_warehouse = "Stores - _TC"
		s.fiscal_year = "_Test Fiscal Year 2013"
		s.posting_date = "2013-01-02"
		s.insert()
		s.submit()

		# from wip to fg
		s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
		s.fiscal_year = "_Test Fiscal Year 2013"
		s.posting_date = "2013-01-03"
		s.insert()
		s.submit()

		self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
			"produced_qty"), 4)
		planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item",
			"warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
		self.assertEqual(planned1 - planned0, 6)

		return pro_doc

	def test_over_production(self):
		from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
		pro_doc = self.check_planned_qty()

		test_stock_entry.make_stock_entry(item_code="_Test Item",
			target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
		test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
			target="_Test Warehouse - _TC", qty=100, incoming_rate=100)

		s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
		s.fiscal_year = "_Test Fiscal Year 2013"
		s.posting_date = "2013-01-04"
		s.insert()

		self.assertRaises(StockOverProductionError, s.submit)

	def test_make_time_log(self):
		from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
		from frappe.utils import cstr, time_diff_in_hours

		prod_order = frappe.get_doc({
			"doctype": "Production Order",
			"production_item": "_Test FG Item 2",
			"bom_no": "BOM/_Test FG Item 2/001",
			"qty": 1,
			"wip_warehouse": "_Test Warehouse - _TC",
			"fg_warehouse": "_Test Warehouse 1 - _TC",
			"company": "_Test Company",
			"planned_start_date": "2014-11-25 00:00:00"
		})

		prod_order.set_production_order_operations()
		prod_order.insert()
		prod_order.submit()

		d = prod_order.operations[0]
		d.completed_qty = flt(d.completed_qty)

		time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation,
			d.planned_start_time, d.planned_end_time,
			prod_order.qty - d.completed_qty, operation_id=d.name)

		self.assertEqual(prod_order.name, time_log.production_order)
		self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
		self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),
			time_log.hours)

		time_log.save()
		time_log.submit()

		manufacturing_settings = frappe.get_doc({
			"doctype": "Manufacturing Settings",
			"allow_production_on_holidays": 0
		})
		manufacturing_settings.save()

		prod_order.load_from_db()
		self.assertEqual(prod_order.operations[0].status, "Completed")
		self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)

		self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time),
			get_datetime(time_log.from_time))
		self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time),
			get_datetime(time_log.to_time))

		self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
		self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)

		time_log.cancel()

		prod_order.load_from_db()
		self.assertEqual(prod_order.operations[0].status, "Pending")
		self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
		self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
		self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)

		time_log2 = frappe.copy_doc(time_log)
		time_log2.update({
			"completed_qty": 10,
			"from_time": "2014-11-26 00:00:00",
			"to_time": "2014-11-26 00:00:00",
			"docstatus": 0
		})
		self.assertRaises(OverProductionLoggedError, time_log2.save)


test_records = frappe.get_test_records('Production Order')
import numpy as np
import cv2
from matplotlib import pylab as plt

# Ref: http://www.pyimagesearch.com/2015/07/16/where-did-sift-and-surf-go-in-opencv-3/
picNumber = 1
filename = "/home/cwu/project/stereo-calibration/calib_imgs/3/left/left_" + str(picNumber) + ".jpg"

img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

orb = cv2.ORB_create()

# find the keypoints with ORB (FAST detector internally)
kp = orb.detect(img, None)

# compute the descriptors with ORB (BRIEF descriptor internally)
kp, des = orb.compute(img, kp)

img = cv2.drawKeypoints(img, kp, None, (0, 255, 0), 4)
cv2.imshow('img', img)
cv2.waitKey(1000)
cv2.imwrite('orb_keypoints.jpg', img)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import mezzanine.core.fields


class Migration(migrations.Migration):

    dependencies = [
        ('pages', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True,
                    auto_created=True, primary_key=True, serialize=False,
                    to='pages.Page', on_delete=models.CASCADE)),
                ('content', mezzanine.core.fields.RichTextField(
                    verbose_name='Content')),
                ('zip_import', models.FileField(
                    help_text="Upload a zip file containing images, and "
                              "they'll be imported into this gallery.",
                    upload_to='galleries', verbose_name='Zip import',
                    blank=True)),
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'Gallery',
                'verbose_name_plural': 'Galleries',
            },
            bases=('pages.page', models.Model),
        ),
        migrations.CreateModel(
            name='GalleryImage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                    auto_created=True, primary_key=True)),
                ('_order', models.IntegerField(null=True,
                    verbose_name='Order')),
                ('file', mezzanine.core.fields.FileField(max_length=200,
                    verbose_name='File')),
                ('description', models.CharField(max_length=1000,
                    verbose_name='Description', blank=True)),
                ('gallery', models.ForeignKey(related_name='images',
                    to='galleries.Gallery', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'Image',
                'verbose_name_plural': 'Images',
            },
            bases=(models.Model,),
        ),
    ]
# -*- coding: utf-8 -*-
from wikitools.api import APIRequest
from wikitools.wiki import Wiki
from wikitools.page import Page
from urllib2 import quote

pairs = [
    ['"', '"'],
    ['(', ')'],
    ['[', ']'],
    ['{', '}'],
    ['<!--', '-->'],
    ['<', '>'],
    ['<gallery', '</gallery>'],
    ['<includeonly>', '</includeonly>'],
    ['<noinclude>', '</noinclude>'],
    ['<onlyinclude>', '</onlyinclude>'],
    ['<small>', '</small>'],
    ['<table>', '</table>'],
    ['<td>', '</td>'],
    ['<tr>', '</tr>'],
]

wiki = Wiki('http://wiki.teamfortress.com/w/api.php')


# Returns a list of unmatched element indices.
def find_mismatch(text, pair):
    problems = []
    for i, char in enumerate(text):
        if char == pair[0]:
            problems.append(i)
        if char == pair[1]:
            try:
                problems.pop()
            except IndexError:
                return [i]
    return problems


params = {
    'action': 'query',
    'list': 'allpages',
    'apfilterredir': 'nonredirects',
    'aplimit': '500',
}

titles = set()
req = APIRequest(wiki, params)
for result in req.queryGen():
    for article in result['query']['allpages']:
        titles.add(article['title'])

titles = list(titles)
titles.sort()
print 'Found', len(titles), 'pages'

for title in titles:
    page = Page(wiki, title)
    page.getWikiText()
    text = page.getWikiText().lower()
    printed_link = False
    for pair in pairs:
        if text.count(pair[0]) != text.count(pair[1]):
            if not printed_link:
                print '=' * 80
                print 'https://wiki.teamfortress.com/w/index.php?action=edit&title=%s' % quote(title.encode('utf-8'))
                printed_link = True
            indices = find_mismatch(text, pair)
            print '-' * 80
            print pair
            for index in indices:
                print '-' * 80
                print text[index - 100:index + 100]
'''
Created on Jun 6, 2012

@author: vr274
'''

import numpy as np
from generic import TakestepSlice, TakestepInterface
from pele.utils import rotations

__all__ = ["RandomDisplacement", "UniformDisplacement",
           "RotationalDisplacement", "RandomCluster"]


class RandomDisplacement(TakestepSlice):
    '''Random displacement on each individual coordinate

    RandomDisplacement is the most basic step taking routine. It simply
    displaces each coordinate by a random value.

    Parameters
    ----------
    stepsize : float
        magnitude of random displacement
    '''

    def __init__(self, stepsize=1.0):
        TakestepSlice.__init__(self, stepsize=stepsize)

    def takeStep(self, coords, **kwargs):
        coords[self.srange] += np.random.uniform(low=-self.stepsize,
                                                 high=self.stepsize,
                                                 size=coords[self.srange].shape)


class UniformDisplacement(TakestepSlice):
    '''Displace each atom by a uniform random vector

    The routine generates a proper uniform random unitvector to displace
    atoms.
    '''

    def takeStep(self, coords, **kwargs):
        c = coords[self.srange]
        for x in c.reshape(c.size / 3, 3):
            x += self.stepsize * rotations.vector_random_uniform_hypersphere(3)


class RotationalDisplacement(TakestepSlice):
    '''Random rotation for angle axis vector

    RotationalDisplacement performs a proper random rotation. If the
    coordinate array contains positions and orientations, make sure to
    specify the correct slice for the angle axis coordinates.
    '''

    def takeStep(self, coords, **kwargs):
        """take a random orientational step"""
        c = coords[self.srange]
        for x in c.reshape(c.size / 3, 3):
            rotations.takestep_aa(x, self.stepsize)


class RandomCluster(TakestepInterface):
    '''Generate a random configuration'''

    def __init__(self, volume=1.0):
        self.volume = volume

    def takeStep(self, coords, **kwargs):
        coords[:] = np.random.random(coords.shape) * (self.volume**(1. / 3.))
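# Usage sketch (illustrative, not from the original module): driving one of
# the step-taking classes above by hand.  This assumes TakestepSlice's
# default slice covers the whole coordinate array, which is project-specific.
import numpy as np

step = RandomDisplacement(stepsize=0.3)
coords = np.zeros(3 * 10)  # 10 atoms, flattened (x, y, z) triples
step.takeStep(coords)
# every coordinate has now been displaced by a value in [-0.3, 0.3]
print coords.min(), coords.max()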
import _plotly_utils.basevalidators


class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    def __init__(
        self, plotly_name="sizemode", parent_name="scatterpolar.marker", **kwargs
    ):
        super(SizemodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            values=kwargs.pop("values", ["diameter", "area"]),
            **kwargs
        )
#!/usr/bin/python
# $Id:$

from base import Display, Screen, ScreenMode, Canvas

from pyglet.libs.win32 import _kernel32, _user32, types, constants
from pyglet.libs.win32.constants import *
from pyglet.libs.win32.types import *


class Win32Display(Display):
    def get_screens(self):
        screens = []

        def enum_proc(hMonitor, hdcMonitor, lprcMonitor, dwData):
            r = lprcMonitor.contents
            width = r.right - r.left
            height = r.bottom - r.top
            screens.append(
                Win32Screen(self, hMonitor, r.left, r.top, width, height))
            return True

        enum_proc_type = WINFUNCTYPE(BOOL, HMONITOR, HDC, POINTER(RECT), LPARAM)
        enum_proc_ptr = enum_proc_type(enum_proc)
        _user32.EnumDisplayMonitors(NULL, NULL, enum_proc_ptr, 0)
        return screens


class Win32Screen(Screen):
    _initial_mode = None

    def __init__(self, display, handle, x, y, width, height):
        super(Win32Screen, self).__init__(display, x, y, width, height)
        self._handle = handle

    def get_matching_configs(self, template):
        canvas = Win32Canvas(self.display, 0, _user32.GetDC(0))
        configs = template.match(canvas)
        # XXX deprecate config's being screen-specific
        for config in configs:
            config.screen = self
        return configs

    def get_device_name(self):
        info = MONITORINFOEX()
        info.cbSize = sizeof(MONITORINFOEX)
        _user32.GetMonitorInfoW(self._handle, byref(info))
        return info.szDevice

    def get_modes(self):
        device_name = self.get_device_name()
        i = 0
        modes = []
        while True:
            mode = DEVMODE()
            mode.dmSize = sizeof(DEVMODE)
            r = _user32.EnumDisplaySettingsW(device_name, i, byref(mode))
            if not r:
                break
            modes.append(Win32ScreenMode(self, mode))
            i += 1
        return modes

    def get_mode(self):
        mode = DEVMODE()
        mode.dmSize = sizeof(DEVMODE)
        _user32.EnumDisplaySettingsW(self.get_device_name(),
                                     ENUM_CURRENT_SETTINGS, byref(mode))
        return Win32ScreenMode(self, mode)

    def set_mode(self, mode):
        assert mode.screen is self
        if not self._initial_mode:
            self._initial_mode = self.get_mode()

        r = _user32.ChangeDisplaySettingsExW(self.get_device_name(),
                                             byref(mode._mode),
                                             None,
                                             CDS_FULLSCREEN,
                                             None)
        if r == DISP_CHANGE_SUCCESSFUL:
            self.width = mode.width
            self.height = mode.height

    def restore_mode(self):
        if self._initial_mode:
            self.set_mode(self._initial_mode)


class Win32ScreenMode(ScreenMode):
    def __init__(self, screen, mode):
        super(Win32ScreenMode, self).__init__(screen)
        self._mode = mode
        self.width = mode.dmPelsWidth
        self.height = mode.dmPelsHeight
        self.depth = mode.dmBitsPerPel
        self.rate = mode.dmDisplayFrequency


class Win32Canvas(Canvas):
    def __init__(self, display, hwnd, hdc):
        super(Win32Canvas, self).__init__(display)
        self.hwnd = hwnd
        self.hdc = hdc
import re
import sys


def is_self_describing(n):
    # A number (given as a string) is self-describing when the digit at
    # position i equals how many times the digit i appears in the number.
    for i in range(len(n)):
        c = n[i]
        if int(c) != len(re.findall(str(i), n)):
            return False
    return True


with open(sys.argv[1], 'r') as fh:
    for line in fh.readlines():
        line = line.strip()
        if line == '':
            continue
        print 1 if is_self_describing(line) else 0
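# Worked example (illustrative): '2020' is self-describing because it
# contains two 0s (position 0 holds 2), zero 1s (position 1 holds 0),
# two 2s (position 2 holds 2) and zero 3s (position 3 holds 0).
#
#   >>> is_self_describing('2020')
#   True
#   >>> is_self_describing('1210')
#   True
#   >>> is_self_describing('1234')
#   False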
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *


def create(kernel):
    result = Creature()

    result.template = "object/mobile/shared_dressed_rebel_brigadier_general_rodian_female_01.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name", "rodian_base_female")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
from app import db
from app.model import DirectionStatistic

import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure


def create_range_figure2(sender_id):
    fig = Figure()
    axis = fig.add_subplot(1, 1, 1)
    xs = range(100)
    ys = [random.randint(1, 50) for x in xs]
    axis.plot(xs, ys)
    return fig


def create_range_figure(sender_id):
    sds = db.session.query(DirectionStatistic) \
        .filter(DirectionStatistic.sender_id == sender_id) \
        .order_by(DirectionStatistic.directions_count.desc()) \
        .limit(1) \
        .one()

    fig = Figure()
    direction_data = sds.direction_data
    max_range = max([r['max_range'] / 1000.0 for r in direction_data])

    theta = np.array([i['direction'] / 180 * np.pi for i in direction_data])
    radii = np.array([i['max_range'] / 1000 if i['max_range'] > 0 else 0
                      for i in direction_data])
    width = np.array([13 / 180 * np.pi for i in direction_data])
    colors = plt.cm.viridis(radii / max_range)

    ax = fig.add_subplot(111, projection='polar')
    ax.bar(theta, radii, width=width, bottom=0.0, color=colors,
           edgecolor='b', alpha=0.5)
    # ax.set_rticks([0, 25, 50, 75, 100, 125, 150])
    ax.set_theta_zero_location("N")
    ax.set_theta_direction(-1)

    fig.suptitle(f"Range between sender '{sds.sender.name}' "
                 f"and receiver '{sds.receiver.name}'")

    return fig
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Registry for visualizations."""

import inspect

from extensions.visualizations import models


class Registry(object):
    """Registry of all visualizations."""

    # Dict mapping visualization class names to their classes.
    visualizations_dict = {}

    @classmethod
    def _refresh_registry(cls):
        """Clears and adds new visualization instances to the registry."""
        cls.visualizations_dict.clear()

        # Add new visualization instances to the registry.
        for name, clazz in inspect.getmembers(
                models, predicate=inspect.isclass):
            if name.endswith('_test') or name == 'BaseVisualization':
                continue

            ancestor_names = [
                base_class.__name__ for base_class in inspect.getmro(clazz)]
            if 'BaseVisualization' in ancestor_names:
                cls.visualizations_dict[clazz.__name__] = clazz

    @classmethod
    def get_visualization_class(cls, visualization_id):
        """Gets a visualization class by its id (which is also its class name).

        The registry will refresh if the desired class is not found. If it's
        still not found after the refresh, this method will throw an error.
        """
        if visualization_id not in cls.visualizations_dict:
            cls._refresh_registry()
        if visualization_id not in cls.visualizations_dict:
            raise TypeError(
                '\'%s\' is not a valid visualization id.' % visualization_id)
        return cls.visualizations_dict[visualization_id]

    @classmethod
    def get_all_visualization_ids(cls):
        """Gets the ids of all registered visualizations."""
        if not cls.visualizations_dict:
            cls._refresh_registry()
        return cls.visualizations_dict.keys()
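# Usage sketch (illustrative; 'BarChart' is a hypothetical visualization id --
# any class in extensions.visualizations.models that subclasses
# BaseVisualization is registered under its class name):
#
#   all_ids = Registry.get_all_visualization_ids()
#   viz_class = Registry.get_visualization_class('BarChart')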
__all__ = [
    "getMin"
]

__doc__ = "Different algorithms used for optimization"

import Optizelle.Unconstrained.State
import Optizelle.Unconstrained.Functions
from Optizelle.Utility import *
from Optizelle.Properties import *
from Optizelle.Functions import *


def getMin(X, msg, fns, state, smanip=None):
    """Solves an unconstrained optimization problem

    Basic solve: getMin(X,msg,fns,state)
    Solve with a state manipulator: getMin(X,msg,fns,state,smanip)
    """
    if smanip is None:
        smanip = StateManipulator()

    # Check the arguments
    checkVectorSpace("X", X)
    checkMessaging("msg", msg)
    Optizelle.Unconstrained.Functions.checkT("fns", fns)
    Optizelle.Unconstrained.State.checkT("state", state)
    checkStateManipulator("smanip", smanip)

    # Call the optimization
    UnconstrainedAlgorithmsGetMin(X, msg, fns, state, smanip)
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API client library."""
import logging

from . import error
from . import resource

logger = logging.getLogger('timesketch_api.user')


class User(resource.BaseResource):
    """User object."""

    def __init__(self, api):
        """Initializes the user object."""
        self._object_data = None
        resource_uri = 'users/me/'
        super().__init__(api, resource_uri)

    def _get_data(self):
        """Returns dict from the first object of the resource data."""
        if self._object_data:
            return self._object_data

        data = self.data
        objects = data.get('objects')
        if objects:
            self._object_data = objects[0]
        else:
            self._object_data = {}

        return self._object_data

    def change_password(self, new_password):
        """Change the password for the user.

        Args:
            new_password (str): String with the password.

        Raises:
            ValueError: If there was an error.

        Returns:
            Boolean: Whether the password was successfully modified.
        """
        if not new_password:
            raise ValueError('No new password supplied.')

        if not isinstance(new_password, str):
            raise ValueError('Password needs to be a string value.')

        data = {'password': new_password}
        resource_url = f'{self.api.api_root}/{self.resource_uri}'
        response = self.api.session.post(resource_url, json=data)

        return error.check_return_status(response, logger)

    @property
    def groups(self):
        """Property that returns the groups the user belongs to."""
        data = self._get_data()
        groups = data.get('groups', [])
        return [x.get('name', '') for x in groups]

    @property
    def is_active(self):
        """Property that returns bool indicating whether the user is active."""
        data = self._get_data()
        return data.get('active', True)

    @property
    def is_admin(self):
        """Property that returns bool indicating whether the user is admin."""
        data = self._get_data()
        return data.get('admin', False)

    @property
    def username(self):
        """Property that returns back the username of the current user."""
        data = self._get_data()
        return data.get('username', 'Unknown')

    def __str__(self):
        """Returns a string representation of the username."""
        user_strings = [self.username]

        if self.is_active:
            user_strings.append('[active]')
        else:
            user_strings.append('[inactive]')

        if self.is_admin:
            user_strings.append('<is admin>')

        return ' '.join(user_strings)
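# Usage sketch (illustrative): connecting with the standard
# timesketch_api_client client object and inspecting the current user.
# The host URI and credentials below are placeholders.
#
#   from timesketch_api_client import client
#
#   api = client.TimesketchApi(
#       'https://timesketch.example.com', 'alice', 'password')
#   me = User(api)
#   print(me)          # e.g. "alice [active]"
#   print(me.groups)   # names of the groups the user belongs to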
#!/usr/bin/python
# -*- coding: utf-8 -*-

import os
import sys


class Options:
    def __init__(self):
        self.color = "black"
        self.verbose = False
            _response('stats.contributions_series', group='day', format='csv')
        assert r.status_code == 200
        self.csv_eq(r, """date,count,total,average
2009-06-02,2,4.98,2.49
2009-06-01,1,5.00,5.0""")


# Test the SQL query by using known dates, for weeks and months etc.
class TestSiteQuery(TestCase):

    def setUp(self):
        super(TestSiteQuery, self).setUp()
        self.start = datetime.date(2012, 1, 1)
        self.end = datetime.date(2012, 1, 31)

        for k in xrange(0, 15):
            for name in ['addon_count_new', 'version_count_new']:
                date_ = self.start + datetime.timedelta(days=k)
                GlobalStat.objects.create(date=date_, name=name, count=k)

    def test_day_grouping(self):
        res = views._site_query('date', self.start, self.end)[0]
        assert len(res) == 14
        assert res[0]['data']['addons_created'] == 14
        # Make sure we are returning counts as integers, otherwise
        # DjangoJSONSerializer will map them to strings.
        assert type(res[0]['data']['addons_created']) == int
        assert res[0]['date'] == '2012-01-15'

    def test_week_grouping(self):
        res = views._site_query('week', self.start, self.end)[0]
        assert len(res) == 3
        assert res[1]['data']['addons_created'] == 70
        assert res[1]['date'] == '2012-01-08'

    def test_month_grouping(self):
        res = views._site_query('month', self.start, self.end)[0]
        assert len(res) == 1
        assert res[0]['data']['addons_created'] == (14 * (14 + 1)) / 2
        assert res[0]['date'] == '2012-01-02'

    def test_period(self):
        self.assertRaises(AssertionError, views._site_query, 'not_period',
                          self.start, self.end)


@mock.patch('olympia.stats.views._site_query')
class TestSite(TestCase):

    def tests_period(self, _site_query):
        _site_query.return_value = ['.', '.']
        for period in ['date', 'week', 'month']:
            self.client.get(reverse('stats.site', args=['json', period]))
            assert _site_query.call_args[0][0] == period

    def tests_period_day(self, _site_query):
        _site_query.return_value = ['.', '.']
        start = (datetime.date.today() - datetime.timedelta(days=3))
        end = datetime.date.today()
        self.client.get(reverse('stats.site.new',
                                args=['day', start.strftime('%Y%m%d'),
                                      end.strftime('%Y%m%d'), 'json']))
        assert _site_query.call_args[0][0] == 'date'
        assert _site_query.call_args[0][1] == start
        assert _site_query.call_args[0][2] == end

    def test_csv(self, _site_query):
        _site_query.return_value = [[], []]
        res = self.client.get(reverse('stats.site', args=['csv', 'date']))
        assert res._headers['content-type'][1].startswith('text/csv')

    def test_json(self, _site_query):
        _site_query.return_value = [[], []]
        res = self.client.get(reverse('stats.site', args=['json', 'date']))
        assert res._headers['content-type'][1].startswith('text/json')

    def tests_no_date(self, _site_query):
        _site_query.return_value = ['.', '.']
        self.client.get(reverse('stats.site', args=['json', 'date']))
        assert _site_query.call_args[0][1] == (
            datetime.date.today() - datetime.timedelta(days=365))
        assert _site_query.call_args[0][2] == datetime.date.today()


class TestCollections(amo.tests.ESTestCase):
    fixtures = ['bandwagon/test_models', 'base/users',
                'base/addon_3615', 'base/addon_5369']

    def setUp(self):
        super(TestCollections, self).setUp()
        self.today = datetime.date.today()
        self.collection = Collection.objects.get(pk=512)
        self.url = reverse('stats.collection',
                           args=[self.collection.uuid, 'json'])

        for x in xrange(1, 4):
            data = {'date': self.today - datetime.timedelta(days=x - 1),
                    'id': int(self.collection.pk), 'count': x,
                    'data': search.es_dict({'subscribers': x, 'votes_up': x,
                                            'votes_down': x, 'downloads': x})}
            CollectionCount.index(data, id='%s-%s' % (x, self.collection.pk))

        self.refresh('stats')

    def tests_collection_anon(self):
        res = self.client.get(self.url)
        assert res.status_code == 403

    def tests_collection_user(self):
        self.client.login(username='admin@mozilla.com', password='password')
        res = self.client.get(self.url)
        assert res.status_code == 200

    def tests_collection_admin(self):
        self.client.login(username='admin@mozilla.com', password='password')
        self.collection.update(author=None)
        res = self.client.get(self.url)
        assert res.status_code == 200

    def test_collection_json(self):
        self.client.login(username='admin@mozilla.com', password='password')
        res = self.client.get(self.url)
        content = json.loads(res.content)
        assert len(content) == 3
        assert content[0]['count'] == 1
        assert content[0]['data']['votes_down'] == 1
        assert content[0]['data']['downloads'] == 1

    def test_collection_csv(self):
        self.client.login(username='admin@mozilla.com', password='password')
        self.url = reverse('stats.collection',
                           args=[self.collection.uuid, 'csv'])
        res = self.client.get(self.url)
        date = (self.today.strftime('%Y-%m-%d'))
        assert '%s,1,1,1,1,1' % date in res.content

    def get_url(self, start, end):
        return reverse('collections.stats.subscribers_series',
                       args=[self.collection.author.username,
                             self.collection.slug, 'day',
                             start.strftime('%Y%m%d'),
                             end.strftime('%Y%m%d'), 'json'])

    def test_collection_one_day(self):
        self.client.login(username='admin@mozilla.com', password='password')
        url = self.get_url(self.today, self.today)
        res = self.client.get(url)
        content = json.loads(res.content)
        assert len(content) == 1
        assert content[0]['date'] == self.today.strftime('%Y-%m-%d')

    def test_collection_range(self):
        self.client.login(username='admin@mozilla.com', password='password')
        yesterday = self.today - datetime.timedelta(days=1)
        day_before = self.today - datetime.timedelta(days=2)
        url = self.get_url(day_before, yesterday)
        res = self.client.get(url)
        content = json.loads(res.content)
        assert len(content) == 2
        assert content[0]['date'] == yesterday.strftime('%Y-%m-%d')
        assert content[1]['date'] == day_before.strftime('%Y-%m-%d')


class TestXss(amo.tests.TestXss):

    def test_stats_page(self):
        url = reverse('stats.overview', args=[self.addon.slug])
        self.assertNameAndNoXSS(url)

    def test_date_range_or_404_xss(self):
        with self.assertRaises(Http404):
            views.get_daterange_or_404(start='<alert>', end='20010101')

    def test_report_view_xss(self):
        req = RequestFactory().get('/', start='<alert>', end='20010101')
        assert views.get_report_view(req) == {}
        req = RequestFactory().get('/', last='<alert>')
        assert views.get_report_view(req) == {}


class ArchiveTestCase(APIKeyAuthTestCase):
    fixtures = ['base/addon_3615']

    def setUp(self):
        self.user = UserProfile.objects.get(email='del@icio.us')
        self.api_key = self.create_api_key(self.user, str(self.user.pk) + ':f')
        self.addon = Addon.objects.get(pk=3615)
        self.theme_update_count = ThemeUpdateCount(
            addon_id=3615, date='2016-01-18', count=123)

    def tearDown(self):
        self.clean_up_files()

    def clean_up_files(self):
        path = os.path.join(views.storage.location, '3615')
        if os.path.isdir(path):
            shutil.rmtree(path)

    def get(self, url=None):
        return self.client.get(url, HTTP_AUTHORIZATION=self.authorization())
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0015_auto_20150928_0850'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserVote',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID',
                                        serialize=False, primary_key=True)),
                ('bidrag', models.ForeignKey(to='core.Bidrag')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='uservote',
            unique_together=set([('bidrag', 'user')]),
        ),
    ]
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras preprocessing layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor

INTEGER = "int"
BINARY = "binary"


class Discretization(Layer):
  """Buckets data into discrete ranges.

  This layer will place each element of its input data into one of several
  contiguous ranges and output either an integer index or a one-hot vector
  indicating which range each element was placed in.

  What happens in `adapt()`: The dataset is examined and sliced.

  Input shape:
    Any `tf.Tensor` or `tf.RaggedTensor` of dimension 2 or higher.

  Output shape:
    The same as the input shape if `output_mode` is 'int', or
    `[output_shape, num_buckets]` if `output_mode` is 'binary'.

  Attributes:
    bins: Optional boundary specification. Bins include the left boundary and
      exclude the right boundary, so `bins=[0., 1., 2.]` generates bins
      `(-inf, 0.)`, `[0., 1.)`, `[1., 2.)`, and `[2., +inf)`.
    output_mode: One of 'int', 'binary'. Defaults to 'int'.
  """

  def __init__(self, bins, output_mode=INTEGER, **kwargs):
    super(Discretization, self).__init__(**kwargs)
    self._supports_ragged_inputs = True
    self.bins = bins
    self.output_mode = output_mode

  def get_config(self):
    config = {
        "bins": self.bins,
        "output_mode": self.output_mode,
    }
    base_config = super(Discretization, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def compute_output_shape(self, input_shape):
    if self.output_mode == INTEGER:
      return input_shape
    else:
      return tensor_shape.TensorShape(
          [dim for dim in input_shape] + [len(self.bins)])

  def compute_output_signature(self, input_spec):
    output_shape = self.compute_output_shape(input_spec.shape.as_list())
    output_dtype = dtypes.int64
    if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
      return sparse_tensor.SparseTensorSpec(
          shape=output_shape, dtype=output_dtype)
    return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)

  def call(self, inputs):
    if ragged_tensor.is_ragged(inputs):
      integer_buckets = ragged_functional_ops.map_flat_values(
          math_ops._bucketize, inputs, boundaries=self.bins)  # pylint: disable=protected-access
      # Ragged map_flat_values doesn't touch the non-values tensors in the
      # ragged composite tensor. If this op is the only op in a Keras model,
      # this can cause errors in Graph mode, so wrap the tensor in an
      # identity.
      integer_buckets = array_ops.identity(integer_buckets)
    elif isinstance(inputs, sparse_tensor.SparseTensor):
      integer_buckets = math_ops._bucketize(  # pylint: disable=protected-access
          inputs.values, boundaries=self.bins)
    else:
      integer_buckets = math_ops._bucketize(  # pylint: disable=protected-access
          inputs, boundaries=self.bins)

    if self.output_mode == INTEGER:
      if isinstance(inputs, sparse_tensor.SparseTensor):
        return sparse_tensor.SparseTensor(
            indices=array_ops.identity(inputs.indices),
            values=integer_buckets,
            dense_shape=array_ops.identity(inputs.dense_shape))
      return integer_buckets
    else:
      if isinstance(inputs, sparse_tensor.SparseTensor):
        raise ValueError("`output_mode=binary` is not supported for "
                         "sparse input")
      # The 'bins' array is the set of boundaries between the bins. We
      # actually have 'len(bins)+1' outputs.
      # TODO(momernick): This will change when we have the ability to adapt().
      return array_ops.one_hot(integer_buckets, depth=len(self.bins) + 1)
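# Usage sketch (illustrative, assuming an eager TensorFlow 2.x environment;
# not part of the original module).  bins=[0., 1., 2.] yields the four
# ranges (-inf, 0), [0, 1), [1, 2) and [2, +inf), so the sample values
# below land in buckets 0, 1, 2 and 3 respectively in the default 'int'
# mode.
if __name__ == "__main__":
  import tensorflow as tf

  layer = Discretization(bins=[0., 1., 2.])
  print(layer(tf.constant([[-0.5, 0.3, 1.7, 4.0]])))  # [[0 1 2 3]]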
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.

from sha import sha
from random import randint

# this is ugly, hopefully os.entropy will be in 2.4
try:
    from entropy import entropy
except ImportError:
    def entropy(n):
        s = ''
        for i in range(n):
            s += chr(randint(0, 255))
        return s

def intify(hstr):
    """20-byte (160-bit) hash, big-endian -> long python integer"""
    assert len(hstr) == 20
    return long(hstr.encode('hex'), 16)

def stringify(num):
    """long int -> 20-character string"""
    str = hex(num)[2:]
    if str[-1] == 'L':
        str = str[:-1]
    if len(str) % 2 != 0:
        str = '0' + str
    str = str.decode('hex')
    return (20 - len(str)) * '\x00' + str

def distance(a, b):
    """distance between two 160-bit hashes expressed as 20-character strings"""
    return intify(a) ^ intify(b)

def newID():
    """returns a new pseudorandom globally unique ID string"""
    h = sha()
    h.update(entropy(20))
    return h.digest()

def newIDInRange(min, max):
    return stringify(randRange(min, max))

def randRange(min, max):
    return min + intify(newID()) % (max - min)

def newTID():
    return randRange(-2**30, 2**30)


### Test Cases ###
import unittest

class NewID(unittest.TestCase):
    def testLength(self):
        self.assertEqual(len(newID()), 20)
    def testHundreds(self):
        for x in xrange(100):
            self.testLength()

class Intify(unittest.TestCase):
    known = [('\0' * 20, 0),
             ('\xff' * 20, 2L**160 - 1),
             ]
    def testKnown(self):
        for str, value in self.known:
            self.assertEqual(intify(str), value)
    def testEndianessOnce(self):
        h = newID()
        while h[-1] == '\xff':
            h = newID()
        k = h[:-1] + chr(ord(h[-1]) + 1)
        self.assertEqual(intify(k) - intify(h), 1)
    def testEndianessLots(self):
        for x in xrange(100):
            self.testEndianessOnce()

class Distance(unittest.TestCase):
    known = [
        (("\0" * 20, "\xff" * 20), 2**160L - 1),
        ((sha("foo").digest(), sha("foo").digest()), 0),
        ((sha("bar").digest(), sha("bar").digest()), 0)
        ]
    def testKnown(self):
        for pair, dist in self.known:
            self.assertEqual(distance(pair[0], pair[1]), dist)
    def testCommutative(self):
        for i in xrange(100):
            x, y, z = newID(), newID(), newID()
            self.assertEqual(distance(x, y) ^ distance(y, z), distance(x, z))

class RandRange(unittest.TestCase):
    def testOnce(self):
        a = intify(newID())
        b = intify(newID())
        if a < b:
            c = randRange(a, b)
            self.assertEqual(a <= c < b, 1,
                             "output out of range %d %d %d" % (a, c, b))
        else:
            c = randRange(b, a)
            assert b <= c < a, "output out of range %d %d %d" % (b, c, a)
    def testOneHundredTimes(self):
        for i in xrange(100):
            self.testOnce()


if __name__ == '__main__':
    unittest.main()
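# Quick illustrative check of the helpers above (Python 2): stringify and
# intify are inverses on 20-byte ids, and the XOR metric is zero between an
# id and itself -- the same properties the unit tests exercise.
_h = newID()
assert stringify(intify(_h)) == _h
assert distance(_h, _h) == 0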
""" ex_compound_nomo_1.py Compound nomograph: (A+B)/E=F/(CD) Copyright (C) 2007-2009 Leif Roschier This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the
GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys sys.path.insert(0, "..") from pynomo.nomographer import * # type 1 A_params={ 'u_min':0.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$A$', 'tick_levels':2, 'tick_text_levels':1, } B_params={ 'u_min':0.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$B$', 'tick_levels
':2, 'tick_text_levels':1, } R1a_params={ 'u_min':0.0, 'u_max':10.0, 'function':lambda u:-u, 'title':'', 'tick_levels':0, 'tick_text_levels':0, 'tag':'r1' } block_1_params={ 'block_type':'type_1', 'width':10.0, 'height':10.0, 'f1_params':A_params, 'f2_params':B_params, 'f3_params':R1a_params, 'isopleth_values':[[1,7,'x']] } # type 4 R1b_params={ 'u_min':1.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$R_1$', 'tick_levels':0, 'tick_text_levels':0, 'tick_side':'right', 'title_draw_center':True, 'title_opposite_tick':False, 'tag':'r1' } E_params={ 'u_min':1.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$E$', 'tick_levels':3, 'tick_text_levels':1, 'tick_side':'right', 'title_draw_center':True, 'title_opposite_tick':False, } F_params={ 'u_min':1.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$F$', 'tick_levels':3, 'tick_text_levels':1, 'tick_side':'left', 'title_draw_center':True, 'title_opposite_tick':True, } R2a_params={ 'u_min':1.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$R_2$', 'tick_levels':0, 'tick_text_levels':0, 'tick_side':'left', 'title_draw_center':True, 'title_opposite_tick':False, 'tag':'r2' } block_2_params={ 'block_type':'type_4', 'f1_params':R1b_params, 'f2_params':E_params, 'f3_params':F_params, 'f4_params':R2a_params, 'mirror_x':True, 'isopleth_values':[['x',9,4,'x']] } # type 2 N R2b_params={ 'u_min':0.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$$', 'tick_levels':0, 'tick_text_levels':0, 'tag':'r2' } C_params={ 'u_min':0.5, 'u_max':5.0, 'function':lambda u:u, 'title':r'$C$', 'tick_levels':3, 'tick_text_levels':1, 'tick_side':'left', 'scale_type':'linear smart', } D_params={ 'u_min':1.0, 'u_max':10.0, 'function':lambda u:u, 'title':r'$D$', 'tick_levels':3, 'tick_text_levels':1, } block_3_params={ 'block_type':'type_2', 'width':10.0, 'height':10.0, 'f1_params':R2b_params, 'f2_params':C_params, 'f3_params':D_params, 'mirror_y':True, 'isopleth_values':[['x',1,'x']] } main_params={ 'filename':'ex_compound_nomo_1.pdf', 'paper_height':10.0, 'paper_width':10.0, 'block_params':[block_1_params,block_2_params,block_3_params], 'transformations':[('rotate',0.01),('scale paper',)], } Nomographer(main_params)
from django.contrib import admin

from general.models import StaticPage

admin.site.register(StaticPage)
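# Illustrative variant (a sketch, not part of the original app): re-register
# the model through a ModelAdmin subclass to customize the change list.
# The field names 'title' and 'slug' are hypothetical.
admin.site.unregister(StaticPage)


@admin.register(StaticPage)
class StaticPageAdmin(admin.ModelAdmin):
    list_display = ('title', 'slug')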
import sys
from Bio import SeqIO

SNPTOPEAKFILENAME = sys.argv[1]
GENOMEFILENAME = sys.argv[2]
DISTANCE = int(sys.argv[3])
BINDALLELESEQFILENAME = sys.argv[4]
NONBINDALLELEFILENAME = sys.argv[5]
FIRSTPEAKCOL = int(sys.argv[6])  # 0-INDEXED

def getSNPInfo(SNPToPeakLine):
    # Get the SNP and peak location from the current line
    if SNPToPeakLine == "":
        # At the end of the SNP to peak file, so stop
        return [("", 0), ("", ""), ("", 0)]
    SNPToPeakLineElements = SNPToPeakLine.split("\t")
    return [(SNPToPeakLineElements[0], int(SNPToPeakLineElements[1])),
            (SNPToPeakLineElements[2], SNPToPeakLineElements[3]),
            (SNPToPeakLineElements[FIRSTPEAKCOL],
             int(SNPToPeakLineElements[FIRSTPEAKCOL + 1]))]

def getSequencesForSNPs():
    # For each SNP, get the sequence of its peak +/- distances with the
    # binding and non-binding alleles
    SNPToPeakFile = open(SNPTOPEAKFILENAME)
    [SNPLocation, SNPAlleles, peakLocation] = getSNPInfo(SNPToPeakFile.readline().strip())
    lastPeakLocation = ("", 0)
    bindAlleleSeq = ""
    nonBindAlleleSeq = ""
    bindAlleleSeqFile = open(BINDALLELESEQFILENAME, 'w+')
    nonBindAlleleSeqFile = open(NONBINDALLELEFILENAME, 'w+')
    numSharingPeak = 0
    for seqRecord in SeqIO.parse(GENOMEFILENAME, "fasta"):
        # Iterate through the chromosomes and get the sequences surrounding
        # each SNP in each chromosome. Combine SNPs that are in the same peak,
        # and ASSUME THAT THEY ARE IN LD AND THE BINDING ALLELES CORRESPOND TO
        # EACH OTHER
        while seqRecord.id == SNPLocation[0]:
            # Iterate through all SNPs on the current chromosome
            if peakLocation != lastPeakLocation:
                # At a new peak
                if lastPeakLocation[0] != "":
                    # Record the last peak
                    bindAlleleSeqFile.write("".join(bindAlleleSeq).upper() + "\n")
                    nonBindAlleleSeqFile.write("".join(nonBindAlleleSeq).upper() + "\n")
                bindAlleleSeq = list(str(seqRecord.seq[peakLocation[1] - DISTANCE:peakLocation[1] + DISTANCE - 1]))
                nonBindAlleleSeq = list(str(seqRecord.seq[peakLocation[1] - DISTANCE:peakLocation[1] + DISTANCE - 1]))
            else:
                numSharingPeak = numSharingPeak + 1
            SNPLocationInSeq = DISTANCE - (peakLocation[1] - SNPLocation[1]) - 1
            bindAlleleSeq[SNPLocationInSeq] = SNPAlleles[0]
            nonBindAlleleSeq[SNPLocationInSeq] = SNPAlleles[1]
            lastPeakLocation = peakLocation
            [SNPLocation, SNPAlleles, peakLocation] = getSNPInfo(SNPToPeakFile.readline().strip())
    print numSharingPeak
    bindAlleleSeqFile.write("".join(bindAlleleSeq).upper() + "\n")
    nonBindAlleleSeqFile.write("".join(nonBindAlleleSeq).upper() + "\n")
    SNPToPeakFile.close()
    bindAlleleSeqFile.close()
    nonBindAlleleSeqFile.close()

if __name__ == "__main__":
    getSequencesForSNPs()
on types, some non-trivial type conversion could have place. Basically a type is replaced with another one that has the closest match, and sometimes one argument of generated function comprises several arguments of the original function (usually two). Functions having error code as the return value and returning effective value in one of its arguments are transformed so that the effective value is returned in a regular fashion and run-time exception is being thrown in case of negative error code. """ from sys import version_info if version_info >= (2,6,0): def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_pocketsphinx', [dirname(__file__)]) except ImportError: import _pocketsphinx return _pocketsphinx if fp is not None: try: _mod = imp.load_module('_pocketsphinx', fp, pathname, description) finally: fp.close() return _mod _pocketsphinx = swig_import_helper() del swig_import_helper else: import _pocketsphinx del version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. def _swig_setattr_nondynamic(self,class_type,name,value,static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'SwigPyObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name,None) if method: return method(self,value) if (not static): self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self,class_type,name,value): return _swig_setattr_nondynamic(self,class_type,name,value,0) def _swig_getattr(self,class_type,name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name,None) if method: return method(self) raise AttributeError(name) def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) try: _object = object _newclass = 1 except AttributeError: class _object : pass _newclass = 0 def _swig_setattr_nondynamic_method(set): def set_attr(self,name,value): if (name == "thisown"): return self.this.own(value) if hasattr(self,name) or (name == "this"): set(self,name,value) else: raise AttributeError("You cannot add attributes to %s" % self) return set_attr import sphinxbase class Hypothesis(object): """Proxy of C Hypothesis struct""" thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr hypstr = _swig_property(_pocketsphinx.Hypothesis_hypstr_get, _pocketsphinx.Hypothesis_hypstr_set) best_score = _swig_property(_pocketsphinx.Hypothesis_best_score_get, _pocketsphinx.Hypothesis_best_score_set) prob = _swig_property(_pocketsphinx.Hypothesis_prob_get, _pocketsphinx.Hypothesis_prob_set) def __init__(self, *args): """__init__(Hypothesis self, char const * hypstr, int best_score, int prob) -> Hypothesis""" this = _pocketsphinx.new_Hypothesis(*args) try: self.this.append(this) except: self.this = this __swig_destroy__ = _pocketsphinx.delete_Hypothesis __del__ = lambda self : None; Hypothesis_swigregister = _pocketsphinx.Hypothesis_swigregister Hypothesis_swigregister(Hypothesis) class Segment(object): """Proxy of C Segment struct""" thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr word = _swig_property(_pocketsphinx.Segment_word_get, 
                           _pocketsphinx.Segment_word_set)
    ascore = _swig_property(_pocketsphinx.Segment_ascore_get,
                            _pocketsphinx.Segment_ascore_set)
    lscore = _swig_property(_pocketsphinx.Segment_lscore_get,
                            _pocketsphinx.Segment_lscore_set)
    lback = _swig_property(_pocketsphinx.Segment_lback_get,
                           _pocketsphinx.Segment_lback_set)
    prob = _swig_property(_pocketsphinx.Segment_prob_get,
                          _pocketsphinx.Segment_prob_set)
    start_frame = _swig_property(_pocketsphinx.Segment_start_frame_get,
                                 _pocketsphinx.Segment_start_frame_set)
    end_frame = _swig_property(_pocketsphinx.Segment_end_frame_get,
                               _pocketsphinx.Segment_end_frame_set)
    def fromIter(*args):
        """fromIter(ps_seg_t * itor) -> Segment"""
        return _pocketsphinx.Segment_fromIter(*args)
    fromIter = staticmethod(fromIter)
    __swig_destroy__ = _pocketsphinx.delete_Segment
    __del__ = lambda self: None
    def __init__(self):
        """__init__(Segment self) -> Segment"""
        this = _pocketsphinx.new_Segment()
        try: self.this.append(this)
        except: self.this = this
Segment_swigregister = _pocketsphinx.Segment_swigregister
Segment_swigregister(Segment)

def Segment_fromIter(*args):
    """Segment_fromIter(ps_seg_t * itor) -> Segment"""
    return _pocketsphinx.Segment_fromIter(*args)

class NBest(object):
    """Proxy of C NBest struct"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    hypstr = _swig_property(_pocketsphinx.NBest_hypstr_get, _pocketsphinx.NBest_hypstr_set)
    score = _swig_property(_pocketsphinx.NBest_score_get, _pocketsphinx.NBest_score_set)
    def fromIter(*args):
        """fromIter(ps_nbest_t * itor) -> NBest"""
        return _pocketsphinx.NBest_fromIter(*args)
    fromIter = staticmethod(fromIter)
    def hyp(self):
        """hyp(NBest self) -> Hypothesis"""
        return _pocketsphinx.NBest_hyp(self)
    __swig_destroy__ = _pocketsphinx.delete_NBest
    __del__ = lambda self: None
    def __init__(self):
        """__init__(NBest self) -> NBest"""
        this = _pocketsphinx.new_NBest()
        try: self.this.append(this)
        except: self.this = this
NBest_swigregister = _pocketsphinx.NBest_swigregister
NBest_swigregister(NBest)

def NBest_fromIter(*args):
    """NBest_fromIter(ps_nbest_t * itor) -> NBest"""
    return _pocketsphinx.NBest_fromIter(*args)

class SegmentIterator(object):
    """Proxy of C SegmentIterator struct"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    ptr = _swig_property(_pocketsphinx.SegmentIterator_ptr_get, _pocketsphinx.SegmentIterator_ptr_set)
    def __init__(self, *args):
        """__init__(SegmentIterator self, ps_seg_t * ptr) -> SegmentIterator"""
        this = _pocketsphinx.new_SegmentIterator(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _pocketsphinx.delete_SegmentIterator
    __del__ = lambda self: None
    def next(self):
        """next(SegmentIterator self) -> Segment"""
        return _pocketsphinx.SegmentIterator_next(self)
    def __next__(self):
        """__next__(SegmentIterator self) -> Segment"""
        return _pocketsphinx.SegmentIterator___next__(self)
SegmentIterator_swigregister = _pocketsphinx.SegmentIterator_swigregister
SegmentIterator_swigregister(SegmentIterator)

class NBestIterator(object):
    """Proxy of C NBestIterator struct"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    ptr = _swig_property(_pocketsphinx.NBestIterator_ptr_get, _pocketsphinx.NBestIterator_ptr_set)
    def __init__(self, *args):
        """__init__(NBestIterator self, ps_nbest_t * ptr) -> NBestIterator"""
        this = _pocketsphinx.new_NBestIterator(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _pocketsphinx.delete_NBestIterator
    __del__ = lambda self: None
    def next(self):
        """next(NBestIterator self) -> NBest"""
        return _pocketsphinx.NBestIterator_next(self)
    def __next__(self):
        """__next__(NBestIterator self) -> NBest"""
        return _pocketsphinx.NBestIterator___next__(self)
from pagarme import card
from pagarme import plan

from tests.resources import pagarme_test
from tests.resources.dictionaries import card_dictionary
from tests.resources.dictionaries import customer_dictionary
from tests.resources.dictionaries import plan_dictionary
from tests.resources.dictionaries import transaction_dictionary

CARD = card.create(card_dictionary.VALID_CARD)

NO_TRIAL_PLAN = plan.create(plan_dictionary.NO_TRIAL_PLAN)

POSTBACK_URL = pagarme_test.create_postback_url()

BOLETO_PERCENTAGE_SPLIT_RULE_SUBSCRIPTION = {
    "plan_id": NO_TRIAL_PLAN['id'],
    "customer": customer_dictionary.CUSTOMER,
    "payment_method": "boleto",
    "postback_url": POSTBACK_URL,
    "split_rules": transaction_dictionary.SPLIT_RULE_PERCENTAGE
}

BOLETO_SUBSCRIPTION = {
    "plan_id": NO_TRIAL_PLAN['id'],
    "customer": customer_dictionary.CUSTOMER,
    "payment_method": "boleto",
    "postback_url": POSTBACK_URL
}

CHARGES = {
    "charges": "1"
}

CREDIT_CARD_PERCENTAGE_SPLIT_RULE_SUBSCRIPTION = {
    "plan_id": NO_TRIAL_PLAN['id'],
    "customer": customer_dictionary.CUSTOMER,
    "card_id": CARD['id'],
    "payment_method": "credit_card",
    "postback_url": POSTBACK_URL,
    "split_rules": transaction_dictionary.SPLIT_RULE_PERCENTAGE
}

CREDIT_CARD_SUBSCRIPTION = {
    "plan_id": NO_TRIAL_PLAN['id'],
    "customer": customer_dictionary.CUSTOMER,
    "card_id": CARD['id'],
    "payment_method": "credit_card",
    "postback_url": POSTBACK_URL
}

UPDATE = {
    "payment_method": "boleto"
}
s file is part of pyasn1-modules software. # # Created by Russ Housley # Copyright (c) 2019, Vigil Security, LLC # License: http://snmplabs.com/pyasn1/license.html # import sys import unittest from pyasn1.codec.der.decoder import decode as der_decoder from pyasn1.codec.der.encoder import encode as der_encoder from pyasn1.type import univ from pyasn1_modules import pem from pyasn1_modules import rfc5652 from pyasn1_modules import rfc7292 class PKCS12TestCase(unittest.TestCase): pfx_pem_text = """\ MIIJ0wIBAzCCCY8GCSqGSIb3DQEHAaCCCYAEggl8MIIJeDCCBggGCSqGSIb3DQEHAaCCBfkE ggX1MIIF8TCCBe0GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAjuq0/+ 0pyutQICB9AEggTYZe/mYBpmkDvKsve4EwIVwo1TNv4ldyx1qHZW2Ih6qQCY+Nv1Mnv9we0z UTl4p3tQzCPWXnrSA82IgOdotLIez4YwXrgiKhcIkSSL+2yCmAoM+qkjiAIKq+l3UJ6Xhafe 2Kg4Ek/0RkHpe6GwjTtdefkpXpZgccMEopOtKQMLJWsDM7p77x/amn6yIk2tpskKqUY/4n8Y xEiTWcRtTthYqZQIt+q94nKLYpt0o880SVOfvdEqp5KII7cTg60GJL+n6oN6hmP0bsAMvnk9 1f8/lFKMi9tsNU/KnUhbDVpjJwBQkhgbqBx6GdtoqSLSlYNPVM0wlntwm1JhH4ybiQ5sNzqO 7FlWC5bcYwkvOlx1gGrshY5jK/WjbA4paBpxSkgobJReirY9BeqITnvokXlub4tehHhM20Ik 42pKa3kGaHmowvzflxqE+oysW5Oa9XbZxBCfkOMJ70o4hqa+n66+E/uKcN9NbKbTo3zt3xdt 6ypOwHb74t5OcWaGx3EZsw0n0/V+WoLSpXOBwpx08+1yh7LV29aNQ0oEzVVkF6YYRQZtdIMe s3xB2i6sjLal21ntk7iBzMJwVoi524SAZ/oW8SuDAn1c93AWWwKZLALv5V3FZ2pDiQXArcfz DH2d5HJyNx7OlvKzNgEngwSyEC1XbjnOsZVUqGFENuDTa/brH4oEJHEkyWTyDudrz8iCEO80 e1
PE4qqJ5CllN0CSVWqz4CxGDFIQXzR6ohn8f3dR3+DAaLYvAjBVMLJjk7+nfnB2L0HpanhT Fz9AxPPIDf5pBQQwM14l8wKjEHIyfqclupeKNokBUr1ykioPyCr3nf4Rqe0Z4EKIY4OCpW6n hrkWHmvF7OKR+bnuSk3jnBxjSN0Ivy5q9q3fntYrhscMGGR73umfi8Z29tM1vSP9jBZvirAo geGf/sfOI0ewRvJf/5abnNg/78Zyk8WmlAHVFzNGcM3u3vhnNpTI
VRuUyVkdSmOdbzeSfmqQ 2HPCEdC9HNm25KJt1pD6v6aP3Tw7qGl+tZyps7VB2i+a+UGcwQcClcoXcPSdG7Z1gBTzSr84 MuVPYlePuo1x+UwppSK3rM8ET6KqhGmESH5lKadvs8vdT6c407PfLcfxyAGzjH091prk2oRJ xB3oQAYcKvkuMcM6FSLJC263Dj+pe1GGEexk1AoysYe67tK0sB66hvbd92HcyWhW8/vI2/PM bX+OeEb7q+ugnsP+BmF/btWXn9AxfUqNWstyInKTn+XpqFViMIOG4e2xC4u/IvzG3VrTWUHF 4pspH3k7GB/EOLvtbsR0uacBFlsColJy0FaWT9rrdueU3YEiIRCC8LGi1XpUa8f5adeBKWN+ eRTrrF4o7uoNeGlnwZ7ebnb7k18Q0GRzzzTZPoMM4L703svfE/eNYWFHLY4NDQKSYgeum365 WAfZpHOX7YOc6oRGrGB+QuGoyikTTDO8xpcEmb8vDz4ZwHhN0PS056LNJeMoI0A/5DJb3e10 i1txlM48sbZBuIEIeixr52nwG4LuxqXGqShKaTfOrFxHjx4kI4/dp9dN/k8TGFsLWjuIgMJI 6nRHbWrxB3F0XKXagtLLep1MDwDwAuCyiW2YC0JzRvsJViIgjDA+eiHX0O6/8xiK9dzMQpIz TVHSEqFlhORp0DGB2zATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IADMA ZgA3ADEAYQBmADYANQAtADEANgA4ADcALQA0ADQANABhAC0AOQBmADQANgAtAGMAOABiAGUA MQA5ADQAYwAzAGUAOABlMGsGCSsGAQQBgjcRATFeHlwATQBpAGMAcgBvAHMAbwBmAHQAIABF AG4AaABhAG4AYwBlAGQAIABDAHIAeQBwAHQAbwBnAHIAYQBwAGgAaQBjACAAUAByAG8AdgBp AGQAZQByACAAdgAxAC4AMDCCA2gGCSqGSIb3DQEHAaCCA1kEggNVMIIDUTCCA00GCyqGSIb3 DQEMCgEDoIIDJTCCAyEGCiqGSIb3DQEJFgGgggMRBIIDDTCCAwkwggHxoAMCAQICEDbt9oc6 oQinRwE1826MiBEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAxMJYW5vbnltb3VzMCAXDTE2 MDcxOTIyMDAwMVoYDzIxMTYwNjI1MjIwMDAxWjAUMRIwEAYDVQQDEwlhbm9ueW1vdXMwggEi MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8trBCTBjXXA4OgSO5nRTOU5T86ObCgc71 J2oCuUigSddcTDzebaD0wcyAgf101hAdwMKQ9DvrK0nGvm7FAMnnUuVeATafKgshLuUTUUfK jx4Xif4LoS0/ev4BiOI5a1MlIRZ7T5Cyjg8bvuympzMuinQ/j1RPLIV0VGU2HuDxuuP3O898 GqZ3+F6Al5CUcwmOX9zCs91JdN/ZFZ05SXIpHQuyPSPUX5Vy8F1ZeJ8VG3nkbemfFlVkuKQq vteL9mlT7z95rVZgGB3nUZL0tOB68eMcffA9zUksOmeTi5M6jnBcNeX2Jh9jS3YYd+IEliZm mggQG7kPta8f+NqezL77AgMBAAGjVTBTMBUGA1UdJQQOMAwGCisGAQQBgjcKAwQwLwYDVR0R BCgwJqAkBgorBgEEAYI3FAIDoBYMFGFub255bW91c0B3aW5kb3dzLXgAMAkGA1UdEwQCMAAw DQYJKoZIhvcNAQEFBQADggEBALh+4qmNPzC6M8BW9/SC2ACQxxPh06GQUGx0D+GLYnp61ErZ OtKyKdFh+uZWpu5vyYYAHCLXP7VdS/JhJy677ynAPjXiC/LAzrTNvGs74HDotD966Hiyy0Qr ospFGiplHGRA5vXA2CiKSX+0HrVkN7rhk5PYkc6R+/cdosd+QZ8lkEa9yDWc5l//vWEbzwVy mJf/PRf8NTkWAK6SPV7Y37j1mhkJjOH9VkRxNrd6kcihRa4u0ImXaXEsec77ER0so31DKCrP m+rqZPj9NZSIYP3sMGJ4Bmm/n2YRdeaUzTdocfD3TRnKxs65DSgpiSq1gmtsXM7jAPs/Egrg tbWEypgxFTATBgkqhkiG9w0BCRUxBgQEAQAAADA7MB8wBwYFKw4DAhoEFKVgj/32UdEyuQcB rqr03dPnboinBBSU7mxdpB5LTCvorCI8Tk5OMiUzjgICB9A= """ def setUp(self): self.asn1Spec = rfc7292.PFX() def testDerCodec(self): substrate = pem.readBase64fromText(self.pfx_pem_text) asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec) self.assertFalse(rest) self.assertTrue(asn1Object.prettyPrint()) self.assertEqual(substrate, der_encoder(asn1Object)) self.assertEqual(3, asn1Object['version']) oid = asn1Object['macData']['mac']['digestAlgorithm']['algorithm'] self.assertEqual(univ.ObjectIdentifier('1.3.14.3.2.26'), oid) md_hex = asn1Object['macData']['mac']['digest'].prettyPrint() self.assertEqual('0xa5608ffdf651d132b90701aeaaf4ddd3e76e88a7', md_hex) self.assertEqual( rfc5652.id_data, asn1Object['authSafe']['contentType']) data, rest = der_decoder( asn1Object['authSafe']['content'], asn1Spec=univ.OctetString()) self.assertFalse(rest) authsafe, rest = der_decoder(data, asn1Spec=rfc7292.AuthenticatedSafe()) self.assertFalse(rest) self.assertTrue(authsafe.prettyPrint()) self.assertEqual(data, der_encoder(authsafe)) for ci in authsafe: self.assertEqual(rfc5652.id_data, ci['contentType']) data, rest = der_decoder(ci['content'], asn1Spec=univ.OctetString()) self.assertFalse(rest) sc, rest = der_decoder(data, asn1Spec=rfc7292.SafeContents()) self.assertFalse(rest) self.assertTrue(sc.prettyPrint()) self.assertEqual(data, 
                der_encoder(sc))

            for sb in sc:
                if sb['bagId'] in rfc7292.pkcs12BagTypeMap:
                    bv, rest = der_decoder(
                        sb['bagValue'],
                        asn1Spec=rfc7292.pkcs12BagTypeMap[sb['bagId']])
                    self.assertFalse(rest)
                    self.assertTrue(bv.prettyPrint())
                    self.assertEqual(sb['bagValue'], der_encoder(bv))

                    for attr in sb['bagAttributes']:
                        if attr['attrType'] in rfc5652.cmsAttributesMap:
                            av, rest = der_decoder(
                                attr['attrValues'][0],
                                asn1Spec=rfc5652.cmsAttributesMap[attr['attrType']])
                            self.assertFalse(rest)
                            self.assertTrue(av.prettyPrint())
                            self.assertEqual(
                                attr['attrValues'][0], der_encoder(av))

    def testOpenTypes(self):
        substrate = pem.readBase64fromText(self.pfx_pem_text)
        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        digest_alg = asn1Object['macData']['mac']['digestAlgorithm']
        self.assertFalse(digest_alg['parameters'].hasValue())

        authsafe, rest = der_decoder(
            asn1Object['authSafe']['content'],
            asn1Spec=rfc7292.AuthenticatedSafe(),
            decodeOpenTypes=True)
        self.assertFalse(rest)
        self.assertTrue(authsafe.prettyPrint())
        self.assertEqual(
            asn1Object['authSafe']['content'], der_encoder(authsafe))

        for ci in authsafe:
            self.assertEqual(rfc5652.id_data, ci['contentType'])
            sc, rest = der_decoder(
                ci['content'],
                asn1Spec=rfc7292.SafeContents(),
                decodeOpenTypes=True)
            self.assertFalse(rest)
            self.assertTrue(sc.prettyPrint())
            self.assertEqual(ci['content'], der_encoder(sc))

            for sb in sc:
                if sb['bagId'] == rfc7292.id_pkcs8ShroudedKeyBag:
                    bv = sb['bagValue']
                    enc_alg = bv['encryptionAlgorithm']['algorithm']
                    self.assertEqual(
                        rfc7292.pbeWithSHAAnd3_KeyTripleDES_CBC, enc_alg)
                    enc_alg_param = bv['encryptionAlgorithm']['parameters']
                    self.assertEqual(2000, enc_alg_param['iterations'])


suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
# You are climbing a stair case. It takes n steps to reach the top.
#
# Each time you can either climb 1 or 2 steps. In how many distinct ways
# can you climb to the top?
#
# Note: Given n will be a positive integer.
#
# Example 1:
#
# Input: 2
# Output: 2
# Explanation: There are two ways to climb to the top.
# 1. 1 step + 1 step
# 2. 2 steps
#
# Example 2:
#
# Input: 3
# Output: 3
# Explanation: There are three ways to climb to the top.
# 1. 1 step + 1 step + 1 step
# 2. 1 step + 2 steps
# 3. 2 steps + 1 step

class Solution(object):
    def climbStairs(self, n):
        """
        :type n: int
        :rtype: int
        """
        table = [1, 2]
        i = 2
        while i < n:
            table.append(table[i - 1] + table[i - 2])
            i += 1
        return table[n - 1]

# Note: ways(n) = ways(n-1) + ways(n-2) -- after the first move (1 or 2
# steps) the remaining staircase is a smaller instance of the same problem,
# so the two counts add. This is the Fibonacci recurrence, tabulated
# bottom-up.
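# An equivalent constant-space variant (illustrative sketch): the table above
# only ever reads its last two entries, so two variables suffice.
def climb_stairs_constant_space(n):
    a, b = 1, 2  # ways(1), ways(2)
    for _ in range(n - 1):
        a, b = b, a + b
    return a

assert climb_stairs_constant_space(3) == 3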
from django.http import HttpResponse


def hello_world(request):
    return HttpResponse("Hello, world.")
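# Wiring sketch (hypothetical urls.py for this view, Django >= 2.0):
#
# from django.urls import path
# from .views import hello_world
#
# urlpatterns = [path('hello/', hello_world)]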
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function

import sys

if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf-8')

from . import config
from . import parsers


def main():
    if len(sys.argv) == 2:
        filename = sys.argv[1]
        filename = parsers.to_unicode(filename)
        parsers.run(filename)
    else:
        msg = 'Usage: {} <metadata>'.format(sys.argv[0])
        print(msg)
        print('\nPredefined Variables')
        for k, v in config.PREDEFINED_VARIABLE_TABLE.items():
            print('{}\t: {}'.format(k, v))


if __name__ == '__main__':
    main()
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators


class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):

    @decorators.related_bug('1420008')
    @decorators.idempotent_id('646a6eaa-135f-4493-a0af-12583021224e')
    def test_create_image_owner_param(self):
        # NOTE: Creating an image with an owner different from the tenant
        # owner by using the "owner" parameter requires admin privileges.
        random_id = data_utils.rand_uuid_hex()
        image = self.admin_client.create_image(
            container_format='bare', disk_format='raw', owner=random_id)
        self.addCleanup(self.admin_client.delete_image, image['id'])
        image_info = self.admin_client.show_image(image['id'])
        self.assertEqual(random_id, image_info['owner'])

    @decorators.related_bug('1420008')
    @decorators.idempotent_id('525ba546-10ef-4aad-bba1-1858095ce553')
    def test_update_image_owner_param(self):
        random_id_1 = data_utils.rand_uuid_hex()
        image = self.admin_client.create_image(
            container_format='bare', disk_format='raw', owner=random_id_1)
        self.addCleanup(self.admin_client.delete_image, image['id'])
        created_image_info = self.admin_client.show_image(image['id'])

        random_id_2 = data_utils.rand_uuid_hex()
        self.admin_client.update_image(
            image['id'], [dict(replace="/owner", value=random_id_2)])
        updated_image_info = self.admin_client.show_image(image['id'])
        self.assertEqual(random_id_2, updated_image_info['owner'])
        self.assertNotEqual(created_image_info['owner'],
                            updated_image_info['owner'])
""" File: DaqDevDiscovery01.py Library Call Demonstrated: mcculw.ul.get_daq_device_inventory() mcculw.ul.create_daq_device() mcculw.ul.release_daq_device() Purpose: Discovers DAQ devices and assigns board number to the detected devices. Demonstration: Displays the detected DAQ devices and flashes the LED of the selected device. Other Library Calls: mcculw.ul.ignore_instacal() mcculw.ul.flash_led() """ from __future__ import absolute_import, division, print_function from builtins import * # @UnusedWildImport import tkinter as tk from tkinter import StringVar from tkinter.ttk import Combobox # @UnresolvedImport from mcculw import ul from mcculw.enums import InterfaceType from mcculw.ul import ULError try: from ui_examples_util import UIExample, show_ul_error except ImportError: from .ui_examples_util import UIExample, show_ul_error class DaqDevDiscovery01(UIExample): def __init__(self, master): super(DaqDevDiscovery01, self).__init__(master) self.board_num = 0 self.device_created = False # Tell the UL to ignore any boards configured in InstaCal ul.ignore_instacal() self.create_widgets() def discover_devices(self): self.inventory = ul.get_daq_device_inventory(InterfaceType.ANY) if len(self.inventory) > 0: combobox_values = [] for device in self.inventory: combobox_values.append(str(device)) self.devices_combobox["values"] = combobox_values self.devices_combobox.current(0) self.status_label["text"] = (str(len(self.inventory)) + " DAQ Device(s) Discovered") self.devices_combobox["state"] = "readonly" self.flash_led_button["state"] = "normal" else: self.devices_combobox["values"] = [""] self.devices_combobox.current(0) self.status_label["text"] = "No Devices Discovered" self.devices_combobox["state"] = "disabled" self.flash_led_button["state"] = "disabled" def flash_led(self): try: # Flash the device LED ul.flash_led(self.board_num) except ULError as e: show_ul_error(e) def selected_device_changed(self, *args): # @UnusedVariable selected_index = self.devices_combobox.current() inventory_count = len(self.inventory) if self.device_created: # Release any previously configured DAQ device from the UL. ul.release_daq_device(self.board_num) self.device_created = False if inventory_count > 0 and selected_index < inventory_count: descriptor = self.inventory[selected_index] # Update the device ID label self.device_id_label["text"] = descriptor.unique_id # Create the DAQ device from the descriptor # For performance reasons, it is not recommended to create # and release the device every time hardware communication is # required. 
Instead, create the device once and do not release # it until no additional library calls will be made for this # device ul.create_daq_device(self.board_num, descriptor) self.device_created = True def create_widgets(self): '''Create the tkinter UI''' main_frame = tk.Frame(self) main_frame.pack(fill=tk.X, anchor=tk.NW) discover_button = tk.Button(main_frame) discover_button["text"] = "Discover DAQ Devices" discover_button["command"] = self.discover_devices discover_button.pack(padx=3, pady=3) self.status_label = tk.Label(main_frame) self.status_label["text"] = "Status" self.status_label.pack(anchor=tk.NW, padx=3, pady=3) results_group = tk.LabelFrame(self, text="Discovered Devices") results_group.pack(fill=tk.X, anchor=tk.NW, padx=3, pady=3) self.selected_device_textvar = StringVar() self.selected_device_textvar.trace('w', self.selected_device_changed) self.devices_combobox = Combobox( results_group, textvariable=self.selected_device_textvar) self.devices_combobox["state"] = "disabled" self.devices_combobox.pack(fill=tk.X, padx=3, pady=3) device_id_frame = tk.Frame(results_group) device_id_frame.pack(anchor=tk.NW) device_id_left_label = tk.Label(device_id_frame) device_id_left_label["text"] = "Device Identifier:" device_id_left_label.grid(row=0, column=0, sticky=tk.W, padx=3, pady=3) self.dev
ice_id_label = tk.Label(device_id_frame) self.device_id_label.grid(row=0, column=1, sticky=tk.W, padx=3, pady=3) self.flash_led_button = tk.Button(results_group) self.flash_led_button["text"] = "Flash LED" self.flash_led_button["command"] = self.flash_led self.flash_led_button["state"] = "disabled" self.flash_led_button.pack(padx=3, pady=3) button_frame
= tk.Frame(self) button_frame.pack(fill=tk.X, side=tk.RIGHT, anchor=tk.SE) quit_button = tk.Button(button_frame) quit_button["text"] = "Quit" quit_button["command"] = self.master.destroy quit_button.grid(row=0, column=1, padx=3, pady=3) # Start the example if this module is being run if __name__ == "__main__": # Start the example DaqDevDiscovery01(master=tk.Tk()).mainloop()
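# Headless usage sketch built from the same UL calls demonstrated above
# (assumes at least one device is attached; board number 0 is arbitrary).
def flash_first_detected_device():
    ul.ignore_instacal()
    devices = ul.get_daq_device_inventory(InterfaceType.ANY)
    if devices:
        ul.create_daq_device(0, devices[0])
        try:
            ul.flash_led(0)
        finally:
            ul.release_daq_device(0)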
from django.db.models import Q
from django_filters import rest_framework as filters

from adesao.models import SistemaCultura, UFS
from planotrabalho.models import Componente


class SistemaCulturaFilter(filters.FilterSet):
    ente_federado = filters.CharFilter(
        field_name='ente_federado__nome__unaccent', lookup_expr='icontains')
    estado_sigla = filters.CharFilter(method='sigla_filter')
    cnpj_prefeitura = filters.CharFilter(
        field_name='sede__cnpj', lookup_expr='contains')
    situacao_adesao = filters.CharFilter(
        field_name='estado_processo', lookup_expr='exact')
    data_adesao = filters.DateFilter(
        field_name='data_publicacao_acordo')
    data_adesao_min = filters.DateFilter(
        field_name='data_publicacao_acordo', lookup_expr='gte')
    data_adesao_max = filters.DateFilter(
        field_name='data_publicacao_acordo', lookup_expr='lte')
    data_componente_min = filters.DateFilter(
        field_name='data_componente_acordo', lookup_expr='gte',
        method='data_componente_min')
    data_componente_max = filters.DateFilter(
        field_name='data_componente_acordo', lookup_expr='lte',
        method='data_componente_max')
    data_lei_min = filters.DateFilter(
        field_name='legislacao__data_publicacao', lookup_expr='gte')
    data_lei_max = filters.DateFilter(
        field_name='legislacao__data_publicacao', lookup_expr='lte')
    data_orgao_gestor_min = filters.DateFilter(
        field_name='orgao_gestor__data_publicacao', lookup_expr='gte')
    data_orgao_gestor_max = filters.DateFilter(
        field_name='orgao_gestor__data_publicacao', lookup_expr='lte')
    data_orgao_gestor_cnpj_min = filters.DateFilter(
        field_name='orgao_gestor__comprovante_cnpj__data_envio',
        lookup_expr='gte')
    data_orgao_gestor_cnpj_max = filters.DateFilter(
        field_name='orgao_gestor__comprovante_cnpj__data_envio',
        lookup_expr='lte')
    data_conselho_min = filters.DateFilter(
        field_name='conselho__data_publicacao', lookup_expr='gte')
    data_conselho_max = filters.DateFilter(
        field_name='conselho__data_publicacao', lookup_expr='lte')
    data_conselho_lei_min = filters.DateFilter(
        field_name='conselho__lei__data_publicacao', lookup_expr='gte')
    data_conselho_lei_max = filters.DateFilter(
        field_name='conselho__lei__data_publicacao', lookup_expr='lte')
    data_fundo_cultura_min = filters.DateFilter(
        field_name='legislacao__data_publicacao', lookup_expr='gte')
    data_fundo_cultura_max = filters.DateFilter(
        field_name='legislacao__data_publicacao', lookup_expr='lte')
    data_fundo_cultura_cnpj_min = filters.DateFilter(
        field_name='fundo_cultura__comprovante_cnpj__data_envio',
        lookup_expr='gte')
    data_fundo_cultura_cnpj_max = filters.DateFilter(
        field_name='fundo_cultura__comprovante_cnpj__data_envio',
        lookup_expr='lte')
    data_plano_min = filters.DateFilter(
        field_name='plano__data_publicacao', lookup_expr='gte')
    data_plano_max = filters.DateFilter(
        field_name='plano__data_publicacao', lookup_expr='lte')
    data_plano_meta_min = filters.DateFilter(
        field_name='plano__metas__data_envio', lookup_expr='gte')
    data_plano_meta_max = filters.DateFilter(
        field_name='plano__metas__data_envio', lookup_expr='lte')
    orgao_gestor_dados_bancarios = filters.BooleanFilter(
        method='gestor_dados_bancarios_filter')
    fundo_cultura_dados_bancarios = filters.BooleanFilter(
        method='fundo_cultura_dados_bancarios_filter')
    situacao_lei_sistema = filters.ModelMultipleChoiceFilter(
        queryset=Componente.objects.all(),
        field_name='legislacao__situacao',
        to_field_name='situacao'
    )
    situacao_orgao_gestor = filters.ModelMultipleChoiceFilter(
        queryset=Componente.objects.all(),
        field_name='orgao_gestor__situacao',
        to_field_name='situacao'
    )
    situacao_conselho_cultural = filters.ModelMultipleChoiceFilter(
        queryset=Componente.objects.all(),
        field_name='conselho__situacao',
        to_field_name='situacao'
    )
    situacao_fundo_cultura = filters.ModelMultipleChoiceFilter(
        queryset=Componente.objects.all(),
        field_name='fundo_cultura__situacao',
        to_field_name='situacao'
    )
    situacao_plano_cultura = filters.ModelMultipleChoiceFilter(
        queryset=Componente.objects.all(),
        field_name='plano__situacao',
        to_field_name='situacao'
    )
    municipal = filters.BooleanFilter(method='municipal_filter')
    estadual = filters.BooleanFilter(method='estadual_filter')

    class Meta:
        model = SistemaCultura
        exclude = ('oficio_cadastrador', 'oficio_prorrogacao_prazo',)

    def gestor_dados_bancarios_filter(self, queryset, name, value):
        queryset = queryset.exclude(orgao_gestor__banco='')\
            .exclude(orgao_gestor__agencia='')\
            .exclude(orgao_gestor__conta='')\
            .exclude(orgao_gestor__banco__isnull=True)\
            .exclude(orgao_gestor__agencia__isnull=True)\
            .exclude(orgao_gestor__conta__isnull=True)
        return queryset

    def fundo_cultura_dados_bancarios_filter(self, queryset, name, value):
        queryset = queryset.exclude(fundo_cultura__banco='')\
            .exclude(fundo_cultura__agencia='')\
            .exclude(fundo_cultura__conta='')\
            .exclude(fundo_cultura__banco__isnull=True)\
            .exclude(fundo_cultura__agencia__isnull=True)\
            .exclude(fundo_cultura__conta__isnull=True)
        return queryset

    def sigla_filter(self, queryset, name, value):
        try:
            inverseUf = {value: key for key, value in UFS.items()}
            cod_ibge = inverseUf[value.upper()]
        except Exception:
            cod_ibge = value
        return queryset.filter(Q(ente_federado__cod_ibge__startswith=cod_ibge))

    def estadual_filter(self, queryset, name, value):
        pular_filtro = self.checar_filtro_municipal_estadual_ativos()
        if pular_filtro:
            return queryset
        if value:
            queryset = queryset.filter(ente_federado__cod_ibge__lte=100)
        return queryset

    def municipal_filter(self, queryset, name, value):
        pular_filtro = self.checar_filtro_municipal_estadual_ativos()
        if pular_filtro:
            return queryset
        if value:
            queryset = queryset.filter(ente_federado__cod_ibge__gt=100)
        return queryset

    def checar_filtro_municipal_estadual_ativos(self):
        try:
            estadual_filter = self.data.getlist('estadual')[0]
            municipal_filter = self.data.getlist('municipal')[0]
        except IndexError:
            return False
        if estadual_filter == 'true' and municipal_filter == 'true':
            return True
        return False


class PlanoTrabalhoFilter(SistemaCulturaFilter):

    class Meta:
        model = SistemaCultura
        exclude = ('oficio_cadastrador', 'oficio_prorrogacao_prazo',)
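# Usage sketch (e.g. from a DRF view or a shell session): bind
# querystring-style data and read the filtered queryset from `.qs`.
# The filter values below are illustrative.
#
# filtro = SistemaCulturaFilter(
#     data={'estadual': 'true', 'situacao_adesao': '6'},
#     queryset=SistemaCultura.objects.all())
# resultados = filtro.qs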
class InvalidQuery(Exception):
    """
    The query passed to raw isn't a safe query to use with raw.
    """
    pass


class QueryWrapper(object):
    """
    A type that indicates the contents are an SQL fragment and the associate
    parameters. Can be used to pass opaque data to a where-clause, for example.
    """
    contains_aggregate = False

    def __init__(self, sql, params):
        self.data = sql, list(params)

    def as_sql(self, compiler=None, connection=None):
        return self.data


class Q(tree.Node):
    """
    Encapsulates filters as objects that can then be combined logically (using
    `&` and `|`).
    """
    # Connection types
    AND = 'AND'
    OR = 'OR'
    default = AND

    def __init__(self, *args, **kwargs):
        super(Q, self).__init__(children=list(args) + list(kwargs.items()))

    def _combine(self, other, conn):
        if not isinstance(other, Q):
            raise TypeError(other)
        obj = type(self)()
        obj.connector = conn
        obj.add(self, conn)
        obj.add(other, conn)
        return obj

    def __or__(self, other):
        return self._combine(other, self.OR)

    def __and__(self, other):
        return self._combine(other, self.AND)

    def __invert__(self):
        obj = type(self)()
        obj.add(self, self.AND)
        obj.negate()
        return obj

    def clone(self):
        clone = self.__class__._new_instance(
            children=[], connector=self.connector, negated=self.negated)
        for child in self.children:
            if hasattr(child, 'clone'):
                clone.children.append(child.clone())
            else:
                clone.children.append(child)
        return clone

    def resolve_expression(self, query=None, allow_joins=True, reuse=None,
                           summarize=False, for_save=False):
        # We must promote any new joins to left outer joins so that when Q is
        # used as an expression, rows aren't filtered due to joins.
        clause, joins = query._add_q(self, reuse, allow_joins=allow_joins,
                                     split_subq=False)
        query.promote_joins(joins)
        return clause

    @classmethod
    def _refs_aggregate(cls, obj, existing_aggregates):
        if not isinstance(obj, tree.Node):
            aggregate, aggregate_lookups = refs_aggregate(
                obj[0].split(LOOKUP_SEP), existing_aggregates)
            if not aggregate and hasattr(obj[1], 'refs_aggregate'):
                return obj[1].refs_aggregate(existing_aggregates)
            return aggregate, aggregate_lookups
        for c in obj.children:
            aggregate, aggregate_lookups = cls._refs_aggregate(
                c, existing_aggregates)
            if aggregate:
                return aggregate, aggregate_lookups
        return False, ()

    def refs_aggregate(self, existing_aggregates):
        if not existing_aggregates:
            return False
        return self._refs_aggregate(self, existing_aggregates)


class DeferredAttribute(object):
    """
    A wrapper for a deferred-loading field. When the value is read from this
    object the first time, the query is executed.
    """
    def __init__(self, field_name, model):
        self.field_name = field_name

    def __get__(self, instance, cls=None):
        """
        Retrieves and caches the value from the datastore on the first lookup.
        Returns the cached value.
        """
        non_deferred_model = instance._meta.proxy_for_model
        opts = non_deferred_model._meta

        assert instance is not None
        data = instance.__dict__
        if data.get(self.field_name, self) is self:
            # self.field_name is the attname of the field, but only() takes
            # the actual name, so we need to translate it here.
            try:
                f = opts.get_field(self.field_name)
            except FieldDoesNotExist:
                f = [f for f in opts.fields
                     if f.attname == self.field_name][0]
            name = f.name
            # Let's see if the field is part of the parent chain. If so we
            # might be able to reuse the already loaded value. Refs #18343.
            val = self._check_parent_chain(instance, name)
            if val is None:
                instance.refresh_from_db(fields=[self.field_name])
                val = getattr(instance, self.field_name)
            data[self.field_name] = val
        return data[self.field_name]

    def __set__(self, instance, value):
        """
        Deferred loading attributes can be set normally (which means there
        will never be a database lookup involved).
        """
        instance.__dict__[self.field_name] = value

    def _check_parent_chain(self, instance, name):
        """
        Check if the field value can be fetched from a parent field already
        loaded in the instance. This can be done if the to-be fetched field
        is a primary key field.
        """
        opts = instance._meta
        f = opts.get_field(name)
        link_field = opts.get_ancestor_link(f.model)
        if f.primary_key and f != link_field:
            return getattr(instance, link_field.attname)
        return None


class RegisterLookupMixin(object):
    def _get_lookup(self, lookup_name):
        try:
            return self.class_lookups[lookup_name]
        except KeyError:
            # To allow for inheritance, check parent class' class_lookups.
            for parent in inspect.getmro(self.__class__):
                if 'class_lookups' not in parent.__dict__:
                    continue
                if lookup_name in parent.class_lookups:
                    return parent.class_lookups[lookup_name]
        except AttributeError:
            # This class didn't have any class_lookups
            pass
        return None

    def get_lookup(self, lookup_name):
        from django.db.models.lookups import Lookup
        found = self._get_lookup(lookup_name)
        if found is None and hasattr(self, 'output_field'):
            return self.output_field.get_lookup(lookup_name)
        if found is not None and not issubclass(found, Lookup):
            return None
        return found

    def get_transform(self, lookup_name):
        from django.db.models.lookups import Transform
        found = self._get_lookup(lookup_name)
        if found is None and hasattr(self, 'output_field'):
            return self.output_field.get_transform(lookup_name)
        if found is not None and not issubclass(found, Transform):
            return None
        return found

    @classmethod
    def register_lookup(cls, lookup, lookup_name=None):
        if lookup_name is None:
            lookup_name = lookup.lookup_name
        if 'class_lookups' not in cls.__dict__:
            cls.class_lookups = {}
        cls.class_lookups[lookup_name] = lookup
        return lookup

    @classmethod
    def _unregister_lookup(cls, lookup, lookup_name=None):
        """
        Remove given lookup from cls lookups. For use in tests only as it's
        not thread-safe.
        """
        if lookup_name is None:
            lookup_name = lookup.lookup_name
        del cls.class_lookups[lookup_name]


def select_related_descend(field, restricted, requested, load_fields,
                           reverse=False):
    """
    Returns True if this field should be used to descend deeper for
    select_related() purposes. Used by both the query construction code
    (sql.query.fill_related_selections()) and the model instance creation code
    (query.get_klass_info()).

    Arguments:
     * field - the field to be checked
     * restricted - a boolean field, indicating if the field list has been
       manually restricted using a requested clause
     * requested - The select_related() dictionary.
     * load_fields - the set of fields to be loaded on this model
     * reverse - boolean, True if we are checking a reverse select related
    """
    if not field.remote_field:
        return False
    if field.remote_field.parent_link and not reverse:
        return False
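# Illustration of the Q semantics implemented above (`Author` and its fields
# are hypothetical): `&` and `|` build a connector tree, `~` negates a node.
#
# from django.db.models import Q
# Author.objects.filter(Q(name__startswith='A') | ~Q(age__lt=18))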
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Script for SpecGAN on a waveform dataset.

This follows the original SpecGAN training, where the magnitude spectrums
are normalized to sit between -1 and 1.
"""

import os

import tensorflow as tf
import numpy as np
from tensorflow.keras import activations, utils

from audio_synthesis.structures import spec_gan
from audio_synthesis.models import wgan
from audio_synthesis.datasets import waveform_dataset
from audio_synthesis.utils import waveform_save_helper as save_helper

# Setup Parameters
D_UPDATES_PER_G = 5
Z_DIM = 64
BATCH_SIZE = 64
EPOCHS = 1800
SAMPLING_RATE = 16000
GRIFFIN_LIM_ITERATIONS = 16
FFT_FRAME_LENGTH = 512
FFT_FRAME_STEP = 128
LOG_MAGNITUDE = True
Z_IN_SHAPE = [4, 8, 1024]
SPECTOGRAM_IMAGE_SHAPE = [-1, 128, 256, 1]
CHECKPOINT_DIR = '_results/representation_study/SpeechMNIST/SpecGAN_HR/training_checkpoints/'
RESULT_DIR = '_results/representation_study/SpeechMNIST/SpecGAN_HR/audio/'
DATASET_PATH = 'data/SpeechMNIST_1850.npz'


def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = '2'
    print('Num GPUs Available: ',
          len(tf.config.experimental.list_physical_devices('GPU')))

    raw_dataset, magnitude_stats, _ =\
        waveform_dataset.get_magnitude_phase_dataset(
            DATASET_PATH, FFT_FRAME_LENGTH, FFT_FRAME_STEP, LOG_MAGNITUDE
        )
    raw_dataset = raw_dataset[:, :, :, 0]  # Remove the phase information

    normalized_raw_dataset = []
    pb_i = utils.Progbar(len(raw_dataset))
    for data_point in raw_dataset:
        normalized_raw_dataset.append(waveform_dataset.normalize(
            data_point, *magnitude_stats
        ))
        pb_i.add(1)
    normalized_raw_dataset = np.array(normalized_raw_dataset)

    generator = spec_gan.Generator(activation=activations.tanh,
                                   in_shape=Z_IN_SHAPE)
    discriminator = spec_gan.Discriminator(input_shape=SPECTOGRAM_IMAGE_SHAPE)

    generator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5,
                                                   beta_2=0.9)
    discriminator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5,
                                                       beta_2=0.9)

    get_waveform = lambda magnitude:\
        save_helper.get_waveform_from_normalized_magnitude(
            magnitude, magnitude_stats, GRIFFIN_LIM_ITERATIONS,
            FFT_FRAME_LENGTH, FFT_FRAME_STEP, LOG_MAGNITUDE
        )

    save_examples = lambda epoch, real, generated:\
        save_helper.save_wav_data(
            epoch, real, generated, SAMPLING_RATE, RESULT_DIR, get_waveform
        )

    spec_gan_model = wgan.WGAN(
        normalized_raw_dataset, generator, [discriminator], Z_DIM,
        generator_optimizer, discriminator_optimizer,
        discriminator_training_ratio=D_UPDATES_PER_G,
        batch_size=BATCH_SIZE, epochs=EPOCHS, checkpoint_dir=CHECKPOINT_DIR,
        fn_save_examples=save_examples
    )

    spec_gan_model.restore('ckpt-129', 1290)
    spec_gan_model.train()

if __name__ == '__main__':
    main()
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Search Ads operators.
"""
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Optional

from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.search_ads import GoogleSearchAdsHook
from airflow.utils.decorators import apply_defaults


class GoogleSearchAdsInsertReportOperator(BaseOperator):
    """
    Inserts a report request into the reporting system.

    .. seealso::
        For API documentation check:
        https://developers.google.com/search-ads/v2/reference/reports/request

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleSearchAdsInsertReportOperator`

    :param report: Report to be generated
    :type report: Dict[str, Any]
    :param api_version: The version of the api that will be requested for example 'v3'.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the
        service account making the request must have domain-wide delegation enabled.
    :type delegate_to: str
    """

    template_fields = ("report",)
    template_ext = (".json",)

    @apply_defaults
    def __init__(
        self,
        report: Dict[str, Any],
        api_version: str = "v2",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.report = report
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to

    def execute(self, context: Dict):
        hook = GoogleSearchAdsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )
        self.log.info("Generating Search Ads report")
        response = hook.insert_report(report=self.report)
        report_id = response.get("id")
        self.xcom_push(context, key="report_id", value=report_id)
        self.log.info("Report generated, id: %s", report_id)
        return response


class GoogleSearchAdsDownloadReportOperator(BaseOperator):
    """
    Downloads a report to GCS bucket.

    .. seealso::
        For API documentation check:
        https://developers.google.com/search-ads/v2/reference/reports/getFile

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleSearchAdsGetfileReportOperator`

    :param report_id: ID of the report.
    :type report_id: str
    :param bucket_name: The bucket to upload to.
    :type bucket_name: str
    :param report_name: The report name to set when uploading the local file. If not
        provided then report_id is used.
    :type report_name: str
    :param gzip: Option to compress local file or file data for upload
    :type gzip: bool
    :param api_version: The version of the api that will be requested for example 'v3'.
    :type api_version: str
    :param gcp_conn_id: The connection ID to use when fetching connection info.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any. For this to work, the
        service account making the request must have domain-wide delegation enabled.
    :type delegate_to: str
    """

    template_fields = ("report_name", "report_id", "bucket_name")

    @apply_defaults
    def __init__(
        self,
        report_id: str,
        bucket_name: str,
        report_name: Optional[str] = None,
        gzip: bool = True,
        chunk_size: int = 10 * 1024 * 1024,
        api_version: str = "v2",
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: Optional[str] = None,
        *args,
        **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        self.report_id = report_id
        self.api_version = api_version
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.chunk_size = chunk_size
        self.gzip = gzip
        self.bucket_name = self._set_bucket_name(bucket_name)
        self.report_name = report_name

    def _resolve_file_name(self, name: str) -> str:
        csv = ".csv"
        gzip = ".gz"
        if not name.endswith(csv):
            name += csv
        if self.gzip:
            name += gzip
        return name

    @staticmethod
    def _set_bucket_name(name: str) -> str:
        bucket = name if not name.startswith("gs://") else name[5:]
        return bucket.strip("/")

    @staticmethod
    def _handle_report_fragment(fragment: bytes) -> bytes:
        fragment_records = fragment.split(b"\n", 1)
        if len(fragment_records) > 1:
            return fragment_records[1]
        return b""

    def execute(self, context: Dict):
        hook = GoogleSearchAdsHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            api_version=self.api_version,
        )

        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to
        )

        # Resolve file name of the report
        report_name = self.report_name or self.report_id
        report_name = self._resolve_file_name(report_name)

        response = hook.get(report_id=self.report_id)
        if not response['isReportReady']:
            raise AirflowException('Report {} is not ready yet'.format(self.report_id))

        # Resolve report fragments
        fragments_count = len(response["files"])

        # Download chunks of report's data
        self.log.info("Downloading Search Ads report %s", self.report_id)
        with NamedTemporaryFile() as temp_file:
            for i in range(fragments_count):
                byte_content = hook.get_file(
                    report_fragment=i, report_id=self.report_id
                )
                fragment = (
                    byte_content
                    if i == 0
                    else self._handle_report_fragment(byte_content)
                )
                temp_file.write(fragment)
            temp_file.flush()

            gcs_hook.upload(
                bucket_name=self.bucket_name,
                object_name=report_name,
                gzip=self.gzip,
                filename=temp_file.name,
            )
        self.xcom_push(context, key="file_name", value=report_name)
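# DAG wiring sketch for the two operators above (illustrative only): the
# dag_id, schedule, bucket and report body are hypothetical, and report_id
# is templated from the XCom key "report_id" pushed by the insert task.
#
# from airflow import DAG
# from airflow.utils.dates import days_ago
#
# with DAG("search_ads_example", start_date=days_ago(1),
#          schedule_interval=None) as dag:
#     insert_report = GoogleSearchAdsInsertReportOperator(
#         task_id="insert_report", report={"reportType": "keyword"})
#     download_report = GoogleSearchAdsDownloadReportOperator(
#         task_id="download_report",
#         report_id="{{ task_instance.xcom_pull('insert_report', key='report_id') }}",
#         bucket_name="my-bucket")
#     insert_report >> download_report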
from fruits import validate_fruit

fruits = ["banana", "lemon", "apple", "orange", "batman"]
print fruits


def list_fruits(fruits, byName=True):
    if byName:
        # WARNING: this won't make a copy of the list and return it.
        # It will change the list FOREVER
        fruits.sort()
    for index, fruit in enumerate(fruits):
        if validate_fruit(fruit):
            print "Fruit nr %d is %s" % (index, fruit)
        else:
            print "This %s is no fruit!" % (fruit)

list_fruits(fruits)
print fruits
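# Non-mutating variant (illustrative sketch, Python 2 like the file above):
# sorted() returns a new list, so the caller's list is left untouched.
def list_fruits_unchanged(fruits, byName=True):
    for index, fruit in enumerate(sorted(fruits) if byName else fruits):
        if validate_fruit(fruit):
            print "Fruit nr %d is %s" % (index, fruit)
        else:
            print "This %s is no fruit!" % (fruit)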
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

from kazoo.client import KazooClient

from libzookeeper.conf import PRINCIPAL_NAME


def get_children_data(ensemble, namespace, read_only=True):
    zk = KazooClient(hosts=ensemble, read_only=read_only,
                     sasl_server_principal=PRINCIPAL_NAME.get())
    zk.start()

    children_data = []
    children = zk.get_children(namespace)
    for node in children:
        data, stat = zk.get("%s/%s" % (namespace, node))
        children_data.append(data)

    zk.stop()

    return children_data
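# Usage sketch: the ensemble string and namespace below are placeholders.
# Each child's raw payload is returned; callers that store JSON can decode
# it with the json module imported above.
#
# for data in get_children_data('localhost:2181', '/hue/my-app'):
#     print json.loads(data)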
#!/usr/bin/env python
# Copyright (c) 2010-2013 by Yaco Sistemas <goinnn@gmail.com> or <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import django
import os
import sys

from django.conf import ENVIRONMENT_VARIABLE
from django.core import management

if len(sys.argv) == 1:
    os.environ[ENVIRONMENT_VARIABLE] = 'testing.settings'
else:
    os.environ[ENVIRONMENT_VARIABLE] = sys.argv[1]

if django.VERSION[0] == 1 and django.VERSION[1] <= 5:
    management.call_command('test', 'unit_tests')
else:
    management.call_command('test', 'testing.unit_tests')
# Each person has a preference ordering; produce an overall ordering that
# violates none of the individual preferences. Solved with a topological sort
# (https://instant.1point3acres.com/thread/207601)
import itertools
import collections


def preferenceList1(prefList):
    # topological sort 1
    pairs = []
    for lis in prefList:
        for left, right in zip(lis, lis[1:]):
            pairs += (left, right),
    allItems, res = set(itertools.chain(*pairs)), []
    while pairs:
        free = allItems - set(p[1] for p in pairs)
        if not free:
            return None  # a cycle: no ordering can satisfy every preference
        res += list(free)
        pairs = list(filter(free.isdisjoint, pairs))
        allItems -= free
    return res + list(allItems)


print(preferenceList1([[1, 2, 3, 4], ['a', 'b', 'c', 'd'], ['a', 1, 8], [2, 'b', 'e'], [3, 'c']]))
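# --- Alternative sketch (not in the original): the same ordering computed via
# Kahn's algorithm, using the `collections` import above; preferenceList2 is a
# hypothetical name, and ties are popped in arbitrary order.
def preferenceList2(prefList):
    succs = collections.defaultdict(set)
    indeg = collections.Counter()
    items = set()
    for lis in prefList:
        items.update(lis)
        for left, right in zip(lis, lis[1:]):
            if right not in succs[left]:
                succs[left].add(right)
                indeg[right] += 1
    free = [x for x in items if indeg[x] == 0]
    res = []
    while free:
        node = free.pop()
        res.append(node)
        for nxt in succs[node]:
            indeg[nxt] -= 1
            if indeg[nxt] == 0:
                free.append(nxt)
    return res if len(res) == len(items) else None  # None on a cycle


print(preferenceList2([[1, 2, 3, 4], ['a', 'b', 'c', 'd'], ['a', 1, 8], [2, 'b', 'e'], [3, 'c']]))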
ributor( read_contrib, permissions=[ permissions.READ], save=True) private_node_one.add_contributor( write_contrib, permissions=[ permissions.READ, permissions.WRITE], save=True) return private_node_one @pytest.fixture() def private_node_one_anonymous_link(private_node_one): private_node_one_anonymous_link = PrivateLinkFactory(anonymous=True) private_node_one_anonymous_link.nodes.add(private_node_one) private_node_one_anonymous_link.save() return private_node_one_anonymous_link @pytest.fixture() def private_node_one_private_link(private_node_one): private_node_one_private_link = PrivateLinkFactory(anonymous=False) private_node_one_private_link.nodes.add(private_node_one) private_node_one_private_link.save() return private_node_one_private_link @pytest.fixture() def private_node_one_url(private_node_one): return '/{}nodes/{}/'.format(API_BASE, private_node_one._id) @pytest.fixture() def private_node_two(admin, read_contrib, write_contrib): private_node_two = ProjectFactory( is_public=False, creator=admin, title='Private Two') private_node_two.add_contributor( read_contrib, permissions=[permissions.READ], save=True) private_node_two.add_contributor( write_contrib, permissions=[ permissions.READ, permissions.WRITE], save=True) return private_node_two @pytest.fixture() def private_node_two_url(private_node_two): return '/{}nodes/{}/'.format(API_BASE, private_node_two._id) @pytest.fixture() def public_node_one(admin, read_contrib, write_contrib): public_node_one = ProjectFactory( is_public=True, creator=admin, title='Public One') public_node_one.add_contributor( read_contrib, permissions=[permissions.READ], save=True) public_node_one.add_contributor( write_contrib, permissions=[ permissions.READ, permissions.WRITE], save=True) return public_node_one @pytest.fixture() def public_node_one_anonymous_link(public_node_one): public_node_one_anonymous_link = PrivateLinkFactory(anonymous=True) public_node_one_anonymous_link.nodes.add(public_node_one) public_node_one_anonymous_link.save() return public_node_one_anonymous_link @pytest.fixture() def public_node_one_private_link(public_node_one): public_node_one_private_link = PrivateLinkFactory(anonymous=False) public_node_one_private_link.nodes.add(public_node_one) public_node_one_private_link.save() return public_node_one_private_link @pytest.fixture() def public_node_one_url(public_node_one): return '/{}nodes/{}/'.format(API_BASE, public_node_one._id) @pytest.fixture() def public_node_two(admin, read_contrib, write_contrib): public_node_two = ProjectFactory( is_public=True, creator=admin, title='Public Two') public_node_two.add_contributor( read_contrib, permissions=[permissions.READ], save=True) public_node_two.add_contributor( write_contrib, permissions=[ permissions.READ, permissions.WRITE], save=True) return public_node_two @pytest.fixture() def public_node_two_url(public_node_two): return '/{}nodes/{}/'.format(API_BASE, public_node_two._id) @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation @pytest.mark.usefixtures( 'admin', 'read_contrib', 'write_contrib', 'valid_contributors', 'private_node_one', 'private_node_one_anonymous_link', 'private_node_one_private_link', 'private_node_one_url', 'private_node_two', 'private_node_two_url', 'public_node_one', 'public_node_one_anonymous_link', 'public_node_one_private_link', 'public_node_one_url', 'public_node_two', 'public_node_two_url') class TestNodeDetailViewOnlyLinks: def test_private_node( self, app, admin, read_contrib, valid_contributors, private_node_one, private_node_one_url, 
private_node_one_private_link, private_node_one_anonymous_link, public_node_one_url, public_node_one_private_link, public_node_one_anonymous_link): # test_private_node_with_link_works_when_using_link res_normal = app.get(private_node_one_url, auth=read_contrib.auth) assert res_normal.status_code == 200 res_linked = app.get( private_node_one_url, {'view_only': private_node_one_private_link.key}) assert res_linked.status_code == 200 assert res_linked.json['data']['attributes']['current_user_permissions'] == [ 'read'] # Remove any keys that will be different for view-only responses res_normal_json = res_normal.json res_linked_json = res_linked.json user_can_comment = res_normal_json['data']['attributes'].pop( 'current_user_can_comment') view_only_can_comment = res_linked_json['data']['attributes'].pop( 'current_user_can_comment') assert user_can_comment assert not view_only_can_comment # test_private_node_with_link_unauthorized_when_not_using_link res = app.get(private_node_one_url, expect_errors=True) assert res.status_code == 401 # test_private_node_with_link_anonymous_does_not_expose_contributor_id res = app.get(private_node_one_url, { 'view_only': private_node_one_anonymous_link.key, 'embed': 'contributors', }) assert res.status_code == 200 embeds = res.json['data'].get('embeds', None) assert embeds is None or 'contributors' not in embeds # test_private_node_with_link_non_anonymous_does_expose_contributor_id res = app.get(private_node_one_url, { 'view_only': private_node_one_private_link.key, 'embed': 'contributors', }) assert res.status_code == 200 contributors = res.json['data']['embeds']['contributors']['data'] for contributor in contributors: assert contributor['id'].split('-')[1] in valid_contributors # test_private_node_logged_in_with_anonymous_link_does_not_expose_contributor_id res = app.get(private_node_one_url, { 'view_only': private_node_one_private_link.key, 'embed': 'contributors', }, auth=admin.auth) assert res.status_code == 200 contributors = res.json['data']['embeds']['contributors']['data'] for contributor in contributors: assert contributor['id'].split('-')
[1] in valid_contributors
# test_public_node_with_link_anonymous_does_not_expose_user_id res = app.get(public_node_one_url, { 'view_only': public_node_one_anonymous_link.key, 'embed': 'contributors', }) assert res.status_code == 200 embeds = res.json['data'].get('embeds', None) assert embeds is None or 'contributors' not in embeds # test_public_node_with_link_non_anonymous_does_expose_contributor_id res = app.get(public_node_one_url, { 'view_only': public_node_one_private_link.key, 'embed': 'contributors', }) assert res.status_code == 200 contributors = res.json['data']['embeds']['contributors']['data'] for contributor in contributors: assert contributor['id'].split('-')[1] in valid_contributors # test_public_node_with_link_unused_does_expose_contributor_id res = app.get(public_node_one_url, { 'embed': 'contributors', }) assert res.status_code == 200 contributors = res.json['data']['embeds']['contributors']['data'] for contributor in contributors: assert contributor['id'].split('-')[1] in valid_contributors # test_view_only_link_does_not_grant_write_permission payload = { 'data': { 'attributes': { 'title': 'Cannot touch this'}, 'id': private_node_one._id, 'type': 'n
from io import BytesIO import sys from mitmproxy.net import wsgi from mitmproxy.net.http import Headers def tflow(): headers = Headers(test=b"value") req = wsgi.Request("http", "GET", "/", "HTTP/1.1", headers, "") return wsgi.Flow(("127.0.0.1", 8888), req) class ExampleApp: def __init__(self): self.called = False def __call__(self, environ, start_response): self.called = True status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) return [b'Hello', b' world!\n'] class TestWSGI: def test_make_environ(self): w = wsgi.WSGIAdaptor(None, "foo", 80, "version") tf = tflow() assert w.make_environ(tf, None) tf.request.path = "/foo?bar=voing" r = w.make_environ(tf, None) assert r["QUERY_STRING"] == "bar=voing" def test_serve(self): ta = ExampleApp() w = wsgi.WSGIAdaptor(ta, "foo", 80, "version") f = tflow() f.request.host = "foo" f.r
equest.port = 80
wfile = BytesIO() err = w.serve(f, wfile) assert ta.called assert not err val = wfile.getvalue() assert b"Hello world" in val assert b"Server:" in val def _serve(self, app): w = wsgi.WSGIAdaptor(app, "foo", 80, "version") f = tflow() f.request.host = "foo" f.request.port = 80 wfile = BytesIO() w.serve(f, wfile) return wfile.getvalue() def test_serve_empty_body(self): def app(environ, start_response): status = '200 OK' response_headers = [('Foo', 'bar')] start_response(status, response_headers) return [] assert self._serve(app) def test_serve_double_start(self): def app(environ, start_response): try: raise ValueError("foo") except: sys.exc_info() status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) start_response(status, response_headers) assert b"Internal Server Error" in self._serve(app) def test_serve_single_err(self): def app(environ, start_response): try: raise ValueError("foo") except: ei = sys.exc_info() status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers, ei) yield b"" assert b"Internal Server Error" in self._serve(app) def test_serve_double_err(self): def app(environ, start_response): try: raise ValueError("foo") except: ei = sys.exc_info() status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) yield b"aaa" start_response(status, response_headers, ei) yield b"bbb" assert b"Internal Server Error" in self._serve(app)
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and# # limitations under the License. import copy import hashlib import os from fabric.api import env from fabric.api import run from fabric.api import settings from oslo_config import cfg from cloudferrylib.base.action import action from cloudferrylib.os.actions import task_transfer from cloudferrylib.utils.utils import forward_agent from cloudferrylib.utils import utils as utl from cloudferrylib.utils import qemu_img as qemu_img_util CONF = cfg.CONF CLOUD = 'cloud' BACKEND = 'backend' CEPH = 'ceph' ISCSI = 'iscsi' COMPUTE = 'compute' INSTANCES = 'instances' INSTANCE_BODY = 'instance' INSTANCE = 'instance' DIFF = 'diff' EPHEMERAL = 'ephemeral' DIFF_OLD = 'diff_old' EPHEMERAL_OLD = 'ephemeral_old' PATH_DST = 'path_dst' HOST_DST = 'host_dst' PATH_SRC = 'path_src' HOST_SRC = 'host_src' BACKING_FILE_DST = 'backing_file_dst' TEMP = 'temp' FLAVORS = 'flavors' TRANSPORTER_MAP = {CEPH: {CEPH: 'SSHCephToCeph', ISCSI: 'SSHCephToFile'}, ISCSI: {CEPH: 'SSHFileToCeph', ISCSI: 'SSHFileToFile'}} class TransportEphemeral(action.Action): # TODO constants def run(self, info=None, **kwargs): info = copy.deepcopy(info) # Init before run new_info = { utl.INSTANCES_TYPE: { } } # Get next one instance for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems(): is_ephemeral = instance[utl.INSTANCE_BODY]['is_ephemeral'] one_instance = { utl.INSTANCES_TYPE: { instance_id: instance } } if is_ephemeral: self.copy_ephemeral(self.src_cloud, self.dst_cloud, one_instance) new_info[utl.INSTANCES_TYPE].update( one_instance[utl.INSTANCES_TYPE]) return { 'info': new_info } @staticmethod def delete_remote_file_on_compute(path_file, host_cloud, host_instance): with settings(host_string=host_cloud, connection_attempts=env.connection_attempts): with forward_agent(env.key_filename): run("ssh -oStrictHostKeyChecking=no %s 'rm -rf %s'" % (host_instance, path_file)) def copy_data_via_ssh(self
, src_cloud, dst_cloud, info, body, resources, types): dst_storage = dst_cloud.resources[resources] src_compute = src_cloud.resources[resources] src_backend = src_compute.config.compute.backend dst_backend = dst_storage.config.compute.backend ssh_driver = (CONF.migrate.copy_backend if CONF.migrate.dir
ect_compute_transfer else TRANSPORTER_MAP[src_backend][dst_backend]) transporter = task_transfer.TaskTransfer( self.init, ssh_driver, resource_name=types, resource_root_name=body) transporter.run(info=info) def copy_ephemeral(self, src_cloud, dst_cloud, info): dst_storage = dst_cloud.resources[utl.COMPUTE_RESOURCE] src_compute = src_cloud.resources[utl.COMPUTE_RESOURCE] src_backend = src_compute.config.compute.backend dst_backend = dst_storage.config.compute.backend if (src_backend == CEPH) and (dst_backend == ISCSI): self.copy_ephemeral_ceph_to_iscsi(src_cloud, dst_cloud, info) elif (src_backend == ISCSI) and (dst_backend == CEPH): self.copy_ephemeral_iscsi_to_ceph(src_cloud, info) else: self.copy_data_via_ssh(src_cloud, dst_cloud, info, utl.EPHEMERAL_BODY, utl.COMPUTE_RESOURCE, utl.INSTANCES_TYPE) self.rebase_diff(dst_cloud, info) def copy_ephemeral_ceph_to_iscsi(self, src_cloud, dst_cloud, info): transporter = task_transfer.TaskTransfer( self.init, TRANSPORTER_MAP[ISCSI][ISCSI], resource_name=utl.INSTANCES_TYPE, resource_root_name=utl.EPHEMERAL_BODY) instances = info[utl.INSTANCES_TYPE] temp_src = src_cloud.cloud_config.cloud.temp host_dst = dst_cloud.cloud_config.cloud.ssh_host qemu_img_dst = dst_cloud.qemu_img qemu_img_src = src_cloud.qemu_img temp_path_src = temp_src + "/%s" + utl.DISK_EPHEM for inst_id, inst in instances.iteritems(): path_src_id_temp = temp_path_src % inst_id host_compute_dst = inst[EPHEMERAL][HOST_DST] inst[EPHEMERAL][ BACKING_FILE_DST] = qemu_img_dst.detect_backing_file( inst[EPHEMERAL][PATH_DST], host_compute_dst) self.delete_remote_file_on_compute(inst[EPHEMERAL][PATH_DST], host_dst, host_compute_dst) qemu_img_src.convert( utl.QCOW2, 'rbd:%s' % inst[EPHEMERAL][PATH_SRC], path_src_id_temp) inst[EPHEMERAL][PATH_SRC] = path_src_id_temp transporter.run(info=info) for inst_id, inst in instances.iteritems(): host_compute_dst = inst[EPHEMERAL][HOST_DST] qemu_img_dst.diff_rebase(inst[EPHEMERAL][BACKING_FILE_DST], inst[EPHEMERAL][PATH_DST], host_compute_dst) def copy_ephemeral_iscsi_to_ceph(self, src_cloud, info): instances = info[utl.INSTANCES_TYPE] qemu_img_src = src_cloud.qemu_img transporter = task_transfer.TaskTransfer( self.init, TRANSPORTER_MAP[ISCSI][CEPH], resource_name=utl.INSTANCES_TYPE, resource_root_name=utl.EPHEMERAL_BODY) for inst_id, inst in instances.iteritems(): path_src = inst[EPHEMERAL][PATH_SRC] path_src_temp_raw = path_src + "." + utl.RAW host_src = inst[EPHEMERAL][HOST_SRC] qemu_img_src.convert(utl.RAW, path_src, path_src_temp_raw, host_src) inst[EPHEMERAL][PATH_SRC] = path_src_temp_raw transporter.run(info=info) @staticmethod def rebase_diff(dst_cloud, info): for instance_id, obj in info[utl.INSTANCES_TYPE].items(): image_id = obj['instance']['image_id'] new_backing_file = hashlib.sha1(image_id).hexdigest() diff = obj['diff'] host = diff['host_dst'] qemu_img = qemu_img_util.QemuImg(dst_cloud.config.dst, dst_cloud.config.migrate, host) diff_path = diff['path_dst'] backing_path = qemu_img.detect_backing_file(diff_path, None) backing_dir = os.path.dirname(backing_path) new_backing_path = os.path.join(backing_dir, new_backing_file) qemu_img.diff_rebase(new_backing_path, diff_path)
""" Weather Underground PWS Metadata Scraping Module Code to scrape PWS network metadata """ import pandas as pd import urllib3 from bs4 import BeautifulSoup as BS import numpy as np import requests # import time def scrape_station_info(state="WA"): """ A script to scrape the station information published at the following URL: https://www.wundergrou
nd.com/weatherstation/ListStations.asp? selectedState=WA&selectedCountry=United+States&MR=1 :param state: US State by which to subset WU S
tation table :return: numpy array with station info """ url = "https://www.wunderground.com/" \ "weatherstation/ListStations.asp?selectedState=" \ + state + "&selectedCountry=United+States&MR=1" raw_site_content = requests.get(url).content soup = BS(raw_site_content, 'html.parser') list_stations_info = soup.find_all("tr") all_station_info = np.array(['id', 'neighborhood', 'city', 'type', 'lat', 'lon', 'elevation']) for i in range(1, len(list_stations_info)): # start at 1 to omit headers station_info = str(list_stations_info[i]).splitlines() # pull out station info station_id = station_info[1].split('ID=')[1].split('"')[0] station_neighborhood = station_info[2].split('<td>')[1] station_neighborhood = station_neighborhood.split('\xa0')[0] station_city = station_info[3].split('<td>')[1].split('\xa0')[0] station_type = station_info[4].split('station-type">')[1] station_type = station_type.split('\xa0')[0] station_id = station_id.strip() station_neighborhood = station_neighborhood.strip() station_city = station_city.strip() station_type = station_type.strip() # grab the latitude, longitude, and elevation metadata lat, lon, elev = scrape_lat_lon_fly(station_id) # put all data into an array header = [station_id, station_neighborhood, station_city, station_type, lat, lon, elev] head_len = len(header) all_station_info = np.vstack([all_station_info, header]) all_station_info = pd.DataFrame(all_station_info) all_station_info.columns = all_station_info.ix[0, :] # do some dataframe editing all_station_info = all_station_info.drop(all_station_info .index[0]).reset_index() all_station_info = all_station_info.drop(all_station_info.columns[0], axis=1) return(all_station_info.to_csv('./data/station_data_from_FUN.csv')) def scrape_lat_lon_fly(stationID): """ Add latitude, longitude and elevation data to the stationID that is inputted as the argument to the function. Boom. :param stationID: str a unique identifier for the weather underground personal weather station :return: (latitude,longitude,elevation) as a tuple. Double Boom. """ http = urllib3.PoolManager(maxsize=10, block=True, cert_reqs='CERT_REQUIRED') try: url = 'https://api.wunderground.com/weatherstation/' \ 'WXDailyHistory.asp?ID={0}&format=XML'.format(stationID) r = http.request('GET', url, preload_content=False) soup = BS(r, 'xml') lat = soup.find_all('latitude')[0].get_text() long = soup.find_all('longitude')[0].get_text() elev = soup.find_all('elevation')[0].get_text() return(lat, long, elev) except Exception as err: lat = 'NA' long = 'NA' elev = 'NA' return(lat, long, elev) def subset_stations_by_coords(station_data, lat_range, lon_range): """ Subset station metadata by latitude and longitude :param station_data_csv: str or Pandas.DataFrame filename of csv with station metadata (from scrape_lat_lon) or Pandas.DataFrame with station metadata (from scrape_lat_lon) :param lat_range: 2-element list min and max latitude range, e.g. [47.4, 47.8] :param lon_range: 2-element list min and max longitude range, e.g. 
[-122.5, -122.2] :return: pandas.DataFrame with station metadata subset by lat/lon bounds """ lat_range.sort() lon_range.sort() if isinstance(station_data, str): df = pd.read_csv(station_data, index_col=1) df = df.dropna(subset=["Latitude", "Longitude"]) elif isinstance(station_data, pd.DataFrame): df = station_data else: pass # TODO: add exception here if type not supported df = df[(df["Latitude"] >= lat_range[0]) & (df["Latitude"] <= lat_range[1]) & (df["Longitude"] >= lon_range[0]) & (df["Longitude"] <= lon_range[1])] return df def get_station_ids_by_coords(station_data_csv, lat_range, lon_range): """ Wrapper around subset_stations_by_coords; returns just the IDs of the stations in a box :param station_data_csv: str filename of csv with station metadata (from scrape_lat_lon) :param lat_range: 2-element list min and max latitude range, e.g. [47.4, 47.8] :param lon_range: 2-element list min and max longitude range, e.g. [-122.5, -122.2] :return: list of station IDs (strings) """ df = subset_stations_by_coords(station_data_csv, lat_range, lon_range) return list(df.index) # TESTING # station_data_csv = "data/station_data.csv" # lat_range = [47.4, 47.8] # lon_range = [-122.5, -122.2] # print(get_station_ids_by_coords(station_data_csv, lat_range, lon_range))
ing-skeinforge-export-module/>' __date__ = '$Date: 2008/21/04 $' __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' def getCraftedTextFromText(gcodeText, repository=None): 'Export a gcode linear move text.' if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'export'): return gcodeText if repository == None: repository = settings.getReadRepository(ExportRepository()) if not repository.activateExport.value: return gcodeText return ExportSkein().getCraftedGcode(repository, gcodeText) def getDescriptionCarve(lines): 'Get the description for carve.' descriptionCarve = '' layerThicknessString = getSettingString(lines, 'carve', 'Layer Height') if layerThicknessString != None: descriptionCarve += layerThicknessString.replace('.', '') + 'h' edgeWidthString = getSettingString(lines, 'carve', 'Edge Width over Height') if edgeWidthString != None: descriptionCarve += 'x%sw' % str(float(edgeWidthString) * float(layerThicknessString)).replace('.', '') return descriptionCarve def getDescriptionFill(lines): 'Get the description for fill.' activateFillString = getSettingString(lines, 'fill', 'Activate Fill') if activateFillString == None or activateFillString == 'False': return '' infillSolidityString = getSettingString(lines, 'fill', 'Infill Solidity') return '_' + infillSolidityString.replace('.', '') + 'fill' def getDescriptionMultiply(lines): 'Get the description for multiply.' activateMultiplyString = getSettingString(lines, 'multiply', 'Activate Multiply') if activateMultiplyString == None or activateMultiplyString == 'False': return '' columnsString = getSettingString(lines, 'multiply', 'Number of Columns') rowsString = getSettingString(lines, 'multiply', 'Number of Rows') if columnsString == '1' and rowsString == '1': return '' return '_%scx%sr' % (columnsString, rowsString) def getDescriptionSpeed(lines): 'Get the description for speed.' activateSpeedString = getSettingString(lines, 'speed', 'Activate Speed') if activateSpeedString == None or activateSpeedString == 'False': return '' feedRateString = getSettingString(lines, 'speed', 'Feed Rate') flowRateString = getSettingString(lines, 'speed', 'Flow Rate') if feedRateString == flowRateString: return '_%sEL' % feedRateString.replace('.0', '') return '_%sE%sL' % (feedRateString.replace('.0', ''), flowRateString.replace('.0', '')) def getDescriptiveExtension(gcodeText): 'Get the descriptive extension.' lines = archive.getTextLines(gcodeText) return '.' + getDescriptionCarve(lines) + getDescriptionFill(lines) + getDescriptionMultiply(lines) + getDescriptionSpeed(lines) def getDistanceGcode(exportText): 'Get gcode lines with distance variable added, this is for if ever there is distance code.' lines = archive.getTextLines(exportText) oldLocation = None for line in lines: splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = None if len(splitLine) > 0: firstWord = splitLine[0] if firstWord == 'G1': location = gcodec.getLocationFromSplitLine(oldLocation, splitLine) if oldLocation != None: distance = location.distance(oldLocation) oldLocation = location return exportText def getFirstValue(gcodeText, word): 'Get the value from the first line which starts with the given word.' for line in archive.getTextLines(gcodeText): splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) if gcodec.getFirstWord(splitLine) == word: return splitLine[1] return '' def getNewRepository(): 'Get new repository.' 
return ExportRepository() def getReplaceableExportGcode(nameOfReplaceFile, replaceableExportGcode): 'Get text with strings replaced according to replace.csv file.' replaceLines = settings.getAlterationLines(nameOfReplaceFile) if len(replaceLines) < 1: return replaceableExportGcode for replaceLine in replaceLines: splitLine = replaceLine.replace('\\n', '\t').split('\t') if len(splitLine) > 0: replaceableExportGcode = replaceableExportGcode.replace(splitLine[0], '\n'.join(splitLine[1 :])) output = cStringIO.StringIO() gcodec.addLinesToCString(output, archive.getTextLines(replaceableExportGcode)) return output.getvalue() def getSelectedPluginModule( plugins ): 'Get the selected plugin module.' for plugin in plugins: if plugin.value: return archive.getModuleWithDirectoryPath( plugin.directoryPath, plugin.name ) return None def getSettingString(lines, procedureName, settingNameStart): 'Get the setting value from the lines, return None if there is no setting starting with that name.' settingNameStart = settingNameStart.replace(' ', '_') for line in lines: splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line) firstWord = None if len(splitLine) > 0: firstWord = splitLine[0] if firstWord == '(<setting>': if len(splitLine) > 4: if splitLine[1] == procedureName and splitLine[2].startswith(settingNameStart): return splitLine[3] elif firstWord == '(</settings>)': return None return None def sendOutputTo(outputTo, text): 'Send output to a file or a standard output.' if outputTo.endswith('stderr'): sys.stderr.write(text) sys.stderr.write('\n') sys.stderr.flush() return if outputTo.endswith('stdout'): sys.stdout.write(text) sys.stdout.write('\n') sys.stdout.flush() return archive.writeFileText(outputTo, text) def writeOutput(fileName, shouldAnalyze=True): 'Export a gcode linear move file.' if fileName == '': return None repository = ExportRepository() settings.getReadRepository(repository) startTime = time.time() print('File ' + archive.getSummarizedFileName(fileName) + ' is being chain exported.') fileNameSuffix
= fileName[: fileName.rfind('.')] if repository.addExportSuffix.value: fileNameSuffix += '_export' gcodeText = gcodec.getGcodeFileText(fileName, '') procedures = skeinforge_craft.getProcedures('export', gcodeText)
gcodeText = skeinforge_craft.getChainTextFromProcedures(fileName, procedures[: -1], gcodeText) if gcodeText == '': return None if repository.addProfileExtension.value: fileNameSuffix += '.' + getFirstValue(gcodeText, '(<profileName>') if repository.addDescriptiveExtension.value: fileNameSuffix += getDescriptiveExtension(gcodeText) if repository.addTimestampExtension.value: fileNameSuffix += '.' + getFirstValue(gcodeText, '(<timeStampPreface>') fileNameSuffix += '.' + repository.fileExtension.value fileNamePenultimate = fileName[: fileName.rfind('.')] + '_penultimate.gcode' filePenultimateWritten = False if repository.savePenultimateGcode.value: archive.writeFileText(fileNamePenultimate, gcodeText) filePenultimateWritten = True print('The penultimate file is saved as ' + archive.getSummarizedFileName(fileNamePenultimate)) exportGcode = getCraftedTextFromText(gcodeText, repository) window = None if shouldAnalyze and repository.analyzeGcode.value: window = skeinforge_analyze.writeOutput(fileName, fileNamePenultimate, fileNameSuffix, filePenultimateWritten, gcodeText) replaceableExportGcode = None selectedPluginModule = getSelectedPluginModule(repository.exportPlugins) if selectedPluginModule == None: replaceableExportGcode = exportGcode else: if selectedPluginModule.globalIsReplaceable: replaceableExportGcode = selectedPluginModule.getOutput(exportGcode) else: selectedPluginModule.writeOutput(fileNameSuffix, exportGcode) if replaceableExportGcode != None: replaceableExportGcode = getReplaceableExportGcode(repository.nameOfReplaceFile.value, replaceableExportGcode) archive.writeFileText( fileNameSuffix, replaceableExportGcode ) print('The exported file is saved as ' + archive.getSummarizedFileName(fileNameSuffix)) if repository.alsoSendOutputTo.value != '': if replaceableExportGcode == None: replaceableExportGcode = selectedPluginModule.getOutput(exportGcode) sendOutputTo(repository.alsoSendOutputTo.value, replaceableExportGcode) print('It took %s to export the file.' % euclidean.getDurationString(time.time() - startTime)) return window class ExportRepository: 'A class to handle the export settings.' def __init__(self): 'Set the default settings, execute title & settings fileName
import logging import os import shutil import subprocess DEVNULL = open(os.devnull, 'wb') class ShellError(Exception): def __init__(self, command, err_no, message=None): self.command = command self.errno = err_no self.message = message def __str__(self): string = "Command '%s' failed with exit code %d" % (self.command, self.errno) if self.message is not None: string += ': ' + repr(self.message) return string def __repr__(self): return self.__str__() def shell_exec(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, background=False, env=None): str_cmd = cmd if isinstance(cmd, str) else ' '.join(cmd) logging.getLogger('shell_exec').debug(str_cmd) message = None if background: if stdout == subprocess.PIPE: stdout = DEVNULL if stderr == subprocess.PIPE: stderr = DEVNULL elif stdin is not None and isinstance(stdin, str): message = stdin stdin = subprocess.PIPE process = subprocess.Popen(cmd, stdin=stdin,
stdout=stdout, stderr=stderr, shell=isinstance(cmd, str), env=env) stdout_dump = None stderr_dump = None return_code = 0 if message is
not None or stdout == subprocess.PIPE or stderr == subprocess.PIPE: stdout_dump, stderr_dump = process.communicate(message) return_code = process.returncode elif not background: return_code = process.wait() if background: return process else: if stdout_dump is not None: stdout_dump = stdout_dump.decode('utf-8') if stderr_dump is not None: stderr_dump = stderr_dump.decode('utf-8') if return_code != 0: raise ShellError(str_cmd, return_code, stderr_dump) else: return stdout_dump, stderr_dump def mem_size(megabytes=True): mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') return mem_bytes / (1024. ** 2) if megabytes else mem_bytes def lc(filename): with open(filename) as stream: count = 0 for _ in stream: count += 1 return count def cat(files, output, buffer_size=10 * 1024 * 1024): with open(output, 'wb') as blob: for f in files: with open(f, 'rb') as source: shutil.copyfileobj(source, blob, buffer_size)
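# --- Illustrative usage (not part of the original module): capture a
# command's output, or detach a long-running command; both commands are
# examples only.
# out, err = shell_exec(['echo', 'hello'])
# proc = shell_exec(['sleep', '60'], background=True)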
import numpy from chainer.backends import cuda from chainer import optimizer _default_hyperparam = optimizer.Hyperparameter() _default_hyperparam.lr = 0.01 _default_hyperparam.alpha = 0.99 _default_hyperparam.eps = 1e-8 _default_hyperparam.eps_inside_sqrt = False class RMSpropRule(optimizer.UpdateRule): """Update rule for RMSprop. See :class:`~chainer.optimizers.RMSprop` for the def
ault values of the hyperparameters. Args: parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyp
erparameter that provides the default values. lr (float): Learning rate. alpha (float): Exponential decay rate of the second order moment. eps (float): Small value for the numerical stability. eps_inside_sqrt (bool): When ``True``, gradient will be divided by :math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When ``False`` (default), gradient will be divided by :math:`\\sqrt{ms} + eps` instead. This option may be convenient for users porting code from other frameworks; see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for details. """ def __init__(self, parent_hyperparam=None, lr=None, alpha=None, eps=None, eps_inside_sqrt=None): super(RMSpropRule, self).__init__( parent_hyperparam or _default_hyperparam) if lr is not None: self.hyperparam.lr = lr if alpha is not None: self.hyperparam.alpha = alpha if eps is not None: self.hyperparam.eps = eps if eps_inside_sqrt is not None: self.hyperparam.eps_inside_sqrt = eps_inside_sqrt def init_state(self, param): xp = cuda.get_array_module(param.data) with cuda.get_device_from_array(param.data): self.state['ms'] = xp.zeros_like(param.data) def update_core_cpu(self, param): grad = param.grad if grad is None: return hp = self.hyperparam eps = grad.dtype.type(hp.eps) if hp.eps != 0 and eps == 0: raise ValueError( 'eps of RMSprop optimizer is too small for {} ({})'.format( grad.dtype.name, hp.eps)) ms = self.state['ms'] ms *= hp.alpha ms += (1 - hp.alpha) * grad * grad if hp.eps_inside_sqrt: denom = numpy.sqrt(ms + eps) else: denom = numpy.sqrt(ms) + eps param.data -= hp.lr * grad / denom def update_core_gpu(self, param): grad = param.grad if grad is None: return hp = self.hyperparam eps = grad.dtype.type(hp.eps) if eps == 0: raise ValueError( 'eps of RMSprop optimizer is too small for {} ({})'.format( grad.dtype.name, hp.eps)) if hp.eps_inside_sqrt: denom = 'sqrt(ms + eps)' else: denom = 'sqrt(ms) + eps' kernel = cuda.elementwise( 'T grad, T lr, T alpha, T eps', 'T param, T ms', '''ms = alpha * ms + (1 - alpha) * grad * grad; param -= lr * grad / ({});'''.format(denom), 'rmsprop') kernel(grad, self.hyperparam.lr, self.hyperparam.alpha, eps, param.data, self.state['ms']) class RMSprop(optimizer.GradientMethod): """RMSprop optimizer. See: T. Tieleman and G. Hinton (2012). Lecture 6.5 - rmsprop, COURSERA: Neural Networks for Machine Learning. Args: lr (float): Learning rate. alpha (float): Exponential decay rate of the second order moment. eps (float): Small value for the numerical stability. eps_inside_sqrt (bool): When ``True``, gradient will be divided by :math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When ``False`` (default), gradient will be divided by :math:`\\sqrt{ms} + eps` instead. This option may be convenient for users porting code from other frameworks; see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for details. """ def __init__(self, lr=_default_hyperparam.lr, alpha=_default_hyperparam.alpha, eps=_default_hyperparam.eps, eps_inside_sqrt=_default_hyperparam.eps_inside_sqrt): super(RMSprop, self).__init__() self.hyperparam.lr = lr self.hyperparam.alpha = alpha self.hyperparam.eps = eps self.hyperparam.eps_inside_sqrt = eps_inside_sqrt lr = optimizer.HyperparameterProxy('lr') alpha = optimizer.HyperparameterProxy('alpha') eps = optimizer.HyperparameterProxy('eps') eps_inside_sqrt = optimizer.HyperparameterProxy('eps_inside_sqrt') def create_update_rule(self): return RMSpropRule(self.hyperparam)
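# --- Usage sketch (not part of the original module; assumes chainer and a
# chainer.Link model are available): attach the optimizer and run one update.
# import chainer.links as L
# model = L.Linear(3, 2)
# optimizer = RMSprop(lr=0.01, alpha=0.99, eps=1e-8)
# optimizer.setup(model)
# ... after a forward/backward pass has populated the gradients:
# optimizer.update()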
""" Kodi urlresolver plugin Copyright (C) 2016 tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re import urllib from urlresolver import common from urlresolver.resolver import UrlResolver, ResolverError class TudouResolver(UrlResolver): name = 'Tudou' domains = ['tudou.com'] pattern = '(?://|\.)(tudou\.com)/programs/view/([0-9a-zA-Z]+)' def __init__(self): self.net = common.Net() def get_media_url(self, host, media_id): web_url = self.get_url(host, media_id) html = self.net.http_GET(web_url).content swf = re.findall('(http.+?\.swf)', html)[0] sid = re.findall('areaCode\s*:\s*"(\d+)', html)[0] oid = re.findall('"k"\s*:\s*(\d+)', html)[0] f_url = 'http://v2.tudou.com/f?id=%s&sid=%s&hd=3&sj=1' % (oid, sid) headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': swf} html = self.net.http_GET(f_url, headers=headers).content url = re.findall('>(http.+?)<', html)[0] url = url.replace('&amp;', '&') video = self.net.http_HEAD(url, headers=headers).get_headers() video = [i for i in video if 'video' in i] if not video: raise ResolverError('Fi
le not found')
        url += '|%s' % urllib.urlencode(headers)
        return url

    def get_url(self, host, media_id):
        return 'http://www.tudou.com/programs/view/%s/' % media_id
import warnings

from .file import File, open, read, create, write, CfitsioError

try:
    from healpix import read_map, read_mask
except ImportError:
    warnings.warn('Cannot import read_map and read_mask if healpy is not installed')
### Copyright
(C) 2010 Peter Williams <peter_ono@users.sourceforge.net> ### ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; ver
sion 2 of the License only. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, write to the Free Software ### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Select/display which patch guards are in force.""" import sys from . import cli_args from . import db_utils from . import msg PARSER = cli_args.SUB_CMD_PARSER.add_parser( "select", description=_("Display/select which patch guards are in force."), epilog=_("""When invoked with no arguments the currently selected guards are listed."""), ) GROUP = PARSER.add_mutually_exclusive_group() GROUP.add_argument( "-n", "--none", help=_("Disable all guards."), dest="opt_none", action="store_true", ) GROUP.add_argument( "-s", "--set", help=_("the list of guards to be enabled/selected."), dest="guards", metavar="guard", action="append", ) def run_select(args): """Execute the "select" sub command using the supplied args""" PM = db_utils.get_pm_db() db_utils.set_report_context(verbose=True) if args.opt_none: return PM.do_select_guards(None) elif args.guards: return PM.do_select_guards(args.guards) else: selected_guards = PM.get_selected_guards() for guard in sorted(selected_guards): sys.stdout.write(guard + "\n") return 0 PARSER.set_defaults(run_cmd=run_select)
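# --- Illustrative invocation (not part of the module): the sub-command parser
# builds an argparse Namespace and dispatches through run_cmd; the guard name
# below is hypothetical.
# args = PARSER.parse_args(["--set", "experimental"])
# args.run_cmd(args)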
# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ctypes from ctypes import util as ctypes_util
_CDLL = None


def get_cdll():
    global _CDLL
    if not _CDLL:
        # NOTE(ralonsoh): from https://docs.python.org/3.6/library/
        # ctypes.html#ctypes.PyDLL: "Instances of this class behave like CDLL
        # instances, except that the Python GIL is not released during the
        # function call, and after the function execution the Python error
        # flag is checked."
        # Check https://bugs.launchpad.net/neutron/+bug/1870352
        _CDLL = ctypes.PyDLL(ctypes_util.find_library('c'), use_errno=True)
    return _CDLL
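# --- Illustrative use (not part of the original module; assumes libc is
# resolvable on this platform): the handle exposes libc symbols directly.
# libc = get_cdll()
# print(libc.getpid())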
try:
    #comment
    x = 1<caret>
    y = 2
except:
    pass
# -*- coding: utf-8 -*- # Author: Mikhail Polyanskiy # Last modified: 2017-04-02 # Original data: Djurišić and Li 1999, https://doi.org/10.1063/1.369370 import numpy as np import matplotlib.pyplot as plt # LD model parameters - Normal polarization (ordinary) ωp = 27 εinf = 1.070 f0 = 0.014 Γ0 = 6.365 ω0 = 0 α0 = 0 f1 = 0.073 Γ1 = 4.102 ω1 = 0.275 α1 = 0.505 f2 = 0.056 Γ2 = 7.328 ω2 = 3.508 α2 = 7.079 f3 = 0.069 Γ3 = 1.414 ω3 = 4.451 α3 = 0.362 f4 = 0.005 Γ4 = 0.46 # 0.046 in the original paper! ω4 = 13.591 α4 = 7.426 f5 = 0.262 Γ5 = 1.862 ω5 = 14.226 α5 = 3.82e-4 f6 = 0.460 Γ6 = 11.922 ω6 = 15.550 α6 = 1.387 f7 = 0.200 Γ7 = 39.091 ω7 = 32.011 α7 = 28.963 def LD(ω): ε = εinf; Γ = Γ0*np.exp(-α0*((ω-ω0)/Γ0)**2) ε -= f0*ωp**2 / ((ω**2-ω0**2)+1j*ω*Γ) Γ = Γ1*np.exp(-α1*((ω-ω1)/Γ1)**2) ε -= f1*ωp**2 / ((ω**2-ω1**2)+1j*ω*Γ) Γ = Γ2*np.exp(-α2*((ω-ω2)/Γ2)**2) ε -= f2*ωp**2 / ((ω**2-ω2**2)+1j*ω*Γ) Γ = Γ3*np.exp(-α3*((ω-ω3)/Γ3)**2) ε -= f3*ωp**2 / ((ω**2-ω3**2)+1j*ω*Γ) Γ = Γ4*np.exp(-α4*((ω-ω4)/Γ4)**2) ε -= f4*ωp**2 / ((ω**2-ω4**2)+1j*ω*Γ) Γ = Γ5*np.exp(-α5*((ω-ω5)/Γ5)**2) ε -= f5*ωp**2 / ((ω**2-ω5**2)+1j*ω*Γ) Γ = Γ6*np.exp(-α6*((ω-ω6)/Γ6)**2) ε -= f6*ωp**2 / ((ω**2-ω6**2)+1j*ω*Γ) Γ = Γ7*np.exp(-α7*((ω-ω7)/Γ7)**2) ε -= f7*ωp**2 / ((ω**2-ω7**2)+1j*ω*Γ) return ε ev_min=0.12 ev_max=40 npoints=1000 eV = np.linspace(ev_min, ev_max, npoints) μm = 4.13566733e-1*2.99792458/eV ε = LD(eV) n = (ε**.5).real k = (ε**.5).imag #============================ DATA OUTPUT ================================= file = open('out.txt', 'w') for i in range(npoints-1, -1, -1): file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i])) file.close() #=============================== PLOT ===============================
====== plt.rc('font', family='Arial', size='14') #plot ε vs eV plt.figure(1) plt.plot(eV, ε.real, label="ε1") plt.plot(eV, ε.imag, label="ε2") plt.xlabel('Photon energy (eV)') plt.ylabel('ε') plt.legend(bbox
_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0) #plot n,k vs eV plt.figure(2) plt.plot(eV, n, label="n") plt.plot(eV, k, label="k") plt.xlabel('Photon energy (eV)') plt.ylabel('n, k') plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0) #plot n,k vs μm plt.figure(3) plt.plot(μm, n, label="n") plt.plot(μm, k, label="k") plt.xlabel('Wavelength (μm)') plt.ylabel('n, k') plt.xscale('log') plt.yscale('log') plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
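# --- Illustrative check (not part of the original script): evaluate the model
# at a single photon energy and print n and k there; 2 eV is an arbitrary pick.
# ε_point = LD(2.0)
# print((ε_point**0.5).real, (ε_point**0.5).imag)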
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-01 15:57
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('pec', '0006_auto_20170601_0719'),
    ]

    operations = [
        migrations.AddField(
            model_name='cours',
            name='type',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AlterField(
            model_name='cours',
            name='objectifs_evaluateurs',
            field=models.ManyToManyField(blank=True, to='pec.ObjectifEvaluateur'),
        ),
    ]
# -*- coding:utf-8 -*- from django import forms try: from django.utils.encoding import smart_unicode as smart_text except ImportError: from django.utils.encoding import smart_text from cached_modelforms.tests.utils import SettingsTestCase from cached_modelforms.tests.models import SimpleModel from cached_modelforms import ( CachedModelChoiceField, CachedModelMultipleChoiceField) class TestFields(SettingsTestCase): def setUp(self): self.settings_manager.set(INSTALLED_APPS=('cached_modelforms.tests',)) self.obj1 = SimpleModel.objects.create(name='name1') self.obj2 = SimpleModel.objects.create(name='name2') self.obj3 = SimpleModel.objects.create(name='name3') self.cached_list = [self.obj1, self.obj2, self.obj3] class FormSingle(forms.Form): obj = CachedModelChoiceField( objects=lambda:self.cached_list, required=False ) class FormMultiple(forms.Form): obj = CachedModelMultipleChoiceField( objects=lambda:self.cached_list, required=False ) self.FormSingle = FormSingle self.FormMultiple = FormMultiple def test_modelchoicefield_objects_arg(self): ''' Test, how the field accepts different types of ``objects`` argument. ''' as_list = CachedModelChoiceField(objects=lambda:self.cached_list) as_iterable = CachedModelChoiceField( objects=lambda:iter(self.cached_list) ) list_of_tuples = [(x.pk, x) for x in self.cached_list] as_list_of_tuples = CachedModelChoiceField( objects=lambda:list_of_tuples ) as_dict = CachedModelChoiceField(objects=lambda:dict(list_of_tuples)) choices_without_empty_label = as_list.choices[:] if as_list.empty_label is not None: choices_without_empty_label.pop(0) # make sure all of the ``choices`` attrs are the same self.assertTrue( as_list.choices == as_iterable.choices == as_list_of_tuples.choices == as_dict.choices ) # same for ``objects`` self.assertTrue( as_list.objects == as_iterable.objects == as_list_of_tuples.objects == as_dict.objects ) # ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}`` self.assertEqual( set(as_list.objects.keys()), set(smart_text(x.pk) for x in self.cached_list) ) self.assertEqual(set(as_list.objects.values()), set(self.cached_list)) # ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]`` self.assertEqual( choices_without_empty_label, [(smart_text(x.pk), smart_text(x)) for x in self.cached_list] ) def test_modelmultiplechoicefield_objects_arg(self): ''' Test, how the field accepts different types of ``objects`` argument. 
''' as_list = CachedModelMultipleChoiceField( objects=lambda:self.cached_list ) as_iterable = CachedModelMultipleChoiceField( objects=lambda:iter(self.cached_list) ) list_of_tuples = [(x.pk, x) for x in self.cached_list] as_list_of_tuples = CachedModelMultipleChoiceField( objects=lambda:list_of_tuples ) as_dict = CachedModelMultipleChoiceField(objects=dict(list_of_tuples)) # make sure all of the ``choices`` attrs are the same self.assertTrue( as_list.choices == as_iterable.choices == as_list_of_tuples.choices == as_dict.choices) # same for ``objects`` self.assertTrue( as_list.objects == as_iterable.objects == as_list_of_tuples.objects == as_dict.objects) # ``objects`` should be a dict as ``{smart_text(pk1): obj1, ...}`` self.assertEqual( set(as_list.objects.keys()), set(smart_text(x.pk) for x in self.cached_list) ) self.assertEqual(set(as_list.objects.values()), set(self.cached_list)) # ``choices`` should be a list as ``[(smart_text(pk1), smart_text(obj1)), ...]`` self.assertEqual( as_list.choices, [(smart_text(x.pk), smart_text(x)) for x in self.cached_list] ) def test_modelchoicefield_behavior(self): ''' Test, how the field handles data in form. ''' # some value form = self.FormSingle({'obj': smart_text(self.obj1.pk)}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['obj'], self.obj1) # no value form = self.FormSingle({}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['obj'], None) # invalid value form = self.FormSingle({'obj': '-1'}) self.assertFalse(form.is_valid()) self.assertTrue(form._errors['obj']) def test_modelmultiplechoicefield_behavior(self): ''' Test, how the field handles data in form. ''' # some value form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), smart_text(self.obj2.pk)]}) self.assertTrue(form.is_valid()) self.assertEqual(set(form.cleaned_data['obj']), set([self.obj1, self.obj2])) # no value form = self.FormMultiple({}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['obj'], []) # invalid value form = self.FormMultiple({'obj': [smart_text(self.obj1.pk), '-1']}) self.assertFalse(form.is_valid()) self.assertTrue(form._errors['obj']) # invalid list form = self.FormMultiple({'obj': '-1'}) self.assertFalse(form.is_valid()) self.assertTrue(form._errors['obj']) def test_modelchoicefield_objects_assignment(self): field = CachedModelChoiceField(objects=self.cached_list) field2 = CachedModelChoiceField(objects=self.cached_list[:2]) field.objects = self.cached_list[:2] self.assertEqual(field.objects, field2.objects) self.assertEqual(field.choices, field2.choices) def test_modelmultiplechoicef
ield_objects_assignment(self): field = CachedModelMultipleChoiceField(objects=self.cached_list) field2 = CachedModelMultipleChoiceField(objects=self.cached_list[:2]) field.objects = self.cac
hed_list[:2] self.assertEqual(field.objects, field2.objects) self.assertEqual(field.choices, field2.choices)
# coding: utf-8 import copy from google.appengine.ext import ndb import flask from apps import auth from apps.auth import helpers from core import task from core import util import config import forms import models bp = flask.Blueprint( 'user', __name__, url_prefix='/user', template_folder='templates', ) ############################################################################### # User List ############################################################################### @bp.route('/', endpoint='list') @auth.admin_required def user_list(): user_dbs, user_cursor, prev_cursor = models.User.get_dbs( email=util.param('email') ) permissions = list(forms.UserUpdateForm._permission_choices) permissions += util.param('permissions', list) or [] return flask.render_template( 'user/admin/list.html', html_class='user-list', title='User List', user_dbs=user_dbs, next_url=util.generate_next_url(user_cursor), prev_url=util.generate_next_url(prev_cursor), permissions=sorted(set(permissions)), api_url=flask.url_for('api.users') ) @bp.route('/<int:user_id>/update/', methods=['GET', 'POST'], endpoint='update') @auth.admin_required def user_update(user_id): user_db = models.User.get_by_id(user_id) if not user_db: flask.abort(404) form = forms.UserUpdateForm(obj=user_db) for permission in user_db.permissions: form.permissions.choices.append((permission, permission)) form.permissions.choices = sorted(set(form.permissions.choices)) if form.validate_on_submit(): if not util.is_valid_username(form.username.data): form.username.errors.append('This username is invalid.') elif not models.User.is_username_available(form.username.data, user_db.key): form.username.errors.append('This username is already taken.') else: form.populate_obj(user_db) if auth.current_user_id() == user_db.key.id(): user_db.admin = True user_db.active = True user_db.put() return flask.redirect(flask.url_for( 'user.list', order='-modified', active=user_db.active, )) return flask.render_template( 'user/admin/update.html', title=user_db.name, html_class='user-update', form=form, user_db=user_db, api_url=flask.url_for('api.user', key=user_db.key.urlsafe()) ) @bp.route('/verify_email/<token>/
') @auth.login_required def verify_email(token): user_db = auth.current_user_db() if user_db.token != token: flask.flash('That link is either invalid or expired.', category='danger') return flask.redirect(flask.url_for('user.profile_update')) user_db.verifi
ed = True user_db.token = util.uuid() user_db.put() flask.flash('Hooray! Your email is now verified.', category='success') return flask.redirect(flask.url_for('user.profile_update')) @bp.route('/merge/', methods=['GET', 'POST']) @auth.admin_required def merge(): user_keys = util.param('user_keys', list) if not user_keys: flask.abort(400) user_db_keys = [ndb.Key(urlsafe=k) for k in user_keys] user_dbs = ndb.get_multi(user_db_keys) if len(user_dbs) < 2: flask.abort(400) user_dbs.sort(key=lambda user_db: user_db.created) merged_user_db = user_dbs[0] auth_ids = [] permissions = [] is_admin = False is_active = False for user_db in user_dbs: auth_ids.extend(user_db.auth_ids) permissions.extend(user_db.permissions) is_admin = is_admin or user_db.admin is_active = is_active or user_db.active if user_db.key.urlsafe() == util.param('user_key'): merged_user_db = user_db auth_ids = sorted(list(set(auth_ids))) permissions = sorted(list(set(permissions))) merged_user_db.permissions = permissions merged_user_db.admin = is_admin merged_user_db.active = is_active merged_user_db.verified = False form_obj = copy.deepcopy(merged_user_db) form_obj.user_key = merged_user_db.key.urlsafe() form_obj.user_keys = ','.join(user_keys) form = forms.UserMergeForm(obj=form_obj) if form.validate_on_submit(): form.populate_obj(merged_user_db) merged_user_db.auth_ids = auth_ids merged_user_db.put() deprecated_keys = [k for k in user_db_keys if k != merged_user_db.key] merge_user_dbs(merged_user_db, deprecated_keys) return flask.redirect( flask.url_for('user.update', user_id=merged_user_db.key.id()), ) return flask.render_template( 'user/admin/merge.html', title='Merge Users', html_class='user-merge', user_dbs=user_dbs, merged_user_db=merged_user_db, form=form, auth_ids=auth_ids, api_url=flask.url_for('api.users', user_keys=','.join(user_keys)) ) @ndb.transactional(xg=True) def merge_user_dbs(user_db, deprecated_keys): # TODO: Merge possible user data before handling deprecated users deprecated_dbs = ndb.get_multi(deprecated_keys) for deprecated_db in deprecated_dbs: deprecated_db.auth_ids = [] deprecated_db.active = False deprecated_db.verified = False if not deprecated_db.username.startswith('_'): deprecated_db.username = '_%s' % deprecated_db.username ndb.put_multi(deprecated_dbs) @bp.route('/profile/') @auth.login_required def profile(): user_db = auth.current_user_db() return flask.render_template( 'user/profile/index.html', title=user_db.name, html_class='profile-view', user_db=user_db, has_json=True, api_url=flask.url_for('api.user', key=user_db.key.urlsafe()), ) @bp.route('/profile/update/', methods=['GET', 'POST']) @auth.login_required def profile_update(): user_db = auth.current_user_db() form = forms.ProfileUpdateForm(obj=user_db) if form.validate_on_submit(): email = form.email.data if email and not user_db.is_email_available(email, user_db.key): form.email.errors.append('This email is already taken.') if not form.errors: send_verification = not user_db.token or user_db.email != email form.populate_obj(user_db) if send_verification: user_db.verified = False task.verify_email_notification(user_db) user_db.put() return flask.redirect(flask.url_for('pages.welcome')) return flask.render_template( 'user/profile/update.html', title=user_db.name, html_class='profile', form=form, user_db=user_db, ) @bp.route('/profile/password/', methods=['GET', 'POST']) @auth.login_required def profile_password(): if not config.CONFIG_DB.has_email_authentication: flask.abort(418) user_db = auth.current_user_db() form = 
forms.ProfilePasswordForm(obj=user_db) if form.validate_on_submit(): errors = False old_password = form.old_password.data new_password = form.new_password.data if new_password or old_password: if user_db.password_hash: if helpers.password_hash(user_db, old_password) != user_db.password_hash: form.old_password.errors.append('Invalid current password') errors = True if not errors and old_password and not new_password: form.new_password.errors.append('This field is required.') errors = True if not (form.errors or errors): user_db.password_hash = helpers.password_hash(user_db, new_password) flask.flash('Your password has been changed.', category='success') if not (form.errors or errors): user_db.put() return flask.redirect(flask.url_for('user.profile')) return flask.render_template( 'user/profile/password.html', title=user_db.name, html_class='profile-password', form=form, user_db=user_db, )
from django.conf import settings
from geopy import distance, geocoders
import pygeoip


def get_geodata_by_ip(addr):
    gi = pygeoip.GeoIP(settings.GEO_CITY_FILE, pygeoip.MEMORY_CACHE)
    geodata = gi.record_by_addr(addr)
    return geodata


def get_geodata_by_region(*args):
    gn = geocoders.GeoNames()
    return gn.geocode(' '.join(args), exactly_one=False)[0]


def get_distance(location1, location2):
    """
    Calculate distance between two locations, given the (lat, long) of each.

    Required Arguments:

        location1
            A tuple of (lat, long).

        location2
            A tuple of (lat, long).

    """
    return distance.distance(location1, location2).miles
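if __name__ == '__main__':
    # Illustrative usage (not part of the original module): distance in miles
    # between two hypothetical (lat, long) pairs.
    seattle = (47.6062, -122.3321)
    portland = (45.5152, -122.6784)
    print(get_distance(seattle, portland))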
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the L
icense at # # http://w
ww.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A handler that displays servers and their instances."""

from google.appengine.tools.devappserver2.admin import admin_request_handler


class ModulesHandler(admin_request_handler.AdminRequestHandler):

  def get(self):
    values = {'modules': self.dispatcher.modules}
    self.response.write(self.render('modules.html', values))
def isPrime(num):
    if num <= 1:
        return False
    i = 2
    while i < num // 2 + 1:
        if num % i == 0:
            return False
        i += 1
    return True


big = 600851475143
test = 1
while test < big:
    test += 1
    if big % test == 0:
        print(test, ' divides evenly')
        div = big // test  # integer division keeps div an int for isPrime
        print('candidate ', div)
        if isPrime(div):
            print('found ', div)
            break
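# --- Faster sketch (not in the original): dividing factors out from the
# bottom up leaves the largest prime factor directly, in O(sqrt(n)) steps.
def largest_prime_factor(n):
    factor = 2
    while factor * factor <= n:
        if n % factor == 0:
            n //= factor  # strip this factor completely before moving on
        else:
            factor += 1
    return n


print(largest_prime_factor(600851475143))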
#!/usr/bin/python3 from scrapers.scrape import scrape_page # if you want to use this scraper without the RESTful api webservice then # change this import: from scrape import scrape_page import re try: import pandas as pd pandasImported = True except ImportError: pandasImported = False BASE_URL = "http://finviz.com/quote.ashx?t=" VALUE_NAMES_XPATH = '//*[@class="snapshot-td2-cp"]/text()' VALUES_XPATH = '//*[@class="snapshot-td2"]/b/text() | //*[@class="snapshot-td2"]/b/*/text()' def get_statistics_table(page): """ This function will return the financial statistics tab
le on a stock's finviz page, if it exists as a Python dictionary :param page: HTML tree structure based on the html markup of the scraped web
page. :return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise will return a empty dictionary """ value_names = page.xpath(VALUE_NAMES_XPATH) values = page.xpath(VALUES_XPATH) values = [value if value != "-" else None for value in values] table = dict(zip(value_names, values)) return table def get_statistic(ticker_symbol, stat_name, page=None): """ This function will get the associated financial statistic from the corresponding finviz page given the statistic's name and the ticker symbol :param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GOOG", "MSFT") :param stat_name: The name of the interested financial statistic (e.g., "P/E", "Price", "Volume"). An exhaustive list of available financial statistics can be found on a stock's finviz page :param page: HTML tree structure based on the html markup of the scraped web page. If one is not passed in the function will scrape the page :return: the value of the interested financial statistic if it exists, otherwise None """ if page is None: page = scrape_page(BASE_URL + ticker_symbol) table = get_statistics_table(page) if stat_name in table.keys() and table[stat_name]: return table[stat_name] else: return None def get_all_statistics(ticker_symbol, page=None): """ This function will get all the associated financial statistics from the correspoding finviz page given the ticker symbol :param ticker_symbol: The ticker symbol of the interested stock (e.g., "AAPL", "GGOG", "MSFT") :param page: HTML tree structure based on the html markup of the scraped page. If one is not passed in the function will scrape the page :return: a dictionary of all the financial statistics listed on a stock's finviz page, otherwise None """ if page is None: page = scrape_page(BASE_URL + ticker_symbol) table = get_statistics_table(page) if table: return table else: return None def get_all_statistics_series(ticker_symbol): """ Return pandas Series of ticker symbol. Try to convert to numeric. """ if not pandasImported: raise Exception("Pandas not installed.") d = get_all_statistics(ticker_symbol) new_dict = {} for k,v in d.items(): if v == None: continue if ('%' in v) and (v.index('%') == (len(v)-1)): # percent new_dict[k + '(%)'] = float(v[:-1]) elif (k == '52W Range'): m = re.match('([0-9\.\-]+) - ([0-9\.\-]+)',v) new_dict['52W Low'] = float(m.group(1)) new_dict['52W High'] = float(m.group(2)) else: try: # remove any commas v = re.sub(',','',v) v = re.sub('B','E9',v) # expoentiate billions v = re.sub('M','E6',v) v = re.sub('K','E3',v) new_dict[k] = float(v) except ValueError: new_dict[k] = v return pd.Series(new_dict) def get_all_statistics_df(symbol_list): """Return a dataframe for a list of symbols. """ series = [] for s in symbol_list: series.append(get_all_statistics_series(s)) return pd.DataFrame(series,index=symbol_list) if __name__ == "__main__": # Test Cases print(get_statistic("AAPL", "P/E")) print(get_statistic("AAPL", "Inst Own")) print(get_statistic("AAPL", "Change")) print(get_statistic("AAPL", "This should return None")) print(get_all_statistics("AAPL"))
from w3lib.html import remove_tags from requests import session, codes from bs4 import BeautifulSoup # Net/gross calculator for student under 26 years class Student: _hours = 0 _wage = 0 _tax_rate = 18 _cost = 20 def __init__(self, hours, wage, cost): self._hours = hours self._wage = wage self._cost = cost def _get_real_tax_rate(self): tax_from = (100 - self._cost) / 100 return tax_from * self._tax_rate / 100 def get_net(self): return self._wage * self._hours def get_gross(self): value = self.get_net() / (1 - self._get_real_tax_rate()) return int(value + 0.5) def get_tax_base(self): return self.get_gross() - self.get_cost() def get_cost(self): return self.get_gross() - self.get_gross() * (100 - self._cost) / 100 def get_tax(self): return self.get_gross() - self.get_net() def get_cost_percentage(self): return self._cost # Net/gross calculator using web client with optional fallback class WebCalculator: _data = None _calculator = None _cost = 0 def __init__(self, hours, wage, cost): from tools import Config self._cost = cost self._data = Config.get_calculator_bot().parse(hours * wage, 1 if cost == 50 else 0) # Check if bot returned some data if self._data == None: self._calculator = Config.get_fallback_calculator()(hours, wage, cost) def get_net(self): if self._data == None: return self._calculator.get_net() return self._data['net'] def get_gross(self): if self._data == None: return self._calculator.get_gross() return self._data['gross'] def get_tax_base(self): if self._data == None: return self._calculator.get_tax_base() return self._data['tax_base'] def get_cost(self): if self._data == None: return self._calculator.get_cost() return self._data['cost'] def get_tax(self): if self._data == None: return self._calculator.get_tax() return self._data['tax'] def get_cost_percentage(self): return self._cost # Bot finding invoice values on wfirma.pl calculator page class WfirmaPlBot: _url = 'https://poradnik.wfirma.pl/staff_contract_headers/evaluate/errand' # Send needed data @staticmethod def parse(net, copyright): from tools import Config # Prepare data for request form_data = Config.get('wfirma.pl') header_data = { 'quota_type': form_data['quota_type'], 'quota': net, 'company_incidental': form_data['company_incidental'], } form_data['copyright'] = copyright with session() as c: # convert data to format viable for url-encoding data = {} for k, v in form_data.items(): data['data[StaffContractErrand][%s]' % k] = v for k, v in h
eader_data.items():
                data['data[StaffContractHeader][%s]' % k] = v

            # Send the request to the server
            try:
                request = c.post(WfirmaPlBot._url, data=data, timeout=3)
            except Exception:
                # narrowed from a bare `except:` so Ctrl-C still propagates
                print('Timed out waiting for the server to respond')
                return None

            # There was some error (most likely server-side), so use offline fallback
            if request.status_code != codes.ok:
                print('An error occurred while fetching the invoice data')
                return None

            return WfirmaPlBot._parse_results(request.text)

    # Parse data returned on request
    @staticmethod
    def _parse_results(request_body):
        # extract wanted data
        soup = BeautifulSoup(request_body.replace('\n', ''), 'xml')
        interesting_columns = soup.findAll('td')[1:15:2]

        # convert to floats
        interesting_columns = list(map(
            lambda x: float(x.get_text().replace(' ', '').replace(',', '.')),
            interesting_columns))

        column_names = [
            'net',
            'gross',
            'all_cost',
            'insurance_base',
            'cost',
            'tax_base',
            'tax',
        ]

        result = {}
        for i in range(0, 7):
            result[column_names[i]] = interesting_columns[i]

        return result


# @todo the deductible costs cannot be set on this calculator yet
class KalkulatoryNfBot:
    _url = 'http://kalkulatory.nf.pl/kalkulator/wynagrodzenie/zlecenie'

    # Send needed data
    @staticmethod
    def parse(net, copyright):
        # Disabled until the deductible-costs problem noted above is solved;
        # callers fall back to the offline calculator.
        return None

        from tools import Config

        form_data = Config.get('kalkulatory.nf.pl')
        form_data = {**form_data, **{
            'stawka': 'net',
            'kwota': net,
            '_method': 'POST',
        }}

        with session() as c:
            # Fix data format
            data = {}
            for k, v in form_data.items():
                data['data[Calculator][%s]' % k] = v

            # Try to make a request
            try:
                request = c.post(KalkulatoryNfBot._url, data=data, timeout=3)
            except Exception:
                print('Timed out waiting for the server to respond')
                return None

            # There was some error (most likely server-side), so use offline fallback
            if request.status_code != codes.ok:
                print('An error occurred while fetching the invoice data')
                return None

            return KalkulatoryNfBot._parse_results(request.text)

    # Parse data returned on request
    @staticmethod
    def _parse_results(request_body):
        # extract wanted data; explicit parser avoids bs4's default-parser warning
        soup = BeautifulSoup(request_body, 'html.parser')
        table = soup.select('div.calc-body.clr')[0].find_next_sibling().findAll('td')[4:]
        del table[3:7]  # remove unneeded columns

        table = list(map(
            lambda x: float(x.get_text().replace(' zł', '').replace(' ', '').replace(',', '.')),
            table))

        column_names = [
            'cost',
            'tax_base',
            'tax',
            'gross',
            'net'
        ]

        result = {}
        for i in range(0, 5):
            result[column_names[i]] = table[i]

        return result
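# A quick offline sanity check of the Student calculator above; the hours,
# hourly wage and cost percentage are illustrative values.
calc = Student(hours=160, wage=30, cost=20)
assert calc.get_net() == 4800
# real tax rate = (100 - 20)/100 * 18% = 14.4%,
# so gross = 4800 / (1 - 0.144) ≈ 5607 (rounded to the nearest unit)
print(calc.get_gross())   # -> 5607
print(calc.get_tax())     # -> 807, i.e. gross - net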
import logging; logger = logging.getLogger("morse." + __name__) import socket import select import json import morse.core.middleware from functools import partial from morse.core import services class MorseSocketServ: def __init__(self, port, component_name): # List of socket clients self._client_sockets = [] self._message_size = 1024 self._component_name = component_name self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._server.bind((str(socket.INADDR_ANY), port)) self._server.listen(1) logger.info("Socket Mw Server now listening on port " + str(port) + \ " for component " + str(component_name) + ".") def __del__(self): """ Terminate the ports used to accept requests """ if self._client_sockets: logger.info("Closing client sockets...") for s in self._client_sockets: s.close() if self._server: logger.info("Shutting down connections to server...") self._server.shutdown(socket.SHUT_RDWR) logger.info("Closing socket server...") self._server.close() del self._server def main_export(self, encode, component_instance): sockets = self._client_sockets + [self._server] try: inputready, outputready, exceptready = select.select(sockets, sockets, [], 0) except select.error: pass except socket.error: pass if self._server in inputready: sock, addr = self._server.accept() self._client_sockets.append(sock) if outputready != []: message = encode(component_instance) for o in outputready: try: o.send(message) except socket.error: self.close_socket(o) def main_read(self, decode, component_instance): sockets = self._client_sockets + [self._server] try: inputready, outputready, exceptready = select.select(sockets, [], [], 0) except select.error: pass except socket.error: pass for i in inputready: if i == self._server: sock, addr = self._server.accept() if self._client_sockets != []: logger.warning("More than one clients for an actuator!!") self._client_sockets.append(sock) else: try: msg = i.recv(self._message_size) logger.debug("received msg %s" % msg) if msg == b'': self.close_socket(i) else: component_instance.local_data = decode(msg) except socket.error as detail: self.close_socket(i) def close_socket(self, sock): self._client_sockets.remove(sock) try: sock.close() except socket.error as error_info: logger.warning("Socket error catched while closing: " + str(error_info)) class MorseSocketClass(morse.core.middleware.MorseMiddlewareClass): """ External communication using sockets. """ def __init__(self): """ Initialize the socket connections """ # Call the constructor of the parent class super(self.__class__,self).__init__() # port -> MorseSocketServ self._server_dict = {} # component name (string) -> Port (int) self._component_nameservice = {} self._base_port = 60000 # Register two special services in the socket service manager: # TODO To use a new special component instead of 'simulation', # uncomment the line :-) # bge.logic.morse_services.register_request_manager_mapping("streams", "SocketRequestManager") services.do_service_registration(self.list_streams, 'simulation') services.do_service_registration(self.get_stream_port, 'simulation') services.do_service_registration(self.get_all_stream_ports, 'simulation') def list_streams(self): """ List all publish streams. """ return list(self._component_nameservice.keys()) def get_stream_port(self, name): """ Get stream port for stream name. 
""" port = -1 try: port = self._component_nameservice[name] except KeyError: pass return port def get_all_stream_ports(self): """ Get stream ports for all streams. """ return self._component_nameservice def register_component(self, component_name, component_instance, mw_data): """ Open the port used to communicate by the specified component. """ # Create a socket server for this component serv = MorseSocketServ(self._base_port, component_name) self._server_dict[self
._base_port] = serv self._component_nameservice[component_name] = self._base_port self._base_port = self._base_port + 1 # Ext
ract the information for this middleware # This will be tailored for each middleware according to its needs function_name = mw_data[1] fun = self._check_function_exists(function_name) if fun != None: # Choose what to do, depending on the function being used # Data read functions if function_name == "read_message": component_instance.input_functions.append(partial(MorseSocketServ.main_read, serv, fun)) # Data write functions elif function_name == "post_message": component_instance.output_functions.append(partial(MorseSocketServ.main_export, serv, fun)) # If the function is external and has already been loaded before else: # Pass by mw_data the generated server mw_data.append(serv) self._add_method(mw_data, component_instance) else: # Pass by mw_data the generated server mw_data.append(serv) self._add_method(mw_data, component_instance) def post_message(self, component_instance): return (json.dumps(component_instance.local_data) + '\n').encode() def read_message(self, msg): return json.loads(msg.decode('utf-8')) def print_open_sockets(self): """ Display a list of all currently opened sockets.""" logger.info("Socket Mid: Currently opened sockets:") for name, socket in self._socket_dict.iteritems(): logger.info(" - Port name '{0}' = '{1}'".format(name, socket))
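# A minimal client sketch for the stream server above: connect to a
# component's port (the first one allocated here is 60000) and read the
# newline-terminated JSON records that post_message() produces. Host and
# port are assumptions for illustration.
import json
import socket

sock = socket.create_connection(("localhost", 60000))
buf = b""
while True:
    data = sock.recv(1024)
    if not data:
        break
    buf += data
    # post_message() terminates each record with '\n'
    while b"\n" in buf:
        line, buf = buf.split(b"\n", 1)
        print(json.loads(line.decode("utf-8")))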
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('interpreter', '0010_auto_20141215_0027'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='band',
            name='members',
        ),
        migrations.AddField(
            model_name='artist',
            name='band',
            # null=True was dropped here: Django ignores null on ManyToManyField
            field=models.ManyToManyField(related_name='members', to='interpreter.Band', blank=True),
            preserve_default=True,
        ),
    ]
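# For context, the models this migration implies might look like the sketch
# below; field types other than the M2M are assumptions, and the real models
# live in the interpreter app.
from django.db import models

class Band(models.Model):
    name = models.CharField(max_length=100)

class Artist(models.Model):
    name = models.CharField(max_length=100)
    # After this migration, membership hangs off Artist, and Band gains
    # the reverse accessor `band.members` via related_name.
    band = models.ManyToManyField(Band, related_name='members', blank=True)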
# # SPDX-FileCopyrightText: 2016 Dmytro Kolomoiets <amerlyq@gmail.com> and contributors. # # SPDX-License-Identifier: GPL-3.0-only # from miur.cursor import state, update, message as msg class Dispatcher: """Apply actions to any unrelated global states""" def _err_wrong_cmd(self): # Move err processing to 'update.py' (make more symmetrical) # _log.error("Wrong cmd: {}".format(cmd)) raise NotImplementedError def focus_node_next(self): if state.cursor is not None and state.entries is not None: state.cursor = min(state.cursor + 1, len(state.entries) - 1) def focus_node_prev(self): if state.cursor is not None and state.entries is not None: state.cursor = max(state.cursor - 1, 0) def focus_node_beg(self): if state.entries is not None: state.cursor = 0 def focus_node_end(self): if state.entries is not None: state.cursor = len(state.entries) - 1 def shift_node_parent(self): # DEV: combine these multiple queue in single request to *core* # state.path = # TEMP: apply directly to global state # TEMP: send msg and wait until fully processed (send-recv-apply) update.handle(msg.NodeGetParentMsg()) update.handle(msg.ListNodeMsg()) state.cursor = 0 if stat
e.entries else None def shift_node_current(self): if state.cursor is None or state.entries is None: return # WARN: must send both (p, e) for *core* # => to check if (p, e) is still available in fs update.handle(msg.NodeGetChildMsg()) update.handle(msg.Li
stNodeMsg()) state.cursor = 0 if state.entries else None
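# Illustrative only: one possible key-to-action wiring for the Dispatcher
# above; the key names are assumptions, not part of miur. Unknown keys fall
# through to the class's own error hook.
dispatcher = Dispatcher()

KEYMAP = {
    'j': dispatcher.focus_node_next,
    'k': dispatcher.focus_node_prev,
    'g': dispatcher.focus_node_beg,
    'G': dispatcher.focus_node_end,
    'h': dispatcher.shift_node_parent,
    'l': dispatcher.shift_node_current,
}

def on_key(key):
    action = KEYMAP.get(key, dispatcher._err_wrong_cmd)
    action()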
# # noif : False # # nostate : False # # insec : False # # i_am_s : False # # i_am_d : False # # travers : True # # source-if : eth0 # # source-rn : 1 # # src-linklocal : False # # src-multicast : False # # destin-if : eth1 # # destin-rn : 3 # # dst-linklocal : False # # dst-multicast : False # /sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1" /sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1" echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK# #$IP6I -p tcp --dport 22 -j ACCEPT #$IP6O -p tcp --sport 22 -j ACCEPT # # allow ping and pong always (al gusto) #$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT #$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT ## #$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT #$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT # #ICMPv6types="${ICMPv6types} destination-unreachable" ICMPv6types="${ICMPv6types} echo-request" ICMPv6types="${ICMPv6types} echo-reply" ICMPv6types="${ICMPv6types} neighbour-solicitation" ICMPv6types="${ICMPv6types} neighbour-advertisement" ICMPv6types="${ICMPv6types} router-solicitation" ICMPv6types="${ICMPv6types} router-advertisement" for icmptype in $ICMPv6types do $IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT $IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT done $IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60 $IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT # CHAINS="" CHAINS="$CHAINS input__" CHAINS="$CHAINS output_" CHAINS="$CHAINS forward" #set -x for chain in $CHAINS do /sbin/ip6tables -E "${chain}_act" "${chain}_old" /sbin/ip6tables -E "${chain}_new" "${chain}_act" done # $I6 -F INPUT $I6 -A INPUT -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6 $I6 -A INPUT -m rt --rt-type 0 -j DROP $I6 -A INPUT -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6 $I6 -A INPUT -m rt --rt-type 2 -j DROP $I6 -A INPUT -i lo -j ACCEPT $I6 -A INPUT --jump input___act # $I6 -F OUTPUT $I6 -A OUTPUT -o lo -j ACCEPT $I6 -A OUTPUT --jump output__act # $I6 -F FORWARD $I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6 $I6 -A FORWARD -m rt --rt-type 0 -j DROP $I6 -A FORWARD --jump forward_act # for chain in $CHAINS do /sbin/ip6tables -F "${chain}_old" /sbin/ip6tables -X "${chain}_old" done $I6 -F logdrop >/dev/null 2>/dev/null $I6 -X logdrop >/dev/null 2>/dev/null $I6 -N logdrop $I6 -A INPUT --jump logdrop $I6 -A OUTPUT --jump logdrop $I6 -A FORWARD --jump logdrop $I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6 $I6 -A logdrop -j DROP # /sbin/ip6tables-save -c >/root/last-filter echo "**********************************************************************" echo "**********************************************************************" echo "## ##" echo "## End of generated filter-rules ##" echo "## ##" echo "**********************************************************************" echo "**********************************************************************" # EOF """ expect = temp.replace("HOME_DIR", home_dir_replacement) value_len = len(value) 
expect_len = len(expect) self.assertEquals(expect_len, value_len) self.assertEquals(expect, value) def test_15_IP6_Filter_mach_output_as_real_file(self): """ ft-15 IP6 Filter mach_output as real file """ debug = True name = "adm6" mach_dir = "~/adm6/desc/%s" % (name) path = homedir(mach_dir) os = "Debian GNU/Linux" fwd = False asym = True ofilename = None fi = IP6_Filter(debug, path, name, os, fwd, asym, None) self.assertIsInstance(fi, IP6_Filter) rule = [] rule.append("should be RuleText") # RuleText ru
le.append(True) # System-Fwd rule.append(1) # Rule-Nr. rule.append(1) # Pair-Nr. rule.append(False) # i_am_s rule.append(True) # i_am_d rule.append(IPv6Network('2001:db8:1::1')) # source rule.append(IPv6Network('2001:d
b8:2::11')) # destin rule.append('eth0') # source-if rule.append(1) # source-rn rule.append('eth1') # destin-if rule.append(3) # destin-rn rule.append('udp') # protocol rule.append('4711') # dport rule.append('accept') # action rule.append('NONEW NOIF INSEC') # options at last fi.rules.append(rule) fi.mach_output(ofilename) value = fi.msg temp = """#!/bin/bash # echo "**********************************************************************" echo "**********************************************************************" echo "## ##" echo "## a d m 6 - A Device Manager for IPv6 packetfiltering ##" echo "## ##" echo "## version: 0.2 ##" echo "## ##" echo "## device-name: adm6 ##" echo "## device-type: Debian GNU/Linux ##" echo "## ##" echo "## date: 2013-03-18 23:38 ##" echo "## author: Johannes Hubertz, hubertz-it-consulting GmbH ##" echo "## ##" echo "## license: GNU general public license version 3 ##" echo "## or any later version ##" echo "## ##" echo "**********************************************************************" echo "**********************************************************************" echo "## ##" echo "## some magic abbreviations follow ##" echo "## ##" # #POLICY_A='ACCEPT' POLICY_D='DROP' # I6='/sbin/ip6tables ' IP6I='/sbin/ip6tables -A input___new ' IP6O='/sbin/ip6tables -A output__new ' IP6F='/sbi
""" Classes and functions for interacting with system management daemons. arkOS Core (c) 2016 CitizenWeb Written by Jacob Cook Licensed under GPLv3, see LICENSE.md """ import ldap import ldap.modlist import xmlrpc.client from .utilities import errors from dbus import SystemBus, Interface class ConnectionsManager: """Manages arkOS connections to system-level processes via their APIs.""" def __init__(self, config, secrets): self.config = config self.secrets = secrets def connect(self): """Initialize the connections.""" self.connect_services() self.connect_ldap() def connect_services(sel
f):
self.DBus = SystemBus() self.SystemD = self.SystemDConnect( "/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager") self.Supervisor = supervisor_connect() def connect_ldap(self): self.LDAP = ldap_connect( config=self.config, passwd=self.secrets.get("ldap") ) def SystemDConnect(self, path, interface): systemd = self.DBus.get_object("org.freedesktop.systemd1", path) return Interface(systemd, dbus_interface=interface) def ldap_connect( uri="", rootdn="", dn="cn=admin", config=None, passwd="", conn_type=""): """ Initialize a connection to arkOS LDAP. :param str uri: LDAP host URI :param str rootdn: Root DN :param str dn: User DN :param Config config: arkOS config to use for default values :param str passwd: Password to use to validate credentials :returns: LDAP connection object """ if not all([uri, rootdn, dn]) and not config: raise errors.InvalidConfigError("No LDAP values passed") uri = uri or config.get("general", "ldap_uri") rootdn = rootdn or config.get("general", "ldap_rootdn") conn_type = conn_type or config.get("general", "ldap_conntype") if conn_type == "dynamic": c = ldap.ldapobject.ReconnectLDAPObject( uri, retry_max=3, retry_delay=5.0) else: c = ldap.initialize(uri) try: c.simple_bind_s("{0},{1}".format(dn, rootdn), passwd) except ldap.INVALID_CREDENTIALS: raise errors.ConnectionError("LDAP", "Invalid username/password") except Exception as e: raise errors.ConnectionError("LDAP") from e if dn != "cn=admin": data = c.search_s("cn=admins,ou=groups,{0}".format(rootdn), ldap.SCOPE_SUBTREE, "(objectClass=*)", ["member"])[0][1]["member"] if "{0},{1}".format(dn, rootdn) not in data: raise errors.ConnectionError("LDAP", "Not an administrator") return c def supervisor_connect(): """ Initialize a connection to Supervisor via XML-RPC API. :returns: XML-RPC connection object """ try: s = xmlrpc.client.Server("http://localhost:9001/RPC2") return s.supervisor except Exception as e: raise errors.ConnectionError("Supervisor") from e
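# Hypothetical usage of ConnectionsManager above; `config` and `secrets`
# stand in for the arkOS Config and secret-store objects this module expects.
conns = ConnectionsManager(config, secrets)
conns.connect()

# The SystemD attribute is a DBus Interface to systemd's Manager object,
# so standard manager calls are available, e.g.:
for unit in conns.SystemD.ListUnits():
    print(unit[0])   # unit name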
"""Commands for argparse for basket command""" import textwrap from PyBake import Path from PyBake.commands import command @command("basket") class BasketModuleManager: """Module Manager for Basket""" longDescription = textwrap.dedent( """ Retrieves pastries from the shop. """) def createArguments(self, basketParser): basketParser.add_argument("shoppingList", nargs="?", default=Path("shoppingList.py"), type=Path, help="The shopping list script that describes which pastries are required. " "Default: 'shoppingList.py'") basketParser.add_argument("--force-download", dest="force", action="append_const", const="download", help="Download all required pastries, whether they exist locally already or not.") basketParser.add_argument("--force-install", dest="force",
action="append_const", const="install", help="Perform an install, regardless whether the pastry is already installed or not.") basketParser.add_argument("--force", dest="force", action="append_const", const="all", help="Implies --force-download and --force-install.") bas
ketParser.set_defaults(func=execute_basket) def execute_basket(args): """Execute the `basket` command.""" from PyBake import log force = args.force or [] del args.force args.forceDownload = any(arg in ("all", "download") for arg in force) args.forceInstall = any(arg in ("all", "install") for arg in force) log.debug(args) from PyBake import basket return basket.run(**vars(args))
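# How the append_const flags above combine (illustrative values only):
#   --force-download           -> args.force == ["download"]
#   --force-install            -> args.force == ["install"]
#   --force                    -> args.force == ["all"]
#   --force-download --force   -> args.force == ["download", "all"]
# execute_basket() then collapses the list into two booleans:
force = ["download", "all"]                                       # example
forceDownload = any(arg in ("all", "download") for arg in force)  # True
forceInstall = any(arg in ("all", "install") for arg in force)    # True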
#! /usr/bin/python3 # # This source code is part of icgc, an ICGC processing pipeline. # # Icgc is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Icgc is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see<http://www.gnu.org/licenses/>. # # Contact: ivana.mihalek@gmail.com # # some pathways do not have the associated genes listed, probably by mistake # examples: # R-HSA-1483171 | Synthesis of BMP # R-HSA-2408499 | Formation of selenosugars for excretion from icgc_utils.common_queries import quotify from icgc_utils.reactome import * from config import Config ############ def print_genes(cursor, gene_ids, depth): if len(gene_ids)<1: print("\t"*depth, "no genes listed") return #print("\t"*depth, "print genes here") gene_id_string = ",".join([quotify(z) for z in gene_ids]) qry = "select ensembl_gene_id, approved_name from hgnc where ensembl_gene_id in (%s)" % gene_id_string gene_names = dict(hard_landing_search(cursor, qry)) qry = "select ensembl_gene_id, ap
proved_symbol from hgnc where ensembl_gene_id in (%s)" % gene_id_string gene_symbols = dict(hard_landing_search(cursor, qry)) for gene in gene_ids: print("\t"*de
pth, gene_symbols.get(gene, ""), gene_names.get(gene, ""))
	return

##############
def characterize_subtree(cursor, graph, pthwy_id, gene_groups, depth, verbose=True):
	# this is the whole subtree
	# children = [node for node in nx.dfs_preorder_nodes(graph, pthwy_id)]
	# A successor of n is a node m such that there exists a directed edge from n to m.
	children = [node for node in graph.successors(pthwy_id)]
	if len(children) == 0: return False
	node_id_string = ",".join([quotify(z) for z in children])
	qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
	children_names = hard_landing_search(cursor, qry_template % node_id_string)
	for child_id, child_name in children_names:
		# number_of_genes = genes related to nodes without descendants
		genes = genes_in_subgraph(cursor, graph, child_id)
		if verbose: print("\t"*depth, child_id, child_name, len(genes))
		if len(genes) < 100:
			if verbose: print_genes(cursor, genes, depth+1)
			gene_groups[child_name] = genes
			continue
		if not characterize_subtree(cursor, graph, child_id, gene_groups, depth+1, verbose=verbose):
			# no further subdivisions
			if verbose: print_genes(cursor, genes, depth+1)
			gene_groups[child_name] = genes
			continue
	return True

#########################################
import numpy as np
from matplotlib import pyplot as plt

def hist_plot(gene_groups):
	data = [len(gene_list) for gene_list in list(gene_groups.values())]
	# fixed bin size
	bins = np.arange(0, 505, 5)  # fixed bin size
	plt.xlim(0, 500)
	plt.hist(data, bins=bins, alpha=0.5)
	# plt.title('')
	plt.xlabel('number of genes in group (bin size = 5)')
	plt.ylabel('number of groups')
	# plt.show()

####################################################
def main():
	verbose = False

	db = connect_to_mysql(Config.mysql_conf_file)
	cursor = db.cursor()
	switch_to_db(cursor, 'icgc')

	# are there children with multiple parents? Yes. So I need some kind of
	# directed graph, rather than a tree.
	qry = "select child, count(distinct parent) as ct from reactome_hierarchy "
	qry += "group by child having ct>1"
	ret = search_db(cursor, qry)
	print("number of children with multiple parents:", len(ret))

	# feed the parent/child pairs as edges into graph
	graph = build_reactome_graph(cursor, verbose=True)
	# candidate roots
	zero_in_degree_nodes = get_roots(graph)

	node_id_string = ",".join([quotify(z) for z in zero_in_degree_nodes])
	qry_template = "select * from reactome_pathways where reactome_pathway_id in (%s)"
	root_names = hard_landing_search(cursor, qry_template % node_id_string)
	gene_groups = {}
	for pthwy_id, name in root_names:
		if "disease" in name.lower(): continue
		if verbose: print(pthwy_id, name)
		characterize_subtree(cursor, graph, pthwy_id, gene_groups, 1, verbose=verbose)

	print("\n===========================")
	max_group = 0
	for group, genes in gene_groups.items():
		groupsize = len(genes)
		if max_group < groupsize: max_group = groupsize
		print(group, len(genes))
	print("\n===========================")
	print("number of groups", len(gene_groups))
	print("largest group", max_group)
	print("\n===========================")
	for pthwy_name, genes in gene_groups.items():
		if len(genes) <= 150: continue
		print("\n", pthwy_name, len(genes))
		#print_genes(cursor, genes, 1)

	#hist_plot(gene_groups)
	cursor.close()
	db.close()

#########################################
if __name__ == '__main__':
	main()
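# A schematic restatement of the grouping rule used by characterize_subtree
# above (not a second implementation): descend the pathway graph and close a
# group at the first node that is small enough, or that has no children left
# to split. The callables genes_of/children_of stand in for the DB queries.
def group_nodes(node, genes_of, children_of, out, limit=100):
	for child in children_of(node):
		genes = genes_of(child)
		if len(genes) < limit or not children_of(child):
			out[child] = genes   # small enough, or a leaf: close the group
		else:
			group_nodes(child, genes_of, children_of, out, limit)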
ile) return set( os.path.realpath(os.path.join(util.GetChromiumSrcDir(), os.pardir, path)) for path in deps_paths) def FindPythonDependencies(module_path): logging.info('Finding Python dependencies of %s' % module_path) # Load the module to inherit its sys.path modifications. imp.load_source( os.path.splitext(os.path.basename(module_path))[0], module_path) # Analyze the module for its imports. finder = modulefinder.ModuleFinder() finder.run_script(module_path) # Filter for only imports in Chromium. for module in finder.modules.itervalues(): # If it's an __init__.py, module.__path__ gives the package's folder. module_path = module.__path__[0] if module.__path__ else module.__file__ if not module_path: continue module_path = os.path.realpath(module_path) if not _InDirectory(module_path, util.GetChromiumSrcDir()): continue yield module_path def FindPageSetDependencies(base_dir): logging.info('Finding page sets in %s' % base_dir) # Add base_dir to path so our imports relative to base_dir will work. sys.path.append(base_dir) tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark, index_by_class_name=True) for test_class in tests.itervalues(): test_obj = test_class() # Ensure the test's default options are set if needed. parser = optparse.OptionParser() test_obj.AddCommandLineArgs(parser) options = optparse.Values() for k, v in parser.get_default_values().__dict__.iteritems(): options.ensure_value(k, v) # Page set paths are relative to their runner script, not relative to us. util.GetBaseDir = lambda: base_dir # TODO: Loading the page set will automatically download its Cloud Storage # deps. This is really expensive, and we don't want to do this by default. page_set = test_obj.CreatePageSet(options) # Add all of its serving_dirs as dependencies. for serving_dir in page_set.serving_dirs: yield serving_dir for page in page_set: if page.is_file: yield page.serving_dir def FindExcludedFiles(files, options): def MatchesConditions(path, conditions): for condition in conditions: if condition(path): return True return False # Define some filters for files. def IsHidden(path): for pathname_component in path.split(os.sep): if pathname_component.startswith('.'): return True return False def IsPyc(path): return os.path.splitext(path)[1] == '.pyc' def IsInCloudStorage(path): return os.path.exists(path + '.sha1') def MatchesExcludeOptions(path): for pattern in options.exclude: if (fnmatch.fnmatch(path, pattern) or fnmatch.fnmatch(os.path.basename(path), pattern)): return True return False # Collect filters we're going to use to exclude files. exclude_conditions = [ IsHidden, IsPyc, IsInCloudStorage, MatchesExcludeOptions, ] # Check all the files against the filters. for path in files: if MatchesConditions(path, exclude_conditions): yield path def FindDependencies(paths, options): # Verify arguments. for path in paths: if not os.path.exists(path): raise ValueError('Path does not exist: %s' % path) dependencies = path_set.PathSet() # Including __init__.py will include Telemetry and its dependencies. # If the user doesn't pass any arguments, we just have Telemetry. dependencies |= FindPythonDependencies(os.path.realpath( os.path.join(util.GetTelemetryDir(), 'telemetry', '__init__.py'))) dependencies |= FindBootstrapDependencies(util.GetTelemetryDir()) # Add dependencies. 
for path in paths: base_dir = os.path.dirname(os.path.realpath(path)) dependencies.add(base_dir) dependencies |= FindBootstrapDependencies(base_dir) dependencies |= FindPythonDependencies(path) if options.include_page_set_data: dependencies |= FindPageSetDependencies(base_dir) # Remove excluded files. dependencies -= FindExcludedFiles(set(dependencies), options) return dependencies def ZipDependencies(paths, dependencies, options): base_dir = os.path.dirname(os.path.realpath(util.GetChromiumSrcDir())) with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file: # Add dependencies to archive. for path in dependencies: path_in_archive = os.path.join( 'telemetry', os.path.relpath(path, base_dir)) zip_file.write(path, path_in_archive) # Add symlinks to executable paths, for ease of use. for path in paths: link_info = zipfile.ZipInfo( os.path.join('telemetry', os.path.basename(path))) link_info.create_system = 3 # Unix attributes. # 010 is regular file, 0111 is the permission bits rwxrwxrwx. link_info.external_attr = 0100777 << 16 # Octal. relative_path = os.path.relpath(path, base_dir) link_script = ( '#!/usr/bin/env python\n\n' 'import os\n' 'import sys\n\n\n' 'script = os.path.join(os.path.dirname(__file__), \'%s\')\n' 'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])' % relative_path) zip_file.writestr(link_info, link_script) # Add gsutil to the archive, if it's available. The gsutil in # depot_tools is modified to allow authentication using prodaccess. # TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps # will include it. Then there will be two copies of gsutil at the sa
me # location in the archive. This can be confusing for users. gsutil_path = os.path.realpath(cloud_storage.FindGsutil()) if cloud_storage.SupportsProdaccess(gsutil_path): gsutil_base_dir = os.path.join(
os.path.dirname(gsutil_path), os.pardir) gsutil_dependencies = path_set.PathSet() gsutil_dependencies.add(os.path.dirname(gsutil_path)) # Also add modules from depot_tools that are needed by gsutil. gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto')) gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'fancy_urllib')) gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator')) gsutil_dependencies -= FindExcludedFiles( set(gsutil_dependencies), options) # Also add upload.py to the archive from depot_tools, if it is available. # This allows us to post patches without requiring a full depot_tools # install. There's no real point in including upload.py if we do not # also have gsutil, which is why this is inside the gsutil block. gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py')) for path in gsutil_dependencies: path_in_archive = os.path.join( 'telemetry', os.path.relpath(util.GetTelemetryDir(), base_dir), 'third_party', os.path.relpath(path, gsutil_base_dir)) zip_file.write(path, path_in_archive) class FindDependenciesCommand(command_line.OptparseCommand): """Prints all dependencies""" @classmethod def AddCommandLineArgs(cls, parser): parser.add_option( '-v', '--verbose', action='count', dest='verbosity', help='Increase verbosity level (repeat as needed).') parser.add_option( '-p', '--include-page-set-data', action='store_true', default=False, help='Scan tests for page set data and include them.') parser.add_option( '-e', '--exclude', action='append', default=[], help='Exclude paths matching EXCLUDE. Can be used multiple times.') parser.add_option( '-z', '--zip', help='Store files in a zip archive at ZIP.') @classmethod def ProcessCommandLineArgs(cls, parser, args): if args.verbosity >= 2: logging.getLogger().setLevel(logging.DEBUG) elif args.verbosity: logging.getLogger().setLevel(logging.INFO) else: logging.getLogger().setLevel(logging.WARNING) def Run(self, args): paths = args.positional_args dependencies = FindDependencies(paths, args) if args.zip: ZipDependencies(paths, dependencies, args) print 'Zip archive writte
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*- # This file is part of Guadalinex # # This software is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this package; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA __author__ = "Antonio Hernández <ahernandez@emergya.com>" __copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>" __license__ = "GPL-2" import firstboot.serverconf from ChefConf import ChefConf from GCCConf import GCCConf from AuthConf import AuthConf from DateSyncConf import DateSyncConf from UsersConf import UsersConf class Singleton: """ A non-thread-safe helper class to ease implementing singletons. This should be used as a decorator -- not a metaclass -- to the class that should be a singleton. The decorated class can define one `__init__` function that takes only the `self` argument. Other than that, there are no restrictions that apply to the decorated class. To get the singleton instance, use the `Instance` method. Trying to use `__call__` will result in a `TypeError` being raised. Limitations: The decorated class cannot be inherited from. """ def __init__(self, decorated): self._decorated = decorated def Instance(self): """ Returns the singleton instance. Upon its first call, it creates a new instance of the decorated class and calls its `__init__` method. On all subsequent calls, the already created instance is returned. """ try: return self._instance except AttributeError: self._instance = self._decorated() return self._instance def __call__(self): raise TypeError('Singletons must be accessed through `Instance()`.') def __instancecheck__(self, inst): return isinstance(inst, self._decorated) @Singleton class ServerConf(): # Version of the configuration JSON file def __init__(self): self._data = {} self.VERSION = '0.2.0' self._data['gem_repo'] = 'http://rubygems.org' self._data['version'] = self.VERSION self._data['organization'] = '' self._chef_conf = ChefConf() self._gcc_conf = GCCConf() self._auth_conf = AuthConf() self._ntp_conf = DateSyncConf() self._users_conf = UsersConf() def load_data(self, conf): msg = 'ServerConf: Key "%s" not found in the configuration file.' try: v = conf['version'] if v != self.VERSION: print 'WARNING: ServerConf and AUTOCONFIG_JSON version mismatch!' except KeyError as e: print msg % ('version',) try: self.set_organization(conf['organization']) except KeyError as e:
print msg % ('organization',) try: self.set_notes(conf['notes']) except KeyError as e:
print msg % ('notes',) try: self.set_gem_repo(conf['gem_repo']) except KeyError as e: print msg % ('gem_repo',) try: self._chef_conf.load_data(conf['chef']) except KeyError as e: print msg % ('chef',) try: self._gcc_conf.load_data(conf['gcc']) except KeyError as e: print msg % ('gcc',) try: self._auth_conf.load_data(conf['auth']) except KeyError as e: print msg % ('auth',) try: self._ntp_conf.load_data(conf['uri_ntp']) except KeyError as e: print msg % ('ntp',) def validate(self): valid = len(self._data['version']) > 0 \ and self._chef_conf.validate() \ and self._auth_conf.validate() \ and self._ntp_conf.validate() \ and self._gcc_conf.validate() return valid def set_gem_repo(self, repo): self._data['gem_repo'] = repo return self def get_gem_repo(self): return self._data['gem_repo'].encode('utf-8') def get_version(self): return self._data['version'].encode('utf-8') def set_version(self, version): self._data['version'] = version return self def get_organization(self): return self._data['organization'].encode('utf-8') def set_organization(self, organization): self._data['organization'] = organization return self def get_notes(self): return self._data['notes'].encode('utf-8') def set_notes(self, notes): self._data['notes'] = notes return self def get_auth_conf(self): return self._auth_conf def get_chef_conf(self): return self._chef_conf def get_ntp_conf(self): return self._ntp_conf def get_gcc_conf(self): return self._gcc_conf def get_users_conf(self): return self._users_conf def set_auth_conf(self, auth_conf): self._auth_conf = auth_conf return self def set_chef_conf(self, chef_conf): self._chef_conf = chef_conf return self def set_ntp_conf(self, ntp_conf): self._ntp_conf = ntp_conf return self def set_gcc_conf(self, gcc_conf): self._gcc_conf = gcc_conf return gcc_conf def set_users_conf(self, user_conf): self._users_conf = user_conf return self
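# A minimal example of the configuration dictionary load_data() expects;
# the key names follow the except clauses above, the values are placeholders.
conf = {
    'version': '0.2.0',
    'organization': 'Example Org',
    'notes': 'lab imaging profile',
    'gem_repo': 'http://rubygems.org',
    'chef': {},      # handed to ChefConf.load_data
    'gcc': {},       # handed to GCCConf.load_data
    'auth': {},      # handed to AuthConf.load_data
    'uri_ntp': {},   # handed to DateSyncConf.load_data
}
server_conf = ServerConf.Instance()
server_conf.load_data(conf)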
VShowID, IMDb, Title, Rating, Votes, TVDB, TMDB #episode: EpisodeID, IMDb, Title, Rating, Votes, TVDB, TMDB, season, episode global num_threads if IMDb == None or IMDb == "" or "tt" not in IMDb: IMDb = None Top250 = None if dType == "movie": Top250 = TVDB if Top250 == None: Top250 = 0 TVDB = None defaultLog( addonLanguage(32507) % ( Title, IMDb, TVDB, TMDB ) ) if IMDb == None: if dType == "tvshow" or dType == "episode": (IMDb, statusInfo) = get_IMDb_ID_from_theTVDB(dType, TVDB) if IMDb == None: (IMDb, add_statusInfo) = get_IMDb_ID_from_TMDb(dType, TMDB, season, episode) statusInfo = statusInfo + "\n" + add_statusInfo elif dType == "movie": statusInfo = "Missing IMDb ID" if IMDb == None: defaultLog( addonLanguage(32503) % ( Title ) ) flock.acquire() try: statusLog( Title + ":\n" + statusInfo ) finally: flock.release() lock.acquire() num_threads -= 1 lock.release() return (updatedRating, updatedVotes, updatedTop250, statusInfo) = parse_IMDb_page(IMDb) if updatedRating == None: defaultLog( addonLanguage(32503) % ( Title ) ) flock.acquire() try: statusLog( Title + ":\n" + statusInfo ) finally: flock.release() else: Rating = str( float( ( "%.1f" % Rating ) ) ) Votes = '{:,}'.format( int ( Votes ) ) defaultLog( addonLanguage(32499) % ( Rating, Votes, Top250 ) ) if (dType != "movie"): updatedTop250 = None if Rating != updatedRating or ( Votes != updatedVotes and \ ((dType == "movie" and IncludeMoviesVotes == "true" ) or ((dType == "tvshow" or dType == "episode") and IncludeTVShowsVotes == "true")) or \ ( dType == "movie" and (Top250 != updatedTop250) and IncludeMoviesTop250 == "true" )): if (dType == "movie"): jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetMovieDetails","params":{"movieid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","top250":' + str( updatedTop250 ) + '},"id":1}' elif (dType == "tvshow"): jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetTVShowDetails","params":{"tvshowid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","uniqueid": {"imdb": "' + IMDb + '"}},"id":1}' elif (dType == "episode"): jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.SetEpisodeDetails","params":{"episodeid":' + str( dbID ) + ',"rating":' + str( updatedRating ) + ',"votes":"' + str( updatedVotes ) + '","uniqueid": {"imdb": "' + IMDb + '"}},"id":1}' debugLog( "JSON Query: " + jSonQuery ) jSonResponse = xbmc.executeJSONRPC( jSonQuery ) jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' ) debugLog( "JSON Response: " + jSonResponse ) defaultLog(
addonLanguage(32500) % ( Title,
str( updatedRating ), str( updatedVotes ), str( updatedTop250 ) ) ) else: defaultLog( addonLanguage(32502) % ( Title ) ) lock.acquire() num_threads -= 1 lock.release() return class Movies: def __init__( self ): defaultLog( addonLanguage(32255) ) statusLog( "\n" + "--> " + addonLanguage(32255).rsplit(' ', 1)[0] ) if ShowNotifications == "true": doNotify( addonLanguage(32255), 5000 ) xbmc.sleep(5000) self.AllMovies = [] self.getDBMovies() self.lock = allocate_lock() self.flock = allocate_lock() self.doUpdate() defaultLog( addonLanguage(32258) ) if ShowNotifications == "true": doNotify( addonLanguage(32258), 5000 ) xbmc.sleep(5000) def getDBMovies( self ): jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetMovies","params":{"properties":["imdbnumber","rating","votes","top250","playcount"]},"id":1}' debugLog( "JSON Query: " + jSonQuery ) jSonResponse = xbmc.executeJSONRPC( jSonQuery ) jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' ) debugLog( "JSON Response: " + jSonResponse ) jSonResponse = jSon.loads( jSonResponse ) try: if jSonResponse['result'].has_key( 'movies' ): for item in jSonResponse['result']['movies']: MovieID = item.get('movieid'); IMDb = item.get('imdbnumber'); Title = item.get('label'); Rating = item.get('rating'); Votes = item.get('votes'); Top250 = item.get('top250'); Watched = item.get('playcount'); self.AllMovies.append( ( MovieID, IMDb, Title, Rating, Votes, Top250, Watched ) ) except: pass def doUpdate( self ): global num_threads AllMovies = len( self.AllMovies ); Counter = 0; if ShowProgress == "true": Progress = xbmcgui.DialogProgressBG() Progress.create( addonLanguage(32261) ) for Movie in self.AllMovies: while num_threads > max_threads: xbmc.sleep(500) if ShowProgress == "true": Counter = Counter + 1 Progress.update( (Counter*100)/AllMovies, addonLanguage(32261), Movie[2] ) if int(Movie[6]) > 0 and ExcludeWatched == "true": defaultLog( addonLanguage(32504) % ( Movie[2] ) ) continue start_new_thread(thread_parse_IMDb_page,("movie",Movie[0],Movie[1],Movie[2],Movie[3],Movie[4],Movie[5],"","","",self.lock,self.flock)) self.lock.acquire() num_threads += 1 self.lock.release() while num_threads > 0: xbmc.sleep(500) if ShowProgress == "true": Progress.close() class TVShows: def __init__( self ): defaultLog( addonLanguage(32256) ) statusLog( "\n" + "--> " + addonLanguage(32256).rsplit(' ', 1)[0] ) if ShowNotifications == "true": doNotify( addonLanguage(32256), 5000 ) xbmc.sleep(5000) self.AllTVShows = [] self.getDBTVShows() self.lock = allocate_lock() self.flock = allocate_lock() self.doUpdateTVShows() defaultLog( addonLanguage(32259) ) if ShowNotifications == "true": doNotify( addonLanguage(32259), 5000 ) xbmc.sleep(5000) def getDBTVShows( self ): jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetTVShows","params":{"properties":["uniqueid","rating","votes","playcount"]},"id":1}' debugLog( "JSON Query: " + jSonQuery ) jSonResponse = xbmc.executeJSONRPC( jSonQuery ) jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' ) debugLog( "JSON Response: " + jSonResponse ) jSonResponse = jSon.loads( jSonResponse ) try: if jSonResponse['result'].has_key( 'tvshows' ): for item in jSonResponse['result']['tvshows']: TVShowID = item.get('tvshowid'); unique_id = item.get('uniqueid'); imdb_id = unique_id.get('imdb'); Title = item.get('label'); Rating = item.get('rating'); Votes = item.get('votes'); tvdb_id = unique_id.get('tvdb'); Watched = item.get('playcount'); tmdb_id = unique_id.get('tmdb'); self.AllTVShows.append( ( TVShowID, imdb_id, Title, Rating, 
Votes, tvdb_id, Watched, tmdb_id ) ) except: pass def doUpdateEpisodes( self, tvshowid, tvshowtitle, tvshowtmdb_id, PCounter ): global num_threads jSonQuery = '{"jsonrpc":"2.0","method":"VideoLibrary.GetEpisodes","params":{"tvshowid":' + str( tvshowid ) + ', "properties":["uniqueid","rating","votes","playcount","episode","season"]},"id":1}' debugLog( "JSON Query: " + jSonQuery ) jSonResponse = xbmc.executeJSONRPC( jSonQuery ) jSonResponse = unicode( jSonResponse, 'utf-8', errors='ignore' ) debugLog( "JSON Response: " + jSonResponse ) jSonResponse = jSon.loads( jSonResponse ) try: if jSonResponse['result'].has_key( 'episodes' ): for item in jSonResponse['result']['episodes']: while num_threads > max_threads: xbmc.sleep(500) EpisodeID = item.get('episodeid'); unique_id = item.get('uniqueid'); IMDb = unique_id.get('imdb') Title = tvshowtitle + " " + str( item.get('season') ) + "x" + str( "%02d" % item.get('episode') ); Rating = item.get('rating'); Votes = item.get('votes'); Watched = item.get('playcount'); TVDB = unique_id.get('tvdb') if ShowProgress == "true": self.Progress.update( PCounter, addonLanguage(32262), Title ) if int(Watched) > 0 and ExcludeWatched == "true": defaultLog( addonLanguage(32504) % ( Title ) ) continue start_new_thread(thread_parse_IMDb_page,("episode",EpisodeID,IMDb,Title,Rating,Votes,TVDB,tvshowtmdb_id,item.get('season'),item.get('episode'),self.lock,self.flock)) self.lock.acquire() num_threads += 1 self.lock.release() except: pass def doUpdateTVShows( self ): global num_t
d also serving static from the same directory. """ import os from path import Path as path from tempfile import mkdtemp from openedx.core.release import RELEASE_LINE CONFIG_ROOT = path(__file__).abspath().dirname() TEST_ROOT = CONFIG_ROOT.dirname().dirname() / "test_root" ########################## Prod-like settings ################################### # These should be as close as possible to the settings we use in production. # As in prod, we read in environment and auth variables from JSON files. # Unlike in prod, we use the JSON files stored in this repo. # This is a convenience for ensuring (a) that we can consistently find the files # and (b) that the files are the same in Jenkins as in local dev. os.environ['SERVICE_VARIANT'] = 'bok_choy' os.environ['CONFIG_ROOT'] = CONFIG_ROOT from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import ######################### Testing overrides #################################### # Redirect to the test_root folder within the repo GITHUB_REPO_ROOT = (TEST_ROOT / "data").abspath() LOG_DIR = (TEST_ROOT / "log").abspath() # Configure modulestore to use the test folder within the repo update_module_store_settings( MODULESTORE, module_store_options={ 'fs_root': (TEST_ROOT / "data").abspath(), }, xml_store_options={ 'data_dir': (TEST_ROOT / "data").abspath(), }, default_store=os.environ.get('DEFAULT_STORE', 'draft'), ) ############################ STATIC FILES ############################# # Enable debug so that static assets are served by Django DEBUG = True # Serve static files at /static directly from the staticfiles directory under test root # Note: optimized files for testing are generated with settings from test_static_optimized STATIC_URL = "/static/" STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', ) STATICFILES_DIRS = [ (TEST_ROOT / "staticfiles" / "lms").abspath(), ] DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' MEDIA_ROOT = TEST_ROOT / "uploads" # Webpack loader must use webpack output setting WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = TEST_ROOT / "staticfiles" / "lms" / "webpack-stats.json" # Don't use compression during tests PIPELINE_JS_COMPRESSOR = None ################################# CELERY ###################################### CELERY_ALWAYS_EAGER = True CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend' BLOCK_STRUCTURES_SETTINGS = dict( # We have CELERY_ALWAYS_EAGER set to True, so there's no asynchronous # code running and the celery routing is unimportant. # It does not make sense to retry. TASK_MAX_RETRIES=0, # course publish task delay is irrelevant is because the task is run synchronously COURSE_PUBLISH_TASK_DELAY=0, # retry delay is irrelevent because we never retry TASK_DEFAULT_RET
RY_DELAY=0, ) ###################### Grade Downloads #
##################### GRADES_DOWNLOAD = { 'STORAGE_TYPE': 'localfs', 'BUCKET': 'edx-grades', 'ROOT_PATH': os.path.join(mkdtemp(), 'edx-s3', 'grades'), } # Configure the LMS to use our stub XQueue implementation XQUEUE_INTERFACE['url'] = 'http://localhost:8040' # Configure the LMS to use our stub EdxNotes implementation EDXNOTES_PUBLIC_API = 'http://localhost:8042/api/v1' EDXNOTES_INTERNAL_API = 'http://localhost:8042/api/v1' EDXNOTES_CONNECT_TIMEOUT = 10 # time in seconds EDXNOTES_READ_TIMEOUT = 10 # time in seconds NOTES_DISABLED_TABS = [] # Silence noisy logs import logging LOG_OVERRIDES = [ ('track.middleware', logging.CRITICAL), ('edxmako.shortcuts', logging.ERROR), ('dd.dogapi', logging.ERROR), ('edx.discussion', logging.CRITICAL), ] for log_name, log_level in LOG_OVERRIDES: logging.getLogger(log_name).setLevel(log_level) # Enable milestones app FEATURES['MILESTONES_APP'] = True # Enable oauth authentication, which we test. FEATURES['ENABLE_OAUTH2_PROVIDER'] = True # Enable pre-requisite course FEATURES['ENABLE_PREREQUISITE_COURSES'] = True # Enable Course Discovery FEATURES['ENABLE_COURSE_DISCOVERY'] = True # Enable student notes FEATURES['ENABLE_EDXNOTES'] = True # Enable teams feature FEATURES['ENABLE_TEAMS'] = True # Enable custom content licensing FEATURES['LICENSING'] = True # Use the auto_auth workflow for creating users and logging them in FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True # Open up endpoint for faking Software Secure responses FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True FEATURES['ENABLE_ENROLLMENT_TRACK_USER_PARTITION'] = True ########################### Entrance Exams ################################# FEATURES['ENTRANCE_EXAMS'] = True FEATURES['ENABLE_SPECIAL_EXAMS'] = True # Point the URL used to test YouTube availability to our stub YouTube server YOUTUBE_PORT = 9080 YOUTUBE['TEST_TIMEOUT'] = 5000 YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT) YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT) YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT) ############################# SECURITY SETTINGS ################################ # Default to advanced security in common.py, so tests can reset here to use # a simpler security model FEATURES['ENFORCE_PASSWORD_POLICY'] = False FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False FEATURES['SQUELCH_PII_IN_LOGS'] = False FEATURES['PREVENT_CONCURRENT_LOGINS'] = False FEATURES['ADVANCED_SECURITY'] = False FEATURES['ENABLE_MOBILE_REST_API'] = True # Show video bumper in LMS FEATURES['ENABLE_VIDEO_BUMPER'] = True # Show video bumper in LMS FEATURES['SHOW_BUMPER_PERIODICITY'] = 1 PASSWORD_MIN_LENGTH = None PASSWORD_COMPLEXITY = {} # Enable courseware search for tests FEATURES['ENABLE_COURSEWARE_SEARCH'] = True # Enable dashboard search for tests FEATURES['ENABLE_DASHBOARD_SEARCH'] = True # discussion home panel, which includes a subscription on/off setting for discussion digest emails. 
FEATURES['ENABLE_DISCUSSION_HOME_PANEL'] = True # Enable support for OpenBadges accomplishments FEATURES['ENABLE_OPENBADGES'] = True # Use MockSearchEngine as the search engine for test scenario SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine" # Path at which to store the mock index MOCK_SEARCH_BACKING_FILE = ( TEST_ROOT / "index_file.dat" ).abspath() # Verify student settings VERIFY_STUDENT["SOFTWARE_SECURE"] = { "API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB", "API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", } # this secret key should be the same as cms/envs/bok_choy.py's SECRET_KEY = "very_secret_bok_choy_key" # Set dummy values for profile image settings. PROFILE_IMAGE_BACKEND = { 'class': 'storages.backends.overwrite.OverwriteStorage', 'options': { 'location': os.path.join(MEDIA_ROOT, 'profile-images/'), 'base_url': os.path.join(MEDIA_URL, 'profile-images/'), }, } # Make sure we test with the extended history table FEATURES['ENABLE_CSMH_EXTENDED'] = True INSTALLED_APPS += ('coursewarehistoryextended',) BADGING_BACKEND = 'lms.djangoapps.badges.backends.tests.dummy_backend.DummyBackend' # Configure the LMS to use our stub eCommerce implementation ECOMMERCE_API_URL = 'http://localhost:8043/api/v2/' LMS_ROOT_URL = "http://localhost:8000" if RELEASE_LINE == "master": # On master, acceptance tests use edX books, not the default Open edX books. HELP_TOKENS_BOOKS = { 'learner': 'http://edx.readthedocs.io/projects/edx-guide-for-students', 'course_author': 'http://edx.readthedocs.io/projects/edx-partner-course-staff', } # TODO: TNL-6546: Remove this waffle and flag code. from django.db.utils import ProgrammingError from waffle.models import Flag try: flag, created = Flag.objects.get_or_create(name='unified_course_view') WAFFLE_OVERRIDE = True except ProgrammingError: # during initial reset_db, the table for the flag doesn't yet exist. pass ##################################################################### # Lastly, see if the developer has any local overrides. try: from .private import * # pylint: disable=import-error exce
import os ADDRESS = '127.0.0.1' PORT = 12345 BACKUP_DIR = 'Backup' BASE_P
ATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
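# Illustrative: with this module saved at e.g. conf/settings.py, BASE_PATH
# points one directory above it, and the constants combine as follows.
import os
backup_path = os.path.join(BASE_PATH, BACKUP_DIR)
address = (ADDRESS, PORT)   # e.g. for socket.bind() / socket.connect()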
import os

import requests

if __name__ == "__main__":
    session = requests.Session()
    data = {"email": "admin@knex.com", "password": "admin"}
    session.post("http://localhost:5000/api/users/login", data=data)
    for file in os.listdir("."):
        # .json and .json5 project files differ only in their content type
        if file.endswith(".json") or file.endswith(".json5"):
            content_type = ('application/json5' if file.endswith(".json5")
                            else 'application/json')
            with open(file, "r") as f:
                text = f.read()
            res = session.post("http://localhost:5000/api/projects",
                               data=text.encode('utf-8'),
                               headers={'Content-Type': content_type})
            print(file + " " + str(res))
    session.get("http://localhost:5000/api/users/logout")
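# A quick smoke test that the API is up before running the bulk upload above;
# the endpoint and credentials mirror the defaults used there and are
# assumptions about the local dev setup.
import requests

r = requests.post("http://localhost:5000/api/users/login",
                  data={"email": "admin@knex.com", "password": "admin"})
print(r.status_code)   # expect 200 when the dev server is running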
if self.closed: raise ValueError('I/O operation on closed file') return False @property def encoding(self): return self.response.charset class ResponseStreamMixin(object): """Mixin for :class:`BaseRequest` subclasses. Classes that inherit from this mixin will automatically get a :attr:`stream` property that provides a write-only interface to the response iterable. """ @cached_property def stream(self): """The response iterable as write-only stream.""" return ResponseStream(self) class CommonRequestDescriptorsMixin(object): """A mixin for :class:`BaseRequest` subclasses. Request objects that mix this class in will automatically get descriptors for a couple of HTTP headers with automatic type conversion. .. versionadded:: 0.5 """ content_type = environ_property('CONTENT_TYPE', doc=''' The Content-Type entity-header field indicates the media type of the entity-body sent to the recipient or, in the case of the HEAD method, the media type that would have been sent had the request been a GET.''') @cached_property def content_length(self): """The Content-Length entity-header field indicates the size of the entity-body in bytes or, in the case of the HEAD method, the size of the entity-body that would have been sent had the request been a GET. """ return get_content_length(self.environ) content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc=''' The Content-Encoding entity-header field is used as a modifier to the media-type. When present, its value indicates what additional content codings have been applied to the entity-body, and thus what decoding mechanisms must be applied in order to obtain the media-type referenced by the Content-Type header field. .. versionadded:: 0.9''') content_md5 = environ_property('HTTP_CONTENT_MD5', doc=''' The Content-MD5 entity-header field, as defined in RFC 1864, is an MD5 digest of the entity-body for the purpose of providing an end-to-end message integrity check (MIC) of the entity-body. (Note: a MIC is good for detecting accidental modification of the entity-body in transit, but is not proof against malicious attacks.) .. versionadded:: 0.9''') referrer = environ_property('HTTP_REFERER', doc=''' The Referer[sic] request-header field allows the client to specify, for the server's benefit, the address (URI) of the resource from which the Request-URI was obtained (the "referrer", although the header field is misspelled).''') date = environ_property('HTTP_DATE', None, parse_date, doc=''' The Date general-header field represents the date and time at which the message was originated, having the same semantics as orig-date in RFC 822.''') max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc=''' The Max-Forwards request-header field provides a mechanism with the TRACE and OPTIONS methods to limit the number of proxies or gateways that can forward the request to the next inbound server.''') def _parse_content_type(self): if not hasattr(self, '_parsed_content_type'): self._parsed_content_type = \ parse_options_header(self.environ.get('CONTENT_TYPE', '')) @property def mimetype(self): """Like :attr:`content_type` but without parameters (eg, without charset, type etc.). For example if the content type is ``text/html; charset=utf-8`` the mimetype would be ``'text/html'``. """ self._parse_content_type() return self._parsed_content_type[0] @property def mimetype_params(self): """The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. 
""" self._parse_content_type() return self._parsed_content_type[1] @cached_property def pragma(self): """The Pragma general-header field is used to include implementation-specific directives that might apply to any recipient along the request/response chain. All pragma directives specify optional behavior from the viewpoint of the protocol; however, some systems MAY require that behavior be consistent with the directives. """ return parse_set_header(self.environ.get('HTTP_PRAGMA', '')) class CommonResponseDescriptorsMixin(object): """A mixin for :class:`BaseResponse` subclasses. Response objects that mix this class in will automatically get descriptors for a couple of HTTP headers with automatic type conversion. """ def _get_mimetype(self): ct = self.headers.get('content-type') if ct: return ct.split(';')[0].strip() def _set_mimetype(self, value): self.headers['Content-Type'] = get_content_type(value, self.charset) def _get_mimetype_params(self): def on_update(d): self.headers['Content-Type'] = \ dump_options_header(self.mimetype, d) d = parse_options_header(self.headers.get('content-type', ''))[1] return CallbackDict(d, on_update) mimetype = property(_get_mimetype, _set_mimetype, doc=''' The mimetype (content type without charset etc.)''') mimetype_params = property(_get_mimetype_params, doc=''' The mimetype parameters as dict. For example if the content type is ``text/html; charset=utf-8`` the params would be ``{'charset': 'utf-8'}``. .. versionadded:: 0.5 ''') location = header_property('Location', doc=''' The Location response-header field is used to redirect the recipient
to a location other than the Request-URI for completion of the request or identification of a new resource.''') age = header_property('Age', None, parse_date, http_date, doc=''' T
he Age response-header field conveys the sender's estimate of the amount of time since the response (or its revalidation) was generated at the origin server. Age values are non-negative decimal integers, representing time in seconds.''') content_type = header_property('Content-Type', doc=''' The Content-Type entity-header field indicates the media type of the entity-body sent to the recipient or, in the case of the HEAD method, the media type that would have been sent had the request been a GET. ''') content_length = header_property('Content-Length', None, int, str, doc=''' The Content-Length entity-header field indicates the size of the entity-body, in decimal number of OCTETs, sent to the recipient or, in the case of the HEAD method, the size of the entity-body that would have been sent had the request been a GET.''') content_location = header_property('Content-Location', doc=''' The Content-Location entity-header field MAY be used to supply the resource location for the entity enclosed in the message when that entity is accessible from a location separate from the requested resource's URI.''') content_encoding = header_property('Content-Encoding', doc=''' The Content-Encoding entity-header field is used as a modifier to the media-type. When present, its value indicates what additional content codings have been applied to the entity-body, and thus what decoding mechanisms must be applied in order to obtain the media-type referenced by the Content-Type header field.''') content_md5 = header_property('Content-MD5', doc=''' The Content-MD5 entity-header field, as defined in RFC 1864, is an MD5 digest of the entity-body for the purpose of providing an end-to-end message integrity check (MIC) of the entity-body. (Note: a MIC is good for detecting accidental modifi
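# The mixins above lean on descriptor factories (environ_property /
# header_property) that proxy a single header with optional load/dump
# converters. A minimal, self-contained sketch of that idea - an
# illustration, not Werkzeug's actual implementation:
class HeaderProperty(object):
    """Descriptor proxying obj.headers[name] with type conversion."""
    def __init__(self, name, default=None, load_func=None, dump_func=None, doc=None):
        self.name = name
        self.default = default
        self.load_func = load_func
        self.dump_func = dump_func
        self.__doc__ = doc

    def __get__(self, obj, owner=None):
        if obj is None:
            return self
        value = obj.headers.get(self.name)
        if value is None:
            return self.default
        return self.load_func(value) if self.load_func else value

    def __set__(self, obj, value):
        if self.dump_func is not None:
            value = self.dump_func(value)
        obj.headers[self.name] = value

# e.g. content_length = HeaderProperty('Content-Length', None, int, str)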
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2012-2013 University of Dundee & Open Microscopy Environment # All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public
License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from builtins import str from builtins import range from builtins import object import os import uuid import shutil import logging import tempfile from scc.git import get_github, get_token_or_user from subprocess import Popen sandbox_url = "https://github.co
m/ome/snoopys-sandbox.git" class SandboxTest(object): def setup_method(self, method): # Basic logging configuration so if a test fails we can see # the statements at WARN or ERROR at least. logging.basicConfig() self.method = method.__name__ self.cwd = os.getcwd() self.token = get_token_or_user(local=False) self.gh = get_github(self.token, dont_ask=True) self.user = self.gh.get_login() self.path = tempfile.mkdtemp("", "sandbox-", ".") self.path = os.path.abspath(self.path) try: with open(os.devnull, 'w') as dev_null: p = Popen(["git", "clone", "-q", sandbox_url, self.path], stdout=dev_null, stderr=dev_null) assert p.wait() == 0 self.sandbox = self.gh.git_repo(self.path) self.origin_remote = "origin" except Exception: try: shutil.rmtree(self.path) finally: # Return to cwd regardless. os.chdir(self.cwd) raise # If we succeed, then we change to this dir. os.chdir(self.path) def shortDescription(self): return None def init_submodules(self): """ Fetch submodules after cloning the repository """ try: with open(os.devnull, 'w') as dev_null: p = Popen(["git", "submodule", "update", "--init"], stdout=dev_null, stderr=dev_null) assert p.wait() == 0 except Exception: os.chdir(self.path) raise def uuid(self): """ Return a string representing a uuid.uuid4 """ return str(uuid.uuid4()) def fake_branch(self, head="master", commits=None): """ Return a local branch with a list of commits, defaults to a single commit adding a unique file """ name = self.uuid() if commits is None: commits = [(name, "hi")] self.sandbox.new_branch(name, head=head) for n in range(len(commits)): fname, txt = commits[n] fname = os.path.join(self.path, fname) with open(fname, 'w') as f: f.write(txt) self.sandbox.add(fname) self.sandbox.commit("%d: Writing %s" % (n, name)) self.sandbox.get_status() return name def add_remote(self): """ Add the remote of the authenticated Github user """ if self.user not in self.sandbox.list_remotes(): remote_url = "https://%s:x-oauth-basic@github.com/%s/%s.git" \ % (self.token, self.user, self.sandbox.origin.name) self.sandbox.add_remote(self.user, remote_url) def rename_origin_remote(self, new_name): """ Rename the remote used for the upstream repository """ self.sandbox.call("git", "remote", "rename", self.origin_remote, new_name) self.origin_remote = new_name def push_branch(self, branch): """ Push a local branch to GitHub """ self.add_remote() self.sandbox.push_branch(branch, remote=self.user) def open_pr(self, branch, base, description=None): """ Push a local branch and open a PR against the selected base """ self.push_branch(branch) if description is None: description = ("This is a call to Sandbox.open_pr by %s" % self.method) new_pr = self.sandbox.origin.open_pr( title="test %s" % branch, description=description, base=base, head="%s:%s" % (self.user, branch)) return new_pr def teardown_method(self, method): try: self.sandbox.cleanup() finally: try: shutil.rmtree(self.path) finally: # Return to cwd regardless. os.chdir(self.cwd)
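# setup_method() and init_submodules() above both run git with its
# output discarded and assert on the exit status. That "quiet
# subprocess" pattern in isolation (helper name is illustrative):
import os
from subprocess import Popen

def run_quiet(*cmd):
    """Run a command, discard stdout/stderr, and return its exit code."""
    with open(os.devnull, 'w') as dev_null:
        return Popen(list(cmd), stdout=dev_null, stderr=dev_null).wait()

# e.g. assert run_quiet("git", "status") == 0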
from django.conf import settings from images.models import S3Connection from shutil import copyfileobj import tinys3 import os import urllib class LocalStorage(object): def __init__(self, filename): self.filename = filename def get_file_data(self): """ Returns the raw data for the specified file """ image_path = os.path.join(settings.MEDIA_ROOT, self.filename) with open(image_path, 'r') as image_file: data = image_file.read() return data def get_remote_path(self): """ Builds a relative remote path by combining the MEDIA_URL setting and the filename """ return '%s%s' % (settings.MEDIA_URL, self.filename) def store(self, file_instance, content_type=None): """ Copy over the `file_instance` to the local storage """ image_path = os.path.join(settings.MEDIA_ROOT, self.filename) with open(image_path, 'w') as fw: copyfileobj(file_instance, fw) @staticmethod def create_argument_slug(arguments_dict): """ Converts an arguments dictionary into a string that can be stored in a filename """ # TODO: is there a possible bug if an invalid key/value is presented?
args_list = ['%s-%s' % (key, value) for key, value in arguments_dict.items()] return '--'.join(args_list) class S3Storage(LocalStorage): def __init__(self, *args, **kwargs): """ Overrides the LocalStorage and initializes a shared S3 connection """ super(S3Storage, self).__init__(*args, **kwargs
) self.conn = tinys3.Connection(self.S3_ACCESS_KEY, self.S3_SECRET_KEY, default_bucket=self.S3_BUCKET, tls=True) def get_remote_path(self): """ Returns an absolute remote path for the filename from the S3 bucket """ return 'https://%s.%s/%s' % (self.conn.default_bucket, self.conn.endpoint, self.filename) def get_file_data(self): """ Returns the raw data for the specified file, downloading it from S3 """ path = self.get_remote_path() data = urllib.urlopen(path).read() return data def store(self, file_instance, content_type=None): """ Copy over the `file_instance` from memory to S3 """ self.conn.upload(self.filename, file_instance, content_type=content_type) @property def S3_BUCKET(self): """ Returns the S3_BUCKET. Checks local environment variables first, database-stored settings second """ return os.environ.get('S3_BUCKET', self.database_settings.bucket) @property def S3_ACCESS_KEY(self): """ Returns the S3_ACCESS_KEY. Checks local environment variables first, database-stored settings second """ return os.environ.get('S3_ACCESS_KEY', self.database_settings.access_key) @property def S3_SECRET_KEY(self): """ Returns the S3_SECRET_KEY. Checks local environment variables first, database-stored settings second """ return os.environ.get('S3_SECRET_KEY', self.database_settings.secret_key) @property def database_settings(self): """ Pulls an S3Connection instance, which contains S3 connection settings, from the database. Result is cached locally """ if getattr(self, '_database_settings', None) is None: self._database_settings = S3Connection.objects.get() return self._database_settings
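# The three S3_* properties above share one lookup rule: prefer a
# process environment variable and fall back to the database-stored
# S3Connection row. The rule as a generic helper (name is ours, not
# part of this module):
import os

def env_or_db(env_key, db_value):
    """Prefer os.environ[env_key]; otherwise use the stored setting."""
    return os.environ.get(env_key, db_value)

# e.g. env_or_db('S3_BUCKET', database_settings.bucket)
# Note that both the properties above and this sketch evaluate the
# database fallback eagerly, even when the environment variable is set.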
import os import uuid from django.db import models from django.utils import timezone from django.contrib.auth.models import User def avatar_upload(instance, filename): ext = filename.split(".")[-1] filename = "%s.%s" % (uuid.uuid4(), ext) return os.path.join("avatars", filename) class Profile(models.Model): user = models.ForeignKey(User) name = models.CharField(max_length=75, blank=True) avatar = models.ImageField(upload_to=
avatar_upload, blank=True) bio = models.TextField(blank=True) affiliation = models.CharField(max_length=1
00, blank=True) location = models.CharField(max_length=100, blank=True) website = models.CharField(max_length=250, blank=True) twitter_username = models.CharField("Twitter Username", max_length=100, blank=True) created_at = models.DateTimeField(default=timezone.now) modified_at = models.DateTimeField(default=timezone.now) def save(self, *args, **kwargs): self.modified_at = timezone.now() return super(Profile, self).save(*args, **kwargs) @property def display_name(self): if self.name: return self.name else: return self.user.username
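# avatar_upload() above discards the client-supplied name and keeps
# only the extension, which avoids collisions and awkward filenames.
# The same idea as a reusable helper (name is ours):
import os
import uuid

def unique_upload_path(subdir, filename):
    """Build '<subdir>/<uuid4>.<ext>' from an uploaded filename."""
    ext = filename.split(".")[-1]
    return os.path.join(subdir, "%s.%s" % (uuid.uuid4(), ext))

# unique_upload_path("avatars", "me.png") -> e.g. 'avatars/3f2c...d1.png'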
) l.close() def writeStoOutputFiles(s, out_bh_file): global best_query_taxon_score, BestInterTaxonScore, options try: (cutoff_exp, cutoff_mant) = best_query_taxon_score[(s.query_id,
s.subject_taxon)] if ( s.query_taxon != s.subject_taxon and s.evalue_exp < options.evalueExponentCutoff and s.percent_match > options.percentMatchCutoff and (s.evalue_mant < 0.01 or s.evalue_exp==cutoff_exp and s.evalue_mant==cutoff_mant) ): out_bh_file.write('{0}\t{1}\t{2}\t{3}\n'.format(s.query_seq, s.subject_id, s.evalue_exp, s.evalue_mant)) except KeyError: pass if options.outInParalogTempFolder:
try: (cutoff_exp, cutoff_mant) = BestInterTaxonScore[s.query_id] if (s.query_taxon == s.subject_taxon and s.query_id != s.subject_id and s.evalue_exp <= options.evalueExponentCutoff and s.percent_match >= options.percentMatchCutoff and (s.evalue_mant < 0.01 or s.evalue_exp<cutoff_exp or (s.evalue_exp == cutoff_exp and s.evalue_mant<=cutoff_mant)) ): # try: # BetterHit[(s.query_seq, s.subject_seq)] += [(s.evalue_exp, s.evalue_mant)] # except KeyError: BetterHit[(s.query_seq, s.subject_seq)] = (s.evalue_exp, s.evalue_mant) except KeyError: # Include the ones with no inter-taxon best score yet if ( s.query_taxon == s.subject_taxon and (options.keepOrthoMCLBug or s.query_id != s.subject_id) and #### THIS IS an OrthoMCL bug s.evalue_exp <= options.evalueExponentCutoff and s.percent_match >= options.percentMatchCutoff ): # try: # BetterHit[(s.query_seq, s.subject_seq)] += [(s.evalue_exp, s.evalue_mant)] # except KeyError: BetterHit[(s.query_seq, s.subject_seq)] = (s.evalue_exp, s.evalue_mant) if __name__ == '__main__': usage = "This is STEP 5.1 of PorthoMCL.\n\nusage: %prog options\n" parser = OptionParser(usage) parser.add_option("-t", "--taxonlist", dest="taxonlistfile", help="A single column file containing the list of taxa to work with") parser.add_option("-x", "--index", dest="index", help="An integer number identifying which taxon to work on [1-size_of_taxon_list]", type='int') parser.add_option('-s', '--inSimSeq', dest='inSimSeq', help='Input folder that contains split similar sequences files (ss files)') parser.add_option('-b', '--outBestHitFolder', dest='outBestHitFolder', help='folder that will store Best Hit files (If not set, current folder)') parser.add_option('-q', '--outInParalogTempFolder', dest='outInParalogTempFolder', help='folder to generate best InParalogTemp evalue scores (pt files) (required only for Paralogs)') parser.add_option("-l", "--logfile", dest="logfile", help="log file (optional, if not supplied STDERR will be used)") parser.add_option('', '--evalueExponentCutoff', dest='evalueExponentCutoff', help='evalue Exponent Cutoff (a negative value, default=-5)', default=-5, type='int') parser.add_option('', '--percentMatchCutoff', dest='percentMatchCutoff', help='percent Match Cutoff (integer value, default=50)', default=50, type='int') parser.add_option('', '--cacheInputFile', dest='cacheInputFile', help='Cache input file or read it again.
(Only use if I/O is very slow)', default=False, action="store_true") parser.add_option('', '--keepOrthoMCLBug', dest='keepOrthoMCLBug', help='Keep the OrthoMCL bug in creating Temporary Paralogs files (pt files) where self hits are included', default=False, action="store_true") (options, args) = parser.parse_args() if len(args) != 0 or not options.taxonlistfile or not options.inSimSeq or not options.index: parser.error("incorrect arguments.\n\t\tUse -h to get more information or refer to the MANUAL.md") log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(1, 'reading taxon list', options.index, '', memory_usage_resource(), datetime.now())) taxon_list = readTaxonList(options.taxonlistfile) if options.index <= 0 or options.index > len(taxon_list): log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format('ERROR', 'Error in index', options.index, '', memory_usage_resource(), datetime.now())) exit() taxon1s = taxon_list[options.index - 1] if options.cacheInputFile: log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format('OPTION', 'Caching Input files', options.index, taxon1s, memory_usage_resource(), datetime.now())) log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(2, 'Reading similar sequences (ss file)', options.index, taxon1s, memory_usage_resource(), datetime.now())) if options.outBestHitFolder and not os.path.exists(options.outBestHitFolder): os.makedirs(options.outBestHitFolder) if options.outInParalogTempFolder and not os.path.exists(options.outInParalogTempFolder): os.makedirs(options.outInParalogTempFolder) input_file_cache = [] with open(os.path.join(options.inSimSeq, taxon1s+'.ss.tsv')) as input_file: for line in input_file: ss = SimilarSequenceLine._fromLine(line) if options.cacheInputFile: input_file_cache += [ss] if ss.query_taxon != ss.subject_taxon: try: best_query_taxon_score[(ss.query_id, ss.subject_taxon)] += [(ss.evalue_mant, ss.evalue_exp)] except KeyError: best_query_taxon_score[(ss.query_id, ss.subject_taxon)] = [(ss.evalue_mant, ss.evalue_exp)] for (query_id,subject_taxon) in best_query_taxon_score: evalues = best_query_taxon_score[(query_id, subject_taxon)] min_exp = sys.maxint #min(evalues, key = lambda t: t[1]) min_mants = [] for (evalue_mant, evalue_exp) in evalues: if evalue_exp < min_exp: min_exp = evalue_exp min_mants += [evalue_mant] if evalue_mant == 0 and evalue_exp == 0: min_mants += [evalue_mant] best_query_taxon_score[(query_id,subject_taxon)] = (min_exp, min(min_mants)) if options.outInParalogTempFolder: # log('{2} | Best Hit | {0} | {1} | * | {3} MB | {4}'.format(3 , 'Creating bestQueryTaxonScore (q-t file)', options.index, memory_usage_resource(), datetime.now() )) # with open(os.path.join(options.outQueryTaxonScoreFolder, taxon1s+'.q-t.tsv'), 'w') as out_file: # for (query_id,subject_taxon) in sorted(best_query_taxon_score): # (ev_exp, ev_mant) = best_query_taxon_score[(query_id,subject_taxon)] # out_file.write('{0}\t{1}\t{2}\t{3}\n'.format(query_id, subject_taxon, ev_exp, ev_mant)) log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(3 , 'Creating BestInterTaxonScore Matrix', options.index, taxon1s, memory_usage_resource(), datetime.now() )) for (query_id,subject_taxon) in best_query_taxon_score: (ev_exp, ev_mant) = best_query_taxon_score[(query_id,subject_taxon)] try: (min_exp, mants) = BestInterTaxonScore[query_id] if ev_exp < min_exp: BestInterTaxonScore[query_id] = (ev_exp, [ev_mant]) elif ev_exp == min_exp: BestInterTaxonScore[query_id] = (ev_exp, mants+[ev_mant]) except KeyError: BestInterTaxonScore[query_id] = (ev_exp,
[ev_mant]) for query_id in BestInterTaxonScore: (ev_exp, ev_mants) = BestInterTaxonScore[query_id] BestInterTaxonScore[query_id] = (ev_exp, min(ev_mants)) log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(4 , 'Creating BestHit file needed for Orthology (bh file)', options.index, taxon1s, memory_usage_resource(), datetime.now() )) BestHit = {} if not options.outBestHitFolder: options.outBestHitFolder = '.' out_bh_file = open(os.path.join(options.outBestHitFolder, taxon1s+'.bh.tsv') ,'w') if not options.cacheInputFile: with open(os.path.join(options.inSimSeq, taxon1s+'.ss.tsv')) as input_file: for line in input_file: s = SimilarSequenceLine._fromLine(line) writeStoOutputFiles(s, out_bh_file) else: for s in input_file_cache: writeStoOutputFiles(s, out_bh_file) out_bh_file.close() if options.outInParalogTempFolder: log('{2} | Best Hit | {0} | {1} | {3} | {4} MB | {5}'.format(5 , 'Creating InParalogTemp file needed for InParalogs (pt file)', options.index, taxon1s, memory_usage_resource(), datetime.now() )) out_pt_file = open(os.path.join(options.outInParalogTempFolder, taxon1s+'.pt.tsv') ,'w') for (seq1, seq2) in BetterHit: if seq1 < seq2: (bh1_evalue_exp, bh1_evalue_mant) = BetterHit[(seq1, seq2)] try: (bh2_evalue_
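# The cutoff checks above compare BLAST e-values as (exponent,
# mantissa) pairs rather than as plain floats, so values like 1e-200
# that would underflow a float still order correctly. The core
# comparison in miniature (simplified - the real tests above also
# special-case mantissas below 0.01 and the self-hit bug flag):
def evalue_is_better_or_equal(a, b):
    """True if e-value a = (exp, mant) is at least as good as b."""
    (a_exp, a_mant), (b_exp, b_mant) = a, b
    return a_exp < b_exp or (a_exp == b_exp and a_mant <= b_mant)

# evalue_is_better_or_equal((-200, 1.0), (-5, 2.0))  -> True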
es, we could make a Patch of exterior ax.add_patch(PolygonPatch(poly, facecolor=facecolor, alpha=alpha)) ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth) for p in poly.interiors: x, y = zip(*p.coords) ax.plot(x, y, color=edgecolor, linewidth=linewidth) def plot_multipolygon(ax, geom, facecolor='red', alpha=0.5, linewidth=1): """ Can safely call with either Polygon or Multipolygon geometry """ if geom.type == 'Polygon': plot_polygon(ax, geom, facecolor=facecolor, alpha=alpha, linewidth=linewidth) elif geom.type == 'MultiPolygon': for poly in geom.geoms: plot_polygon(ax, poly, facecolor=facecolor, alpha=alpha, linewidth=linewidth) def plot_linestring(ax, geom, color='black', linewidth=1): """ Plot a single LineString geometry """ a = np.array(geom) ax.plot(a[:,0], a[:,1], color=color, linewidth=linewidth) def plot_multilinestring(ax, geom, color='red', linewidth=1): """ Can safely call with either LineString or MultiLineString geometry """ if geom.type == 'LineString': plot_linestring(ax, geom, color=color, linewidth=linewidth) elif geom.type == 'MultiLineString': for line in geom.geoms: plot_linestring(ax, line, color=color, linewidth=linewidth) def plot_point(ax, pt, marker='o', markersize=2): """ Plot a single Point geometry """ ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, linewidth=0) def gencolor(N, colormap='Set1'): """ Color generator intended to work with one of the ColorBrewer qualitative color scales. Suggested values of colormap are the following: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 (although any matplotlib colormap will work). """ from matplotlib import cm # don't use more than 9 discrete colors n_colors = min(N, 9) cmap = cm.get_cmap(colormap, n_colors) colors = cmap(range(n_colors)) for i in xrange(N): yield colors[i % n_colors] def plot_series(s, colormap='Set1', alpha=0.5, linewidth=1.0, axes=None): """ Plot a GeoSeries Generate a plot of a GeoSeries geometry with matplotlib. Parameters ---------- Series The GeoSeries to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. colormap : str (default 'Set1') The name of a colormap recognized by matplotlib. Any colormap will work, but categorical colormaps are generally recommended. Examples of useful discrete colormaps include: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 alpha : float (default 0.5) Alpha value for polygon fill regions. Has no effect for lines or points. linewidth : float (default 1.0) Line width for geometries. axes : matplotlib.pyplot.Artist (default None) axes on which to draw the plot Returns ------- matplotlib axes instance """ import matplotlib.pyplot as plt if axes == None: fig = plt.gcf() fig.add_subplot(111, aspect='equal') ax = plt.gca() else: ax = axes color = gencolor(len(s), colormap=colormap) for geom in s: if geom.type == 'Polygon' or geom.type == 'MultiPolygon': plot_multipolygon(ax, geom, facecolor=next(color), alpha=alpha, linewidth=linewidth) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=next(color), linewidth=linewidth) elif geom.type == 'Point': plot_point(ax, geom) plt.draw() return ax def plot_dataframe(s, column=None, colormap=None, alpha=0.5, linewidth=1.0, categorical=False, legend=False, axes=None, scheme=None, vmin=None, vmax=None, k=5): """ Plot a GeoDataFrame Generate a plot of a GeoDataFrame with matplotlib. If a column is specified, the plot coloring will be based on values in that column. 
Otherwise, a categorical plot of the geometries in the `geometry` column will be generated. Parameters ---------- GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column
: str (default None) The name of the column to be plotted. categorical : bool (default False) If False, colormap will reflect numerical values of the column being plotted. For non-numerical columns
(or if column=None), this will be set to True. colormap : str (default 'Set1') The name of a colormap recognized by matplotlib. alpha : float (default 0.5) Alpha value for polygon fill regions. Has no effect for lines or points. linewidth : float (default 1.0) Line width for geometries. legend : bool (default False) Plot a legend (Experimental; currently for categorical plots only) axes : matplotlib.pyplot.Artist (default None) axes on which to draw the plot scheme : pysal.esda.mapclassify.Map_Classifier Choropleth classification schemes vmin : float Minimum value for color map vmax : float Maximum value for color map k : int (default 5) Number of classes (ignored if scheme is None) Returns ------- matplotlib axes instance """ import matplotlib.pyplot as plt from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib import cm if column is None: return plot_series(s.geometry, colormap=colormap, alpha=alpha, linewidth=linewidth, axes=axes) else: if s[column].dtype is np.dtype('O'): categorical = True if categorical: if colormap is None: colormap = 'Set1' categories = list(set(s[column].values)) categories.sort() valuemap = dict([(k, v) for (v, k) in enumerate(categories)]) values = [valuemap[k] for k in s[column]] else: values = s[column] if scheme is not None: values = __pysal_choro(values, scheme, k=k) cmap = norm_cmap(values, colormap, Normalize, cm, mn=vmin, mx=vmax) if axes == None: fig = plt.gcf() fig.add_subplot(111, aspect='equal') ax = plt.gca() else: ax = axes for geom, value in zip(s.geometry, values): if geom.type == 'Polygon' or geom.type == 'MultiPolygon': plot_multipolygon(ax, geom, facecolor=cmap.to_rgba(value), alpha=alpha, linewidth=linewidth) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=cmap.to_rgba(value), linewidth=linewidth) # TODO: color point geometries elif geom.type == 'Point': plot_point(ax, geom) if legend: if categorical: patches = [] for value, cat in enumerate(categories): patches.append(Line2D([0], [0], linestyle="none", marker="o", alpha=alpha, markersize=10, markerfacecolor=cmap.to_rgba(value))) ax.legend(patches, categories, numpoints=1, loc='best') else: # TODO: show a colorbar raise NotImplementedError plt.draw() return ax def __pysal_choro(values, scheme, k=5): """ Wrapper for choropleth schemes from PySAL for use with plot_dataframe Parameters
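# A quick usage sketch for the gencolor() generator defined above: it
# yields RGBA tuples from a qualitative colormap and cycles after nine
# colors, so large layers reuse a small categorical palette. (Like the
# rest of this module it assumes Python 2 - gencolor uses xrange.)
colors = gencolor(12, colormap='Set1')
palette = [next(colors) for _ in range(12)]
# palette[9] equals palette[0] again: the palette wraps after 9 entries.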
must_exist=False) ganesha.os.listdir.assert_called_once_with( self.fake_conf_dir_path) ganesha.LOG.info.assert_called_once_with( mock.ANY, self.fake_conf_dir_path) self.assertFalse(mockopen.called) self.assertFalse(ganesha.ganesha_manager.parseconf.called) self.assertFalse(ganesha.ganesha_utils.patch.called) self.assertEqual({}, ret) def test_load_conf_dir_error_no_conf_dir_must_exist_true(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=OSError(errno.ENOENT, os.strerror(errno.ENOENT))))
self.assertRais
es(OSError, self._helper._load_conf_dir, self.fake_conf_dir_path) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_load_conf_dir_error_conf_dir_present_must_exist_false(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))) self.assertRaises(OSError, self._helper._load_conf_dir, self.fake_conf_dir_path, must_exist=False) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_load_conf_dir_error(self): self.mock_object( ganesha.os, 'listdir', mock.Mock(side_effect=RuntimeError('fake error'))) self.assertRaises(RuntimeError, self._helper._load_conf_dir, self.fake_conf_dir_path) ganesha.os.listdir.assert_called_once_with(self.fake_conf_dir_path) def test_init_helper(self): mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=mock_template)) self.mock_object(self._helper, '_default_config_hook') ret = self._helper.init_helper() ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, 'faketag', ganesha_config_path='/fakedir0/fakeconfig', ganesha_export_dir='/fakedir0/export.d', ganesha_db_path='/fakedir1/fake.db', ganesha_service_name='ganesha.fakeservice') self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self.assertFalse(self._helper._default_config_hook.called) self.assertEqual(mock_ganesha_manager, self._helper.ganesha) self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) def test_init_helper_conf_dir_empty(self): mock_template = mock.Mock() mock_ganesha_manager = mock.Mock() self.mock_object(ganesha.ganesha_manager, 'GaneshaManager', mock.Mock(return_value=mock_ganesha_manager)) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value={})) self.mock_object(self._helper, '_default_config_hook', mock.Mock(return_value=mock_template)) ret = self._helper.init_helper() ganesha.ganesha_manager.GaneshaManager.assert_called_once_with( self._execute, 'faketag', ganesha_config_path='/fakedir0/fakeconfig', ganesha_export_dir='/fakedir0/export.d', ganesha_db_path='/fakedir1/fake.db', ganesha_service_name='ganesha.fakeservice') self._helper._load_conf_dir.assert_called_once_with( '/fakedir2/faketempl.d', must_exist=False) self._helper._default_config_hook.assert_called_once_with() self.assertEqual(mock_ganesha_manager, self._helper.ganesha) self.assertEqual(mock_template, self._helper.export_template) self.assertIsNone(ret) def test_default_config_hook(self): fake_template = {'key': 'value'} self.mock_object(ganesha.ganesha_utils, 'path_from', mock.Mock(return_value='/fakedir3/fakeconfdir')) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=fake_template)) ret = self._helper._default_config_hook() ganesha.ganesha_utils.path_from.assert_called_once_with( ganesha.__file__, 'conf') self._helper._load_conf_dir.assert_called_once_with( '/fakedir3/fakeconfdir') self.assertEqual(fake_template, ret) def test_fsal_hook(self): ret = self._helper._fsal_hook('/fakepath', self.share, self.access) self.assertEqual({}, ret) def test_allow_access(self): mock_ganesha_utils_patch = mock.Mock() def fake_patch_run(tmpl1, tmpl2, tmpl3): mock_ganesha_utils_patch(copy.deepcopy(tmpl1), tmpl2, tmpl3) tmpl1.update(tmpl3) self.mock_object(self._helper.ganesha, 'get_export_id', 
mock.Mock(return_value=101)) self.mock_object(self._helper, '_fsal_hook', mock.Mock(return_value='fakefsal')) self.mock_object(ganesha.ganesha_utils, 'patch', mock.Mock(side_effect=fake_patch_run)) ret = self._helper._allow_access(fake_basepath, self.share, self.access) self._helper.ganesha.get_export_id.assert_called_once_with() self._helper._fsal_hook.assert_called_once_with( fake_basepath, self.share, self.access) mock_ganesha_utils_patch.assert_called_once_with( {}, self._helper.export_template, fake_output_template) self._helper._fsal_hook.assert_called_once_with( fake_basepath, self.share, self.access) self._helper.ganesha.add_export.assert_called_once_with( fake_export_name, fake_output_template) self.assertIsNone(ret) def test_allow_access_error_invalid_share(self): access = fake_share.fake_access(access_type='notip') self.assertRaises(exception.InvalidShareAccess, self._helper._allow_access, '/fakepath', self.share, access) def test_deny_access(self): ret = self._helper._deny_access('/fakepath', self.share, self.access) self._helper.ganesha.remove_export.assert_called_once_with( 'fakename--fakeaccid') self.assertIsNone(ret) @ddt.data({}, {'recovery': False}) def test_update_access_for_allow(self, kwargs): self.mock_object(self._helper, '_allow_access') self.mock_object(self._helper, '_deny_access') self._helper.update_access( '/some/path', 'aShare', add_rules=["example.com"], delete_rules=[], **kwargs) self._helper._allow_access.assert_called_once_with( '/some/path', 'aShare', 'example.com') self.assertFalse(self._helper._deny_access.called) self.assertFalse(self._helper.ganesha.reset_exports.called) self.assertFalse(self._helper.ganesha.restart_service.called) def test_update_access_for_deny(self): self.mock_object(self._helper, '_allow_access') self.mock_object(self._helper, '_deny_access') self._helper.update_access( '/some/path', 'aShare', [], delete_rules=["example.com"]) self._helper._deny_access.assert_called_once_with( '/some/path', 'aShare', 'example.com') self.assertFalse(self._helper._allow_access.called) self.assertFalse(self._helper.ganesha.reset_exports.called) self.assertFalse(self._helper.ganesha.restart_service.called) def test_update_access_recovery(self): self.mock_object(self._helper, '_allow_access') self.mock_object(self._helper, '_deny_access') self._helper.update_access( '/some/path', 'aShare', add_rules=["example.com"], delete_rules=[],
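# test_allow_access() above wraps ganesha_utils.patch in a side_effect
# that records a deepcopy of its first argument: the real function
# mutates that dict in place, so asserting on the original reference
# would only ever see the post-mutation value. The trick in isolation
# (stdlib unittest.mock here; the tests above use the mock package,
# which has the same API):
import copy
from unittest import mock

recorder = mock.Mock()

def recording_side_effect(d, update):
    recorder(copy.deepcopy(d))   # snapshot *before* the mutation
    d.update(update)

patched = mock.Mock(side_effect=recording_side_effect)
patched({}, {'k': 'v'})
recorder.assert_called_once_with({})   # saw the pre-update dict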
# setup.py: based off setup.py for toil-vg, modified to install this pipeline # instead. import sys import os # Get the local version.py and not any other version module execfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "version.py")) from setuptools import find_packages, setup from setuptools.command.test import test as TestCommand kwargs = dict( name='hgvm-builder', version=version, description="Human Genome Variation Map construction kit", author='Adam Novak', author_email='anovak@soe.ucsc.edu', url="https://github.com/BD2KGenomics/hgvm-builder", install_requires=[package + ver for package, ver in required_versions.iteritems()], dependency_links = dependency_links, tests_require=['pytest==2.8.3'], package_dir={'': 'src'}, packages=find_packages('src'), entry_points={ 'console_scripts': [ 'build-hgvm = hgvmbuilder.build:entrypoint', 'copy-hgvm = hgvmbuilder.parallelcopy:entrypoint', 'import-sam-hgvm = hgvmbuilder.importsam:entrypoint' ]}) class PyTest(TestCommand): user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_op
tions(self) self.test_args = [] self.test_suite = True def run_tests(self): import pytest # Sanitize command line arguments to avoid confusing Toil code # attempting to parse them sys.argv[1:] = [] errno = pytest.main(self.pytest_args) sys.exit(errno) kwargs['cmdclass'] = {'test': PyTest} setup(**kwargs) # When we run setup, tell the user they need
a good Toil with cloud support print(""" Thank you for installing the hgvm-builder pipeline! If you want to run this Toil-based pipeline on a cluster in a cloud, please install Toil with the appropriate extras. To install AWS/EC2 support, for example, run pip install toil[aws,mesos]{} on every EC2 instance. For Microsoft Azure, deploy your cluster using the Toil template at https://github.com/BD2KGenomics/toil/tree/master/contrib/azure For more information, please refer to Toil's documentation at http://toil.readthedocs.io/en/latest/installation.html To start building HGVMs, run build-hgvm --help 2>&1 | less """.format(required_versions['toil']))
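# With the PyTest command class wired into cmdclass above, the test
# suite runs through setuptools, and -a/--pytest-args forwards extra
# arguments to pytest.main(), e.g. (the test name is illustrative):
#
#   python setup.py test -a "-k test_build"
#
# run_tests() also blanks sys.argv first so Toil's own argument
# parsing does not trip over the py.test flags.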
# -*- coding: utf-8 -*- """
QGIS Unit tests for QgsMultiEditToolButton. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Nyall Dawson' __date__ = '16/03/2016' __copyright__ = 'Copyright 2016, The QGIS Project' import qgis # NOQA switch sip api from qgis.gui impor
t QgsMultiEditToolButton from qgis.testing import start_app, unittest start_app() class TestQgsMultiEditToolButton(unittest.TestCase): def test_state_logic(self): """ Test that the logic involving button states is correct """ w = QgsMultiEditToolButton() self.assertEqual(w.state(), QgsMultiEditToolButton.Default) # set is changed should update state to changed w.setIsChanged(True) self.assertEqual(w.state(), QgsMultiEditToolButton.Changed) w.setIsChanged(False) self.assertEqual(w.state(), QgsMultiEditToolButton.Default) # resetting changes should fall back to default state w.setIsChanged(True) w.resetChanges() self.assertEqual(w.state(), QgsMultiEditToolButton.Default) # setting changes committed should result in default state w.setIsChanged(True) w.changesCommitted() self.assertEqual(w.state(), QgsMultiEditToolButton.Default) # Test with mixed values w.setIsMixed(True) self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues) # changed state takes priority over mixed state w.setIsChanged(True) self.assertEqual(w.state(), QgsMultiEditToolButton.Changed) w.setIsChanged(False) # should reset to mixed state self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues) # resetting changes should fall back to mixed state w.setIsChanged(True) w.resetChanges() self.assertEqual(w.state(), QgsMultiEditToolButton.MixedValues) # setting changes committed should result in default state w.setIsChanged(True) w.changesCommitted() self.assertEqual(w.state(), QgsMultiEditToolButton.Default) if __name__ == '__main__': unittest.main()
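# The priorities exercised above reduce to: a pending change wins over
# mixed values, which wins over the default state. The same resolution
# as a plain function (a pocket restatement, not QGIS API):
def resolve_state(is_changed, is_mixed):
    if is_changed:
        return 'Changed'
    if is_mixed:
        return 'MixedValues'
    return 'Default'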
import os import json import logging import ConfigParser from framework.db import models from framework.dependency_management.dependency_resolver import BaseComponent from framework.dependency_management.interfaces import MappingDBInterface from framework.lib.exceptions import InvalidMappingReference class MappingDB(BaseComponent, MappingDBInterface): COMPONENT_NAME = "mapping_db" def __init__(self): """ The mapping_types attributes contain the unique mappings in memory """ self.register_in_service_locator() self.config = self.get_component("config") self.db = self.get_component("db") self.mapping_types = [] self.error_handler = self.get_component("error_handler") def init(self): self.LoadMappingDBFromFile(self.config.get_profile_path("MAPPING_PROFILE")) def LoadMappingDBFromFile(self, file_path): """ This needs to be a list instead of a dictionary to preserve order in python < 2.7 """ file_path = self.config.select_user_or_default_config_path(file_path) logging.info("Loading Mapping from: %s..", file_path) config_parser = ConfigParser.RawConfigParser() # Otherwise all the keys are converted to lowercase xD config_parser.optionxform = str if not os.path.isfile(file_path): # check if the mapping file exists self.error_handler.FrameworkAbort("Mapping file not found at: %s" % file_path) config_parser.read(file_path) for owtf_code in config_parser.sections(): mappings = {} category = None for mapping_type, data in config_parser.items(owtf_code): if mapping_type != 'category': if mapping_type not in self.mapping_types: self.mapping_types.append(mapping_type) mapped_code, mapped_name = data.split('_____') mappings[mapping_type] = [mapped_code, mapped_name] else: category = data self.db.session.merge(models.Mapping(owtf_code=owtf_code, mappings=json.dumps(mappings), category=category)) self.db.session.commit() def DeriveMappingDict(self, obj): if obj: pdict = dict(obj.__dict__) pdict.pop("_sa_instance_state", None) # If output is present, json decode it if pdict.get("mappings", None): pdict["mappings"] = json.loads(pdict["mappings"]) return pdict def DeriveMappingDicts(self, obj_list): dict_list = [] for obj in obj_list: dict_list.append(self.DeriveMappingDict(obj)) return dict_list def GetMappingTypes(self): """ In memory data saved when loading db """ return se
lf.mapping_types def GetMappings(self, mapping_type): if mapping_type in self.mapping_types: mapping_objs = self.db.session.query(models.Mapping).all() mappings = {} for mapping_dict in self.
DeriveMappingDicts(mapping_objs): if mapping_dict["mappings"].get(mapping_type, None): mappings[mapping_dict["owtf_code"]] = mapping_dict["mappings"][mapping_type] return mappings else: raise InvalidMappingReference("InvalidMappingReference %s requested" % mapping_type) def GetCategory(self, plugin_code): category = self.db.session.query(models.Mapping.category).get(plugin_code) # Getting the corresponding category back from db return category
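# LoadMappingDBFromFile() above expects an INI-style profile in which
# each section is an OWTF code, 'category' is an optional key, and any
# other key maps to '<mapped_code>_____<mapped_name>'. A tiny parse
# sketch under that assumption (section name and values are invented
# for illustration; Python 2, matching the module's ConfigParser):
import ConfigParser
from io import BytesIO

sample = ("[OWTF-IG-001]\n"
          "category = information_gathering\n"
          "OWASP_V3 = OWASP-IG-001_____Spiders, Robots and Crawlers\n")
cp = ConfigParser.RawConfigParser()
cp.optionxform = str  # keep key case, as the loader above does
cp.readfp(BytesIO(sample))
for key, data in cp.items('OWTF-IG-001'):
    if key != 'category':
        mapped_code, mapped_name = data.split('_____')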
# for back compatibility class Logger(object): def __init__(self): pass def write(self, message): intp.appendOutput(message) def reset(self): pass def flush(self): pass class PyZeppelinContext(object): """ A context impl that uses Py4j to communicate to JVM """ def __init__(self, z): self.z = z self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption self.javaList = gateway.jvm.java.util.ArrayList self.max_result = 1000 self._displayhook = lambda *args: None self._setup_matplotlib() def getInterpreterContext(self): return self.z.getCurrentInterpreterContext() def input(self, name, defaultValue=""): return self.z.getGui().input(name, defaultValue) def select(self, name, options, defaultValue=""): javaOptions = gateway.new_array(self.paramOption, len(options)) i = 0 for tuple in options: javaOptions[i] = self.paramOption(tuple[0], tuple[1]) i += 1 return self.z.getGui().select(name, defaultValue, javaOptions) def checkbox(self, name, options, defaultChecked=[]): javaOptions = gateway.new_array(self.paramOption, len(options)) i = 0 for tuple in options: javaOptions[i] = self.paramOption(tuple[0], tuple[1]) i += 1 javaDefaultCheck = self.javaList() for check in defaultChecked: javaDefaultCheck.append(check) return self.z.getGui().checkbox(name, javaDefaultCheck, javaOptions) def show(self, p, **kwargs): if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot": self.show_matplotlib(p, **kwargs) elif type(p).__name__ == "DataFrame": # does not play well with sub-classes # `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame` # and so a dependency on pandas self.show_dataframe(p, **kwargs) elif hasattr(p, '__call__'): p() #error reporting def show_dataframe(self, df, show_index=False, **kwargs): """Pretty prints DF using Table Display System """ limit = len(df) > self.max_result header_buf = StringIO("") if show_index: idx_name = str(df.index.name) if df.index.name is not None else "" header_buf.write(idx_name + "\t") header_buf.write(str(df.columns[0])) for col in df.columns[1:]: header_buf.write("\t") header_buf.write(str(col)) header_buf.write("\n") body_buf = StringIO("") rows = df.head(self.max_result).values if limit else df.values index = df.index.values for idx, row in zip(index, rows): if show_index: body_buf.write("%html <strong>{}</strong>".format(idx)) body_buf.write("\t") body_buf.write(str(row[0])) for cell in row[1:]: body_buf.write("\t") body_buf.write(str(cell)) body_buf.write("\n") body_buf.seek(0); header_buf.seek(0) #TODO(bzz): fix it, so it shows red noti
ce, as in Spark print("%table " + header_buf.read() + body_buf.read()) # + # ("\n<font color=red>Results are limited by {}.</font>" \ # .format(self.max_result) if limit else "") #) body_buf.close
(); header_buf.close() def show_matplotlib(self, p, fmt="png", width="auto", height="auto", **kwargs): """Matplotlib show function """ if fmt == "png": img = BytesIO() p.savefig(img, format=fmt) img_str = b"data:image/png;base64," img_str += base64.b64encode(img.getvalue().strip()) img_tag = "<img src={img} style='width={width};height:{height}'>" # Decoding is necessary for Python 3 compability img_str = img_str.decode("ascii") img_str = img_tag.format(img=img_str, width=width, height=height) elif fmt == "svg": img = StringIO() p.savefig(img, format=fmt) img_str = img.getvalue() else: raise ValueError("fmt must be 'png' or 'svg'") html = "%html <div style='width:{width};height:{height}'>{img}<div>" print(html.format(width=width, height=height, img=img_str)) img.close() def configure_mpl(self, **kwargs): import mpl_config mpl_config.configure(**kwargs) def _setup_matplotlib(self): # If we don't have matplotlib installed don't bother continuing try: import matplotlib except ImportError: return # Make sure custom backends are available in the PYTHONPATH rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd()) mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python') if mpl_path not in sys.path: sys.path.append(mpl_path) # Finally check if backend exists, and if so configure as appropriate try: matplotlib.use('module://backend_zinline') import backend_zinline # Everything looks good so make config assuming that we are using # an inline backend self._displayhook = backend_zinline.displayhook self.configure_mpl(width=600, height=400, dpi=72, fontsize=10, interactive=True, format='png') except ImportError: # Fall back to Agg if no custom backend installed matplotlib.use('Agg') warnings.warn("Unable to load inline matplotlib backend, " "falling back to Agg") def handler_stop_signals(sig, frame): sys.exit("Got signal : " + str(sig)) signal.signal(signal.SIGINT, handler_stop_signals) host = "127.0.0.1" if len(sys.argv) >= 3: host = sys.argv[2] _zcUserQueryNameSpace = {} client = GatewayClient(address=host, port=int(sys.argv[1])) #gateway = JavaGateway(client, auto_convert = True) gateway = JavaGateway(client) intp = gateway.entry_point intp.onPythonScriptInitialized(os.getpid()) java_import(gateway.jvm, "org.apache.zeppelin.display.Input") z = __zeppelin__ = PyZeppelinContext(intp) __zeppelin__._setup_matplotlib() _zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__ _zcUserQueryNameSpace["z"] = z output = Logger() sys.stdout = output #sys.stderr = output while True : req = intp.getStatements() if req == None: break try: stmts = req.statements().split("\n") final_code = [] # Get post-execute hooks try: global_hook = intp.getHook('post_exec_dev') except: global_hook = None try: user_hook = __zeppelin__.getHook('post_exec') except: user_hook = None nhooks = 0 for hook in (global_hook, user_hook): if hook: nhooks += 1 for s in stmts: if s == None: continue # skip comment s_stripped = s.strip() if len(s_stripped) == 0 or s_stripped.startswith("#"): continue final_code.append(s) if final_code: # use exec mode to compile the statements except the last statement, # so that the last statement's evaluation will be printed to stdout code = compile('\n'.join(final_code), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1) to_run_hooks = [] if (nhooks > 0): to_run_hooks = code.body[-nhooks:] to_run_exec, to_run_single = (code.body[:-(nhooks + 1)], [code.body[-(nhooks + 1)]]) try: for node in to_run_exec: mod = ast.Module([node]) code = compile(mod, '<stdin>', 'exec') exec(code, _zcUserQueryNameSpace) for node in 
to_run_single: mod = ast.Interactive([node]) code = compile(mod, '<stdin>', 'single') exec(code, _zcUserQueryNameSpace) for node in to_run_hooks: mod = ast.Module([node]) code = compile(mod, '<stdin>', 'exec') exec(code, _zcUserQueryNameSpace) except: raise Exception(traceback.format_exc()) intp.setStatementsFinished("", False) except Py4JJavaError: excInnerError = traceback.format_exc() # format_tb() does not return the inner exception innerErrorStart = excInnerError.find("Py4JJavaError:") if innerErrorStart > -1: excInnerError = excInnerError[innerErrorStart:] intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True) except Py4JNetworkError: # lost connection from gateway server. exit sys.exit(1) except: intp.setStatementsFinished(trac
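# The loop above compiles all but the last statement in 'exec' mode and
# the last one in 'single' mode, so only the final expression echoes
# its value, exactly like a REPL. The trick in isolation (a sketch
# assuming Python 3.8+, where ast.Module takes type_ignores; the hook
# handling above is omitted):
import ast

def run_like_a_repl(source, namespace):
    tree = ast.parse(source, '<stdin>', 'exec')
    if not tree.body:
        return
    *body, last = tree.body
    if body:
        # Run everything but the last statement silently.
        exec(compile(ast.Module(body, type_ignores=[]), '<stdin>', 'exec'), namespace)
    # 'single' mode prints the value of a bare final expression.
    exec(compile(ast.Interactive([last]), '<stdin>', 'single'), namespace)

# run_like_a_repl("x = 1 + 1\nx * 3", {})  prints 6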
from c3nav.editor.models.changedobject import ChangedObject  # noqa
from c3nav.editor.models.changeset import ChangeSet  # noqa
from c3nav.editor.models.changesetupdate import ChangeSetUpdate  # noqa
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone import django.core.validators import django.contrib.auth.models class Migration(migrations.Migration): dependencies = [ ('auth', '0006_require_contenttypes_0002'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')), ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)), ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)), ('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('remote_id', models.IntegerField(null=True, blank=True)), ('remote_uri', models.CharField(max_length=256, null=True, blank=True)), ('profile_uri', models.CharField(max_length=256, null=True, blank=True)), ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this u
ser belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')), ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')), ], options={ 'verbose_name': 'User',
'verbose_name_plural': 'Users', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
import socket
from heapq import heappush, heappop, heapify
from collections import defaultdict

##defbig
def encode(symb2freq):
    """Huffman encode the given dict mapping symbols to weights"""
    heap = [[wt, [sym, ""]] for sym, wt in symb2freq.items()]
    heapify(heap)
    while len(heap) > 1:
        lo = heappop(heap)
        hi = heappop(heap)
        for pair in lo[1:]:
            pair[1] = '1' + pair[1]
        for pair in hi[1:]:
            pair[1] = '0' + pair[1]
        # Merge the two lightest subtrees into one node.
        heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])
    return sorted(heappop(heap)[1:], key=lambda p: (len(p[-1]), p))
##defend

# Python 2 client: socket.send() is given str, not bytes.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 1743
s.connect((host, port))
s.send("#BEGIN")
s.send("!")

f = open('a.txt', 'r')  # file input is stubbed out below in favor of a literal
#for line in f.readlines():
txt = 'mississippi river'
symb2freq = defaultdict(int)
for ch in txt:
    symb2freq[ch] += 1
huff = encode(symb2freq)
# Send "symbol,frequency,code" triples so the receiver can rebuild the table.
for p in huff:
    s.send("{0},{1},{2}".format(p[0], symb2freq[p[0]], p[1]))
s.send("#END")
s.close()
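# The receiving side can rebuild the table from those triples and
# decode a prefix-free bit string. A minimal decode sketch (the
# function below is ours, not part of the original script):
def decode(bits, huff_table):
    """Decode a '0'/'1' string using the [[symbol, code], ...] table."""
    code2sym = dict((code, sym) for sym, code in huff_table)
    out, cur = [], ""
    for b in bits:
        cur += b
        if cur in code2sym:   # Huffman codes are prefix-free
            out.append(code2sym[cur])
            cur = ""
    return ''.join(out)

# e.g.:
#   table = dict((sym, code) for sym, code in huff)
#   assert decode(''.join(table[ch] for ch in txt), huff) == txt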
_insert(tx): existing_user = await check_valid_new_user(tx, username, login_id, is_developer, is_service_account) if existing_user is not None: return False await tx.execute_insertone( ''' INSERT INTO users (state, username, login_id, is_developer, is_service_account) VALUES (%s, %s, %s, %s, %s); ''', ('creating', username, login_id, is_developer, is_service_account), ) await _insert() # pylint: disable=no-value-for-parameter return True def cleanup_session(session): def _delete(key): if key in session: del session[key] _delete('pending') _delete('login_id') _delete('next') _delete('caller') _delete('session_id') _delete('flow') @routes.get('/healthcheck') async def get_healthcheck(request): # pylint: disable=W0613 return web.Response() @routes.get('') @routes.get('/') async def get_index(request): # pylint: disable=unused-argument return aiohttp.web.HTTPFound(deploy_config.external_url('auth', '/login')) @routes.get('/creating') @web_maybe_authenticated_user async def creating_account(request, userdata): db = request.app['db'] session = await aiohttp_session.get_session(request) if 'pending' in session: login_id = session['login_id'] user = await user_from_login_id(db, login_id) nb_url = deploy_config.external_url('notebook', '') next_page = session.pop('next', nb_url) cleanup_session(session) if user is None: set_message(session, f'Account does not exist for login id {login_id}.', 'error') return aiohttp.web.HTTPFound(nb_url) page_context = {'username': user['username'], 'state': user['state'], 'login_id': user['login_id']} if user['state'] == 'deleting' or user['state'] == 'deleted': return await render_template('auth', request, userdata, 'account-error.html', page_context) if user['state'] == 'active': session_id = await create_session(db, user['id']) session['session_id'] = session_id set_message(session, f'Account has been created for {user["username"]}.', 'info') return aiohttp.web.HTTPFound(next_page) assert user['state'] == 'creating' session['pending'] = True session['login_id'] = login_id session['next'] = next_page return await render_template('auth', request, userdata, 'account-creating.html', page_context) return aiohttp.web.HTTPUnauthorized() @routes.get('/creating/wait') async def creating_account_wait(request): session = await aiohttp_session.get_session(request) if 'pending' not in session: raise web.HTTPUnauthorized() return await _wait_websocket(request, session['login_id']) async def _wait_websocket(request, login_id): app = request.app db = app['db'] user = await user_from_login_id(db, login_id) if not user: return web.HTTPNotFound() ws = web.WebSocketResponse() await ws.prepare(request) try: count = 0 while count < 10: try: user = await user_from_login_id(db, login_id) assert user if user['state'] != 'creating': log.info(f"user {user['username']} is no longer creating") break except asyncio.CancelledError: raise except Exception: # pylint: disable=broad-except log.exception(f"/creating/wait: error while updating status for user {user['username']}") await asyncio.sleep(1) count += 1 if count >= 10: log.info(f"user {user['username']} is still in state creating") ready = user['state'] == 'active' await ws.send_str(str(int(ready))) return ws finally: await ws.close() @routes.get('/signup') async def signup(request): next_page = request.query.get('next', deploy_config.external_url('notebook', '')) flow_data = request.app['flow_client'].initiate_flow(deploy_config.external_url('auth', '/oauth2callback')) session = await aiohttp_session.new_session(request) 
cleanup_session(session) session['next'] = next_page session['caller'] = 'signup' session['flow'] = flow_data return aiohttp.web.HTTPFound(flow_data['authorization_url']) @routes.get('/login') async def login(request): next_page = request.query.get('next', deploy_config.external_url('notebook', '')) flow_data = request.app['flow_client'].initiate_flow(deploy_config.external_url('auth', '/oauth2callback')) session = await aiohttp_session.new_session(request) cleanup_session(session) session['next'] = next_page session['c
aller'] = 'login' session['flow'] = flow_data return aiohttp.web.HTTPFound(flow_data['authorization_url']) @routes.get('/oauth2callback') async def callback(request): session = await aiohttp_session.get_session(request) if 'flow' not in session: raise web.HTTPUnauthorized() nb_url = deploy_config.external_url('notebook', '') creating_url = deploy_config.external_url('auth', '/creating') caller = session['caller']
next_page = session.pop('next', nb_url) flow_dict = session['flow'] flow_dict['callback_uri'] = deploy_config.external_url('auth', '/oauth2callback') cleanup_session(session) try: flow_result = request.app['flow_client'].receive_callback(request, flow_dict) login_id = flow_result.login_id except asyncio.CancelledError: raise except Exception as e: log.exception('oauth2 callback: could not fetch and verify token') raise web.HTTPUnauthorized() from e db = request.app['db'] user = await user_from_login_id(db, login_id) if user is None: if caller == 'login': set_message(session, f'Account does not exist for login id {login_id}', 'error') return aiohttp.web.HTTPFound(nb_url) assert caller == 'signup' username, domain = flow_result.email.split('@') username = ''.join(c for c in username if c.isalnum()) if domain != ORGANIZATION_DOMAIN: raise web.HTTPUnauthorized() try: await insert_new_user(db, username, login_id, is_developer=False, is_service_account=False) except AuthUserError as e: set_message(session, e.message, 'error') return web.HTTPFound(deploy_config.external_url('notebook', '')) session['pending'] = True session['login_id'] = login_id return web.HTTPFound(creating_url) if user['state'] in ('deleting', 'deleted'): page_context = {'username': user['username'], 'state': user['state'], 'login_id': user['login_id']} return await render_template('auth', request, user, 'account-error.html', page_context) if user['state'] == 'creating': if caller == 'signup': set_message(session, f'Account is already creating for login id {login_id}', 'error') if caller == 'login': set_message(session, f'Account for login id {login_id} is still being created.', 'error') session['pending'] = True session['login_id'] = user['login_id'] return web.HTTPFound(creating_url) assert user['state'] == 'active' if caller == 'signup': set_message(session, f'Account has already been created for {user["username"]}.', 'info') session_id = await create_session(db, user['id']) session['session_id'] = session_id return aiohttp.web.HTTPFound(next_page) @routes.post('/api/v1alpha/users/{user}/create') @rest_authenticated_developers_only async def create_user(request: web.Request, userdata): # pylint: disable=unused-argument db: Database = request.app['db'] username = request.match_info['user'] body = await request.json() login_id = body['login_id'] is_developer = body['is_developer'] is_service_account = body['is_service_account'] try: await insert_new_user(db, username, login_id, is_developer, is_serv
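# cleanup_session() above clears a fixed set of session keys through a
# small _delete closure; dict.pop with a default expresses the same
# idiom in one line per key (helper name is ours):
def cleanup_keys(mapping, keys=('pending', 'login_id', 'next', 'caller', 'session_id', 'flow')):
    """Remove the given keys from a mapping if present."""
    for key in keys:
        mapping.pop(key, None)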
        else:
            vars_matrix = self.dcm(otherframe) * Matrix(otherframe.varlist)
            mapping = {}
            for i, x in enumerate(self):
                if Vector.simp:
                    mapping[self.varlist[i]] = trigsimp(vars_matrix[i],
                                                        method='fu')
                else:
                    mapping[self.varlist[i]] = vars_matrix[i]
            self._var_dict[(otherframe, Vector.simp)] = mapping
            return mapping

    def ang_acc_in(self, otherframe):
        """Returns the angular acceleration Vector of the ReferenceFrame.

        Effectively returns the Vector:

        ^N alpha ^B

        which represents the angular acceleration of B in N, where B is self,
        and N is otherframe.

        Parameters
        ==========

        otherframe : ReferenceFrame
            The ReferenceFrame which the angular acceleration is returned in.

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, Vector
        >>> N = ReferenceFrame('N')
        >>> A = ReferenceFrame('A')
        >>> V = 10 * N.x
        >>> A.set_ang_acc(N, V)
        >>> A.ang_acc_in(N)
        10*N.x

        """

        _check_frame(otherframe)
        if otherframe in self._ang_acc_dict:
            return self._ang_acc_dict[otherframe]
        else:
            return self.ang_vel_in(otherframe).dt(otherframe)

    def ang_vel_in(self, otherframe):
        """Returns the angular velocity Vector of the ReferenceFrame.

        Effectively returns the Vector:

        ^N omega ^B

        which represents the angular velocity of B in N, where B is self, and
        N is otherframe.

        Parameters
        ==========

        otherframe : ReferenceFrame
            The ReferenceFrame which the angular velocity is returned in.

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, Vector
        >>> N = ReferenceFrame('N')
        >>> A = ReferenceFrame('A')
        >>> V = 10 * N.x
        >>> A.set_ang_vel(N, V)
        >>> A.ang_vel_in(N)
        10*N.x

        """

        _check_frame(otherframe)
        flist = self._dict_list(otherframe, 1)
        outvec = Vector(0)
        for i in range(len(flist) - 1):
            outvec += flist[i]._ang_vel_dict[flist[i + 1]]
        return outvec

    def dcm(self, otherframe):
        """The direction cosine matrix between frames.

        This gives the DCM between this frame and the otherframe.
        The format is N.xyz = N.dcm(B) * B.xyz
        A SymPy Matrix is returned.

        Parameters
        ==========

        otherframe : ReferenceFrame
            The frame with respect to which the DCM is generated.

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, Vector
        >>> from sympy import symbols
        >>> q1 = symbols('q1')
        >>> N = ReferenceFrame('N')
        >>> A = N.orientnew('A', 'Axis', [q1, N.x])
        >>> N.dcm(A)
        Matrix([
        [1,       0,        0],
        [0, cos(q1), -sin(q1)],
        [0, sin(q1),  cos(q1)]])

        """

        _check_frame(otherframe)
        # Check if the dcm wrt that frame has already been calculated
        if otherframe in self._dcm_cache:
            return self._dcm_cache[otherframe]
        flist = self._dict_list(otherframe, 0)
        outdcm = eye(3)
        for i in range(len(flist) - 1):
            outdcm = outdcm * flist[i]._dcm_dict[flist[i + 1]]
        # After calculation, store the dcm in dcm cache for faster
        # future retrieval
        self._dcm_cache[otherframe] = outdcm
        otherframe._dcm_cache[self] = outdcm.T
        return outdcm

    def orient(self, parent, rot_type, amounts, rot_order=''):
        """Defines the orientation of this frame relative to a parent frame.

        Parameters
        ==========

        parent : ReferenceFrame
            The frame that this ReferenceFrame will have its orientation
            matrix defined in relation to.
        rot_type : str
            The type of orientation matrix that is being created. Supported
            types are 'Body', 'Space', 'Quaternion', 'Axis', and 'DCM'.
            See examples for correct usage.
        amounts : list OR value
            The quantities that the orientation matrix will be defined by.
            In case of rot_type='DCM', value must be a
            sympy.matrices.MatrixBase object (or subclasses of it).
        rot_order : str
            If applicable, the order of a series of rotations.

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, Vector
        >>> from sympy import symbols, eye, ImmutableMatrix
        >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
        >>> N = ReferenceFrame('N')
        >>> B = ReferenceFrame('B')

        Now we have a choice of how to implement the orientation. First is
        Body. Body orientation takes this reference frame through three
        successive simple rotations. Acceptable rotation orders are of length
        3, expressed in XYZ or 123, and cannot have a rotation about an axis
        twice in a row.

        >>> B.orient(N, 'Body', [q1, q2, q3], '123')
        >>> B.orient(N, 'Body', [q1, q2, 0], 'ZXZ')
        >>> B.orient(N, 'Body', [0, 0, 0], 'XYX')

        Next is Space. Space is like Body, but the rotations are applied in
        the opposite order.

        >>> B.orient(N, 'Space', [q1, q2, q3], '312')

        Next is Quaternion. This orients the new ReferenceFrame with
        Quaternions, defined as a finite rotation about lambda, a unit vector,
        by some amount theta. This orientation is described by four
        parameters:

        q0 = cos(theta/2)
        q1 = lambda_x sin(theta/2)
        q2 = lambda_y sin(theta/2)
        q3 = lambda_z sin(theta/2)

        Quaternion does not take in a rotation order.

        >>> B.orient(N, 'Quaternion', [q0, q1, q2, q3])

        Next is Axis. This is a rotation about an arbitrary, non-time-varying
        axis by some angle. The axis is supplied as a Vector. This is how
        simple rotations are defined.

        >>> B.orient(N, 'Axis', [q1, N.x + 2 * N.y])

        Last is DCM (Direction Cosine Matrix). This is a rotation matrix
        given manually.

        >>> B.orient(N, 'DCM', eye(3))
        >>> B.orient(N, 'DCM', ImmutableMatrix([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))

        """

        from sympy.physics.vector.functions import dynamicsymbols
        _check_frame(parent)

        # Allow passing a rotation matrix manually.
        if rot_type == 'DCM':
            # When rot_type == 'DCM', then amounts must be a Matrix type
            # object (e.g. sympy.matrices.dense.MutableDenseMatrix).
            if not isinstance(amounts, MatrixBase):
                raise TypeError("Amounts must be a sympy Matrix type object.")
        else:
            amounts = list(amounts)
            for i, v in enumerate(amounts):
                if not isinstance(v, Vector):
                    amounts[i] = sympify(v)

        def _rot(axis, angle):
            """DCM for simple axis 1, 2, or 3 rotations."""
            if axis == 1:
                return Matrix([[1, 0, 0],
                               [0, cos(angle), -sin(angle)],
                               [0, sin(angle), cos(angle)]])
            elif axis == 2:
                return Matrix([[cos(angle), 0, sin(angle)],
                               [0, 1, 0],
                               [-sin(angle), 0, cos(angle)]])
            elif axis == 3:
                return Matrix([[cos(angle), -sin(angle), 0],
                               [sin(angle), cos(angle), 0],
                               [0, 0, 1]])

        approved_orders = ('123', '231', '312', '132', '213', '321', '121',
                           '131', '212', '232', '313', '323', '')
        rot_order = str(rot_order).upper()  # Now we need to make sure XYZ = 123
        rot_type = rot_type.upper()
        rot_order = [i.replace('X', '1') for i in rot_order]
        rot_order = [i.replace('Y', '2') for i in rot_order]
        rot_order = [i.replace('Z', '3') for i in rot_order]
        rot_order = ''.join(rot_order)
        if rot_order not in approved
"""PEP 656 support. This module implements logic to detect if the currently running Python is linked against musl, and what musl version is used. """ import contextlib import functools import operator import os import re import struct import subprocess import sys from typing import IO, Iterator, NamedTuple, Optional, Tuple def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: return struct.unpack(fmt, f.read(struct.calcsize(fmt))) def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: """Detect musl libc location by parsing the Python executable. Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html """ f.seek(0) try: ident = _read_unpacked(f, "16B") except struct.error: return None if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. return None f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. try: # e_fmt: Format for program header. # p_fmt: Format for section header. # p_idx: Indexes to find p_type, p_offset, and p_filesz. e_fmt, p_fmt, p_idx = { 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. }[ident[4]] except KeyError: return None else: p_get = operator.itemgetter(*p_idx) # Find the interpreter section and return its content. try: _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) except struct.error: return None for i in range(e_phnum + 1): f.seek(e_phoff + e_phentsize * i) try: p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) except struct.error: return None if p_type != 3: # Not PT_INTERP. continue f.seek(p_offset) interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") if "musl" not in interpreter: return None return interpreter return None class _MuslVersion(NamedTuple): major: int minor: int def _parse_musl_version(output: str) -> Optional[_MuslVersion]: lines = [n for n in (n.strip() for n in output.splitlines()) if n] if len(lines) < 2 or lines[0][:4] != "musl": return None m = re.match(r"Version (\d+)\.(\d+)", lines[1]
) if not m: return None return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) @functools.lru_cache() def _get_musl_version(executable: str) -> Optional[_MuslVersion]: """Detect currently-running musl runtime version. This is done by checking the specified executable's dynamic linking information, and invoking the loader to parse its output for a version str
ing. If the loader is musl, the output would be something like:: musl libc (x86_64) Version 1.2.2 Dynamic Program Loader """ with contextlib.ExitStack() as stack: try: f = stack.enter_context(open(executable, "rb")) except IOError: return None ld = _parse_ld_musl_from_elf(f) if not ld: return None proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) return _parse_musl_version(proc.stderr) def platform_tags(arch: str) -> Iterator[str]: """Generate musllinux tags compatible to the current platform. :param arch: Should be the part of platform tag after the ``linux_`` prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a prerequisite for the current platform to be musllinux-compatible. :returns: An iterator of compatible musllinux tags. """ sys_musl = _get_musl_version(sys.executable) if sys_musl is None: # Python not dynamically linked against musl. return for minor in range(sys_musl.minor, -1, -1): yield f"musllinux_{sys_musl.major}_{minor}_{arch}" if __name__ == "__main__": # pragma: no cover import sysconfig plat = sysconfig.get_platform() assert plat.startswith("linux-"), "not linux" print("plat:", plat) print("musl:", _get_musl_version(sys.executable)) print("tags:", end=" ") for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): print(t, end="\n ")
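# Worked example (illustrative comment, not part of the upstream module): on
# an x86_64 interpreter linked against musl 1.2, platform_tags("x86_64") would
# yield, most specific first:
#
#     musllinux_1_2_x86_64
#     musllinux_1_1_x86_64
#     musllinux_1_0_x86_64
#
# Every minor version down to 0 is emitted because a wheel built against an
# older musl 1.x is expected to keep working on a newer 1.x runtime.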
        self.settings.setValue('testqgissettings/name', 'qgisrocks')
        self.settings.sync()
        self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')

    def test_defaults(self):
        self.assertIsNone(self.settings.value('testqgissettings/name'))
        self.addToDefaults('testqgissettings/name', 'qgisrocks')
        self.assertEqual(self.settings.value('testqgissettings/name'), 'qgisrocks')

    def test_allkeys(self):
        self.assertEqual(self.settings.allKeys(), [])
        self.addToDefaults('testqgissettings/name', 'qgisrocks')
        self.addToDefaults('testqgissettings/name2', 'qgisrocks2')
        self.settings.setValue('nepoti/eman', 'osaple')

        self.assertEqual(3, len(self.settings.allKeys()))
        self.assertIn('testqgissettings/name', self.settings.allKeys())
        self.assertIn('nepoti/eman', self.settings.allKeys())
        self.assertEqual('qgisrocks', self.settings.value('testqgissettings/name'))
        self.assertEqual('qgisrocks2', self.settings.value('testqgissettings/name2'))
        self.assertEqual('qgisrocks', self.globalsettings.value('testqgissettings/name'))
        self.assertEqual('osaple', self.settings.value('nepoti/eman'))
        self.assertEqual(3, len(self.settings.allKeys()))
        self.assertEqual(2, len(self.globalsettings.allKeys()))

    def test_precedence_simple(self):
        self.assertEqual(self.settings.allKeys(), [])
        self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
        self.settings.setValue('testqgissettings/names/name1', 'qgisrocks-1')
        self.assertEqual(self.settings.value('testqgissettings/names/name1'), 'qgisrocks-1')

    def test_precedence_group(self):
        """Test if user can override a group value"""
        self.assertEqual(self.settings.allKeys(), [])
        self.addGroupToDefaults('connections-xyz', {
            'OSM': 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png',
            'OSM-b': 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png',
        })
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()

        # Override edit
        self.settings.beginGroup('connections-xyz')
        self.settings.setValue('OSM', 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()

        # Check it again!
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://c.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()

        # Override remove: the global value will be restored!
        self.settings.beginGroup('connections-xyz')
        self.settings.remove('OSM')
        self.settings.endGroup()

        # Check it again!
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()

        # Override remove: store a blank!
        self.settings.beginGroup('connections-xyz')
        self.settings.setValue('OSM', '')
        self.settings.endGroup()

        # Check it again!
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), '')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()

        # Override remove: store a None: the global setting will be restored!
        self.settings.beginGroup('connections-xyz')
        self.settings.setValue('OSM', None)
        self.settings.endGroup()

        # Check it again!
        self.settings.beginGroup('connections-xyz')
        self.assertEqual(self.settings.value('OSM'), 'http://a.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.assertEqual(self.settings.value('OSM-b'), 'http://b.tile.openstreetmap.org/{z}/{x}/{y}.png')
        self.settings.endGroup()

    def test_utf8(self):
        self.assertEqual(self.settings.allKeys(), [])
        self.addToDefaults('testqgissettings/names/namèé↓1', 'qgisrocks↓1')
        self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓1')
        self.settings.setValue('testqgissettings/names/namèé↓2', 'qgisrocks↓2')
        self.assertEqual(self.settings.value('testqgissettings/names/namèé↓2'), 'qgisrocks↓2')
        self.settings.setValue('testqgissettings/names/namèé↓1', 'qgisrocks↓-1')
        self.assertEqual(self.settings.value('testqgissettings/names/namèé↓1'), 'qgisrocks↓-1')

    def test_groups(self):
        self.assertEqual(self.settings.allKeys(), [])

        self.addToDefaults('testqgissettings/names/name1', 'qgisrocks1')
        self.addToDefaults('testqgissettings/names/name2', 'qgisrocks2')
        self.addToDefaults('testqgissettings/names/name3', 'qgisrocks3')
        self.addToDefaults('testqgissettings/name', 'qgisrocks')

        self.settings.beginGroup('testqgissettings')
        self.assertEqual(self.settings.group(), 'testqgissettings')
        self.assertEqual(['names'], self.settings.childGroups())

        self.settings.setValue('surnames/name1', 'qgisrocks-1')
        self.assertEqual(['surnames', 'names'], self.settings.childGroups())

        self.settings.setValue('names/name1', 'qgisrocks-1')
        self.assertEqual('qgisrocks-1', self.settings.value('names/name1'))
        self.settings.endGroup()
        self.assertEqual(self.settings.group(), '')
        self.settings.beginGroup('testqgissettings/names')
        self.assertEqual(self.settings.group(), 'testqgissettings/names')
        self.settings.setValue('name4', 'qgisrocks-4')
        keys = sorted(self.settings.childKeys())
        self.assertEqual(keys, ['name1', 'name2', 'name3', 'name4'])
        self.settings.endGroup()
        self.assertEqual(self.settings.group(), '')
        self.assertEqual('qgisrocks-1', self.settings.value('testqgissettings/names/name1'))
        self.assertEqual('qgisrocks-4', self.settings.value('testqgissettings/names/name4'))

    def test_global_groups(self):
        self.assertEqual(self.settings.allKeys(), [])
        self.assertEqual(self.globalsettings.allKeys(), [])

        self.addToDefaults('testqgissettings/foo/first', 'qgis')
        self.addToDefaults('testqgissettings/foo/last', 'rocks')

        self.settings.beginGroup('testqgissettings')
        self.assertEqual(self.settings.group(), 'testqgissettings')
        self.assertEqual(['foo'], self.settings.childGroups())
        self.assertEqual(['foo'], self.settings.globalChildGroups())
        self.settings.endGroup()
        self.assertEqual(self.settings.group(), '')

        self.settings.setValue('testqgissettings/bar/first', 'qgis')
        self.settings.setValue('testqgissettings/bar/last', 'rocks')

        self.settings.beginGroup('testqgissettings')
        self.assertEqual(sorted(['bar', 'foo']), sorted(self.settings.childGroups()))
        self.assertEqual(['foo'], self.settings.globalChildGroups())
        self.settings.endGroup()

        self.globalsettings.remove('testqgissettings/foo')

        self.settings.beginGroup('testqgissettings')
        self.assertEqual(['bar'], self.settings.childGroups())
        self.assertEqual([], self.settings.globalChildGroups())
        self.settings.endGroup()

    def test_group_section(self):
        # Test group by using Section
        self.settings.beginGroup('firstgroup', section=QgsSettings.Core)
        self.assertEqual(self.settings.group(), 'core/firstgroup')
        self.assertEqual([], self.settings.childGroups())
        self.settings.setValue('key', 'value')
        self.settings.setValue('key2/subkey1', 'subvalue1')
        self.settings.setValue('key2/subkey2', 'subvalue2')
        self.settings.setValue('key3', 'val
s import ( compat_urllib_request, compat_urllib_error, ContentTooShortError, encodeFilename, sanitize_open, format_bytes, ) class HttpFD(FileDownloader): _TEST_FILE_SIZE = 10241 def real_download(self, filename, info_dict): url = info_dict['url'] tmpfilename = self.temp_name(filename) stream = None # Do not include the Accept-Encoding header headers = {'Youtubedl-no-compression': 'True'} if 'user_agent' in info_dict: headers['Youtubedl-user-agent'] = info_dict['user_agent'] if 'http_referer' in info_dict: headers['Referer'] = info_dict['http_referer'] basic_request = compat_urllib_request.Request(url, None, headers) request = compat_urllib_request.Request(url, None, headers) is_test = self.params.get('test', False) if is_test: request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1)) # Establish possible resume length if os.path.isfile(encodeFilename(tmpfilename)): resume_len = os.path.getsize(encodeFilename(tmpfilename)) else: resume_len = 0 open_mode = 'wb' if resume_len != 0: if self.params.get('continuedl', False): self.report_resuming_byte(resume_len) request.add_header('Range', 'bytes=%d-' % resume_len) open_mode = 'ab' else: resume_len = 0 count = 0 retries = self.params.get('retries', 0) while count <= retries: # Establish connection try: data = self.ydl.urlopen(request) break except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416: # Unexpected HTTP error raise elif err.code == 416: # Unable to resume (requested range not satisfiable) try:
# Open the connection again without the range header data = self.ydl.urlopen(basic_request) content_length = data.info()['Content-Length'] except (compat_urllib_error.HTTPError, ) as err: if err.code < 500 or err.code >= 600: raise else: # Examine the reported length if (content_length is not None and (resume_len - 100 < int(content_length) < resume_len + 100)): # The file had already been fully downloaded. # Explanation to the above condition: in issue #175 it was revealed that # YouTube sometimes adds or removes a few bytes from the end of the file, # changing the file size slightly and causing problems for some users. So # I decided to implement a suggested change and consider the file # completely downloaded if the file size differs less than 100 bytes from # the one in the hard drive. self.report_file_already_downloaded(filename) self.try_rename(tmpfilename, filename) self._hook_progress({ 'filename': filename, 'status': 'finished', }) return True else: # The length does not match, we start the download over self.report_unable_to_resume() resume_len = 0 open_mode = 'wb' break # Retry count += 1 if count <= retries: self.report_retry(count, retries) if count > retries: self.report_error(u'giving up after %s retries' % retries) return False data_len = data.info().get('Content-length', None) # Range HTTP header may be ignored/unsupported by a webserver # (e.g. extractor/scivee.py, extractor/bambuser.py). # However, for a test we still would like to download just a piece of a file. # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control # block size when downloading a file. if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE): data_len = self._TEST_FILE_SIZE if data_len is not None: data_len = int(data_len) + resume_len min_data_len = self.params.get("min_filesize", None) max_data_len = self.params.get("max_filesize", None) if min_data_len is not None and data_len < min_data_len: self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len)) return False if max_data_len is not None and data_len > max_data_len: self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' 
% (data_len, max_data_len)) return False data_len_str = format_bytes(data_len) byte_counter = 0 + resume_len block_size = self.params.get('buffersize', 1024) start = time.time() while True: # Download and write before = time.time() data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter)) after = time.time() if len(data_block) == 0: break byte_counter += len(data_block) # Open file just in time if stream is None: try: (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode) assert stream is not None filename = self.undo_temp_name(tmpfilename) self.report_destination(filename) except (OSError, IOError) as err: self.report_error(u'unable to open for writing: %s' % str(err)) return False try: stream.write(data_block) except (IOError, OSError) as err: self.to_stderr(u"\n") self.report_error(u'unable to write data: %s' % str(err)) return False if not self.params.get('noresizebuffer', False): block_size = self.best_block_size(after - before, len(data_block)) # Progress message speed = self.calc_speed(start, time.time(), byte_counter - resume_len) if data_len is None: eta = percent = None else: percent = self.calc_percent(byte_counter, data_len) eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len) self.report_progress(percent, data_len_str, speed, eta) self._hook_progress({ 'downloaded_bytes': byte_counter, 'total_bytes': data_len, 'tmpfilename': tmpfilename, 'filename': filename, 'status': 'downloading', 'eta': eta, 'speed': speed, }) if is_test and byte_counter == data_len: break # Apply rate limit self.slow_down(start, byte_counter - resume_len) if stream is None: self.to_stderr(u"\n") self.report_error(u'Did not get any data blocks') return False stream.close() self.report_finish(data_len_str, (time.time() - start)) if data_len is not None and byte_counter != data_len: raise ContentTooShortError(byte_counter, int(data_len)) self.try_rename(tmpfilename,
from jx_elasticsearch.es52.painless._utils import Painless, LIST_TO_PIPE from
jx_elasticsearch.es52.painless.add_op import AddOp from jx_elasticsearch.es52.painless.and_op import AndOp from jx_elasticsearch.es52.painless.basic_add_op import BasicAddOp from jx_elasticsearch.es52.painless.basic_eq_op import BasicEqOp from jx_elasticsearch.es52.painless.basic_index_of_op import BasicIndexOfOp from jx_elasticsearch.es52.painless.basic_mul_op import BasicMulOp from jx_elasticsearch.es52.painless.basic_starts_with_op import BasicStartsWithOp from jx_elast
icsearch.es52.painless.basic_substring_op import BasicSubstringOp from jx_elasticsearch.es52.painless.boolean_op import BooleanOp from jx_elasticsearch.es52.painless.case_op import CaseOp from jx_elasticsearch.es52.painless.coalesce_op import CoalesceOp from jx_elasticsearch.es52.painless.concat_op import ConcatOp from jx_elasticsearch.es52.painless.count_op import CountOp from jx_elasticsearch.es52.painless.date_op import DateOp from jx_elasticsearch.es52.painless.div_op import DivOp from jx_elasticsearch.es52.painless.eq_op import EqOp from jx_elasticsearch.es52.painless.es_script import EsScript from jx_elasticsearch.es52.painless.exists_op import ExistsOp from jx_elasticsearch.es52.painless.exp_op import ExpOp from jx_elasticsearch.es52.painless.find_op import FindOp from jx_elasticsearch.es52.painless.first_op import FirstOp from jx_elasticsearch.es52.painless.floor_op import FloorOp from jx_elasticsearch.es52.painless.gt_op import GtOp from jx_elasticsearch.es52.painless.gte_op import GteOp from jx_elasticsearch.es52.painless.in_op import InOp from jx_elasticsearch.es52.painless.integer_op import IntegerOp from jx_elasticsearch.es52.painless.is_number_op import IsNumberOp from jx_elasticsearch.es52.painless.leaves_op import LeavesOp from jx_elasticsearch.es52.painless.length_op import LengthOp from jx_elasticsearch.es52.painless.literal import Literal from jx_elasticsearch.es52.painless.lt_op import LtOp from jx_elasticsearch.es52.painless.lte_op import LteOp from jx_elasticsearch.es52.painless.max_op import MaxOp from jx_elasticsearch.es52.painless.min_op import MinOp from jx_elasticsearch.es52.painless.missing_op import MissingOp from jx_elasticsearch.es52.painless.mod_op import ModOp from jx_elasticsearch.es52.painless.mul_op import MulOp from jx_elasticsearch.es52.painless.ne_op import NeOp from jx_elasticsearch.es52.painless.not_left_op import NotLeftOp from jx_elasticsearch.es52.painless.not_op import NotOp from jx_elasticsearch.es52.painless.number_op import NumberOp from jx_elasticsearch.es52.painless.or_op import OrOp from jx_elasticsearch.es52.painless.prefix_op import PrefixOp from jx_elasticsearch.es52.painless.string_op import StringOp from jx_elasticsearch.es52.painless.sub_op import SubOp from jx_elasticsearch.es52.painless.suffix_op import SuffixOp from jx_elasticsearch.es52.painless.tuple_op import TupleOp from jx_elasticsearch.es52.painless.union_op import UnionOp from jx_elasticsearch.es52.painless.variable import Variable from jx_elasticsearch.es52.painless.when_op import WhenOp from jx_elasticsearch.es52.painless.false_op import FalseOp, false_script from jx_elasticsearch.es52.painless.true_op import TrueOp, true_script from jx_elasticsearch.es52.painless.null_op import NullOp, null_script Painless.register_ops(vars())
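# Note: register_ops(vars()) hands this module's global namespace to Painless,
# which presumably picks the operator classes (AddOp, AndOp, ..., WhenOp) out
# of it and wires them into its expression-to-script dispatch table. That is
# why the imports above are load-bearing even though nothing in this file
# references them directly.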
# Building inheritance
class MITPerson(Person):

    nextIdNum = 0  # next ID number to assign

    def __init__(self, name):
        Person.__init__(self, name)  # initialize Person attributes
        # new MITPerson attribute: a unique ID number
        self.idNum = MITPerson.nextIdNum
        MITPerson.nextIdNum += 1

    def getIdNum(self):
        return self.idNum

    def __lt__(self, other):
        return self.idNum < other.idNum


class Student(MITPerson):
    pass


class UG(Student):  # UG = undergraduate
    def __init__(self, name, classYear):
        MITPerson.__init__(self, name)
        self.year = classYear

    def getClass(self):  # getter method
        return self.year


class Grad(Student):
    pass


class TransferStudent(Student):
    pass


def isStudent(obj):
    return isinstance(obj, Student)
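# A minimal usage sketch (illustrative; it assumes the Person base class
# defined earlier in these notes takes just a name string in its constructor,
# and that these are the first MITPerson instances created). It shows why
# __lt__ matters: sorted() can now order MITPersons by idNum.
if __name__ == '__main__':
    p1 = MITPerson('Mark Guttag')
    p2 = UG('Billy Beaver', 2019)
    p3 = Grad('Buzz Aldrin')
    print(p1.getIdNum(), p2.getIdNum(), p3.getIdNum())  # 0 1 2
    print(isStudent(p1), isStudent(p2), isStudent(p3))  # False True True
    print(p2 < p3)  # True: p2 was created first, so it has the lower idNum
    for s in sorted([p3, p2]):  # sorted() calls __lt__ under the hood
        print(s.getIdNum())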