prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
def Setup(Settings, DefaultModel):
    """Configure experiment "set1": three img+OSM mix models at 299px.

    Mutates and returns ``Settings``: sets experiment-level keys, fills in
    the first model entry, and appends two more models cloned from
    ``DefaultModel`` before overriding their fields.

    :param Settings: experiment settings dict; must contain a "models" list
                     with at least one dict entry
    :param DefaultModel: template dict copied for each additional model
    :returns: the mutated Settings dict
    """
    # set1-test_of_models_against_datasets/osm299.py
    Settings["experiment_name"] = "set1_Mix_model_versus_datasets_299px"
    Settings["graph_histories"] = ['together']  # ['all','together',[],[1,0],[0,0,0],[]]

    # Candidate datasets: 5556x_minlen30_640px 5556x_minlen20_640px
    # 5556x_reslen20_299px 5556x_reslen30_299px
    n = 0
    Settings["models"][n]["dataset_name"] = "5556x_reslen30_299px"
    Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
    Settings["models"][n]["pixels"] = 299
    Settings["models"][n]["model_type"] = 'img_osm_mix'
    Settings["models"][n]["unique_id"] = 'mix_minlen30_299px'
    Settings["models"][n]["top_repeat_FC_block"] = 2
    Settings["models"][n]["epochs"] = 800

    # Second model: same architecture, min segment length 20 dataset.
    Settings["models"].append(DefaultModel.copy())
    n += 1
    Settings["models"][n]["dataset_pointer"] = -1
    Settings["models"][n]["dataset_name"] = "5556x_reslen20_299px"
    Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
    Settings["models"][n]["pixels"] = 299
    Settings["models"][n]["model_type"] = 'img_osm_mix'
    Settings["models"][n]["unique_id"] = 'mix_minlen20_299px'
    Settings["models"][n]["top_repeat_FC_block"] = 2
    Settings["models"][n]["epochs"] = 800

    # Third model: un-split (no length threshold) dataset variant.
    Settings["models"].append(DefaultModel.copy())
    n += 1
    Settings["models"][n]["dataset_pointer"] = -1
    Settings["models"][n]["dataset_name"] = "5556x_mark_res_299x299"
    Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
    Settings["models"][n]["pixels"] = 299
    Settings["models"][n]["model_type"] = 'img_osm_mix'
    Settings["models"][n]["unique_id"] = 'mix_nosplit_299px'
    Settings["models"][n]["top_repeat_FC_block"] = 2
    Settings["models"][n]["epochs"] = 800

    return Settings
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from gui.dwidgets import DMenu
class SettingsMenu(DMenu):
    """Application settings menu built from a declarative item list.

    Each entry in ``menuItems`` describes one action (name, icon, shortcut
    and trigger id); submenus carry a nested ``actions`` list.  The parent
    DMenu's ``creatMenus`` turns the list into actions named
    ``<trigger>Action``.  The two language actions behave as a mutually
    exclusive (radio-style) pair.
    """

    def __init__(self, parent=None):
        super(SettingsMenu, self).__init__(parent)
        self.parent = parent
        self.menuItems = [
            {
                'name': self.tr('Login'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Login',
            },
            {
                'name': self.tr('Show suspension window'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Suspension',
            },
            {
                'name': self.tr('Show float window'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Float',
            },
            {
                'name': self.tr('Show Dock window'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Dock',
            },
            {
                'name': self.tr('Language'),
                'trigger': 'Language',
                'type': 'submenu',
                'actions': [
                    {
                        'name': 'English',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'English',
                        "checkable": True
                    },
                    {
                        'name': 'Chinese',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'Chinese',
                        "checkable": True
                    },
                ]
            },
            {
                'name': self.tr('Document'),
                'trigger': 'Document',
                'type': 'submenu',
                'actions': [
                    {
                        'name': 'Android developer guide',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'AndroidDeveloper',
                        "checkable": False
                    },
                    {
                        'name': 'iOS developer guide',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'IOSDeveloper',
                        "checkable": False
                    },
                    {
                        'name': 'Ford developer center',
                        'icon': u'',
                        'shortcut': u'',
                        'trigger': 'FordDeveloper',
                        "checkable": False
                    },
                ]
            },
            {
                'name': self.tr('ObjectView'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'ObjectView',
            },
            {
                'name': self.tr('About'),
                'icon': u'',
                'shortcut': u'Qt.Key_F12',
                'trigger': 'About',
            },
            {
                'name': self.tr('Exit'),
                'icon': u'',
                'shortcut': u'',
                'trigger': 'Exit',
            },
        ]
        # "creatMenus" (sic) is the DMenu API name -- do not rename here.
        self.creatMenus(self.menuItems)
        self.initConnect()
        # English is the default language selection.
        getattr(self, '%sAction' % 'English').setChecked(True)

    def initConnect(self):
        """Wire both language actions to the exclusive-check handler."""
        for item in ['English', 'Chinese']:
            getattr(self, '%sAction' % item).triggered.connect(self.updateChecked)

    def updateChecked(self):
        """Check only the language action that fired; uncheck the other."""
        for item in ['English', 'Chinese']:
            action = getattr(self, '%sAction' % item)
            if self.sender() is action:
                action.setChecked(True)
            else:
                action.setChecked(False)
|
import sys
import os
import os.path
from jinja2 import Template
from ConfigParser import ConfigParser
import StringIO
if __name__ == "__main__":
    # Render a Jinja2 deploy-config template in place, backing up the
    # original to "<template>.orig".  Properties come either from a config
    # file (argv[2]) or, as a fallback, from KBASE_ENDPOINT-style env vars.
    if len(sys.argv) != 3:
        print("Usage: <program> <deploy_cfg_template_file> <file_with_properties>")
        print("Properties from <file_with_properties> will be applied to <deploy_cfg_template_file>")
        print("template which will be overwritten with .orig copy saved in the same folder first.")
        sys.exit(1)
    # Read the template (using 'with' so the handle is closed; the original
    # code leaked it).
    with open(sys.argv[1], 'r') as template_file:
        text = template_file.read()
    t = Template(text)
    config = ConfigParser()
    if os.path.isfile(sys.argv[2]):
        config.read(sys.argv[2])
    elif "KBASE_ENDPOINT" in os.environ:
        # No properties file: synthesize a [global] section from the
        # endpoint, deriving each service URL from the base endpoint.
        kbase_endpoint = os.environ.get("KBASE_ENDPOINT")
        props = "[global]\n" + \
                "kbase_endpoint = " + kbase_endpoint + "\n" + \
                "job_service_url = " + kbase_endpoint + "/userandjobstate\n" + \
                "workspace_url = " + kbase_endpoint + "/ws\n" + \
                "shock_url = " + kbase_endpoint + "/shock-api\n" + \
                "handle_url = " + kbase_endpoint + "/handle_service\n" + \
                "srv_wiz_url = " + kbase_endpoint + "/service_wizard\n" + \
                "njsw_url = " + kbase_endpoint + "/njs_wrapper\n"
        if "AUTH_SERVICE_URL" in os.environ:
            props += "auth_service_url = " + os.environ.get("AUTH_SERVICE_URL") + "\n"
        elif "auth2services" in kbase_endpoint:
            # auth2 deployments expose a legacy-compatible login endpoint
            props += "auth_service_url = " + kbase_endpoint + "/auth/api/legacy/KBase/Sessions/Login\n"
        props += "auth_service_url_allow_insecure = " + \
                 os.environ.get("AUTH_SERVICE_URL_ALLOW_INSECURE", "false") + "\n"
        config.readfp(StringIO.StringIO(props))
    else:
        raise ValueError('Neither ' + sys.argv[2] + ' file nor KBASE_ENDPOINT env-variable found')
    props = dict(config.items("global"))
    output = t.render(props)
    # Save a pristine copy, then overwrite the template with rendered output.
    with open(sys.argv[1] + ".orig", 'w') as f:
        f.write(text)
    with open(sys.argv[1], 'w') as f:
        f.write(output)
|
# typeclasses, an educational implementation of Haskell-style type
# classes, in Python
#
# Copyright (C) 2010 Nicolas Trangez <eikke eikke com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, version 2.1
# of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
'''Some demonstrations of the Eq typeclass and its `eq` and `ne` functions'''
from typeclasses.eq import eq, ne
import typeclasses.instances.list
import typeclasses.instances.tuple
from typeclasses.instances.maybe import Just, Nothing
from typeclasses.instances.tree import Branch, Leaf
# List
assert eq([1, 2, 3], [1, 2, 3])
assert ne([0, 1, 2], [1, 2, 3])
# Tuple
assert eq((1, 2, 3, ), (1, 2, 3, ))
assert ne((0, 1, 2, ), (1, 2, 3, ))
# Maybe: Nothing equals only itself; Just compares the wrapped value
assert eq(Nothing, Nothing)
assert eq(Just(1), Just(1))
assert ne(Just(1), Just(2))
assert ne(Just(1), Nothing)
# Tree: structural equality over branches and leaves
assert eq(Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)),
          Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)))
assert ne(Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)),
          Branch(Branch(Leaf(0), Leaf(1)), Branch(Leaf(2), Leaf(3))))
|
from __future__ import absolute_import
import sys
from kombu.transport import amqplib
from kombu.connection import BrokerConnection
from kombu.tests.utils import TestCase
from kombu.tests.utils import mask_modules, Mock
class MockConnection(dict):
    """Dict-backed connection stand-in: attribute writes become items."""

    def __setattr__(self, name, val):
        # Store every attribute assignment as a dictionary entry so tests
        # can inspect what the transport configured.
        self[name] = val
class Channel(amqplib.Channel):
    """Test double for the amqplib channel: skips the wire protocol.

    Overrides the handshake and send paths so a channel can be created
    and exercised without a broker; wait() returns a canned value.
    """

    # Canned reply returned by wait(); tests overwrite this per case.
    wait_returns = []

    def _x_open(self, *args, **kwargs):
        # Skip the real AMQP channel-open handshake.
        pass

    def wait(self, *args, **kwargs):
        # Return the canned reply instead of blocking on the socket.
        return self.wait_returns

    def _send_method(self, *args, **kwargs):
        # Drop outgoing AMQP methods; nothing is written anywhere.
        pass
class test_Channel(TestCase):
    """Unit tests for the amqplib transport channel wrapper."""

    def setUp(self):
        # Mocked connection with an empty channel registry.
        self.conn = Mock()
        self.conn.channels = {}
        self.channel = Channel(self.conn, 0)

    def test_init(self):
        # A fresh channel tracks no no-ack consumers.
        self.assertFalse(self.channel.no_ack_consumers)

    def test_prepare_message(self):
        # prepare_message must return a truthy message object.
        x = self.channel.prepare_message("foobar", 10,
                                         "application/data", "utf-8",
                                         properties={})
        self.assertTrue(x)

    def test_message_to_python(self):
        # A mocked raw message converts to a truthy kombu message.
        message = Mock()
        message.headers = {}
        message.properties = {}
        self.assertTrue(self.channel.message_to_python(message))

    def test_close_resolves_connection_cycle(self):
        # close() must break the channel -> connection reference cycle.
        self.assertIsNotNone(self.channel.connection)
        self.channel.close()
        self.assertIsNone(self.channel.connection)

    def test_basic_consume_registers_ack_status(self):
        # Only consumers started with no_ack=True are recorded by tag,
        # and cancelling removes the tag again.
        self.channel.wait_returns = "my-consumer-tag"
        self.channel.basic_consume("foo", no_ack=True)
        self.assertIn("my-consumer-tag", self.channel.no_ack_consumers)
        self.channel.wait_returns = "other-consumer-tag"
        self.channel.basic_consume("bar", no_ack=False)
        self.assertNotIn("other-consumer-tag", self.channel.no_ack_consumers)
        self.channel.basic_cancel("my-consumer-tag")
        self.assertNotIn("my-consumer-tag", self.channel.no_ack_consumers)
class test_Transport(TestCase):
    """Tests for the amqplib Transport via a real BrokerConnection."""

    def setUp(self):
        self.connection = BrokerConnection("amqplib://")
        self.transport = self.connection.transport

    def test_create_channel(self):
        # create_channel delegates to the underlying connection.
        connection = Mock()
        self.transport.create_channel(connection)
        connection.channel.assert_called_with()

    def test_drain_events(self):
        # drain_events forwards the timeout to the connection.
        connection = Mock()
        self.transport.drain_events(connection, timeout=10.0)
        connection.drain_events.assert_called_with(timeout=10.0)

    def test_dnspython_localhost_resolve_bug(self):
        # "localhost" must be rewritten to 127.0.0.1 before connecting
        # (works around a dnspython localhost-resolution bug); other
        # hostnames pass through unchanged.
        class Conn(object):
            def __init__(self, **kwargs):
                vars(self).update(kwargs)

        self.transport.Connection = Conn
        self.transport.client.hostname = "localhost"
        conn1 = self.transport.establish_connection()
        self.assertEqual(conn1.host, "127.0.0.1:5672")
        self.transport.client.hostname = "example.com"
        conn2 = self.transport.establish_connection()
        self.assertEqual(conn2.host, "example.com:5672")

    def test_close_connection(self):
        # close_connection clears the client backref and closes the socket.
        connection = Mock()
        connection.client = Mock()
        self.transport.close_connection(connection)
        self.assertIsNone(connection.client)
        connection.close.assert_called_with()

    def test_verify_connection(self):
        # A connection is considered alive iff it still has channels.
        connection = Mock()
        connection.channels = None
        self.assertFalse(self.transport.verify_connection(connection))
        connection.channels = {1: 1, 2: 2}
        self.assertTrue(self.transport.verify_connection(connection))

    @mask_modules("ssl")
    def test_import_no_ssl(self):
        # Re-import the module with ssl masked: SSLError must fall back to
        # the locally-defined stand-in class.  Restore the module either way.
        pm = sys.modules.pop("kombu.transport.amqplib")
        try:
            from kombu.transport.amqplib import SSLError
            self.assertEqual(SSLError.__module__, "kombu.transport.amqplib")
        finally:
            if pm is not None:
                sys.modules["kombu.transport.amqplib"] = pm
class test_amqplib(TestCase):
    """Connection-level tests: host string includes the resolved port."""

    def test_default_port(self):
        # With port=None the transport's default port must be used.
        class Transport(amqplib.Transport):
            Connection = MockConnection

        c = BrokerConnection(port=None, transport=Transport).connect()
        self.assertEqual(c["host"],
                         "127.0.0.1:%s" % (Transport.default_port, ))

    def test_custom_port(self):
        # An explicit port overrides the transport default.
        class Transport(amqplib.Transport):
            Connection = MockConnection

        c = BrokerConnection(port=1337, transport=Transport).connect()
        self.assertEqual(c["host"], "127.0.0.1:1337")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
fabfile.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
try:
f | rom config import MACHINES
except ImportError as e:
print "You should cp config.py.sample | config.py, and modify it then"
raise e
for node in MACHINES['slave']:
print node
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2010 - 2011, University of New Orleans
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# --
#
# Heatmap generator.
#
# This application could be used like this, too:
#
# python generate_rpy_heatmap.py -a ABUNDANCE_FILE -p 10 -m 1 --row-text-size=1.5 --column-text-size=1 --margin-right=20 --margin-bottom=10 --width=1000 --height=1200 -l
#
import os
import sys
import math
import numpy
import numpy.core.numeric
from rpy2 import robjects
import rpy2.robjects.numpy2ri
from rpy2.robjects.packages import importr
from optparse import OptionParser
def main(options, analyses_dir = ''):
    """Read the abundance table, filter rows, and render the heatmap.

    When *analyses_dir* is non-empty, all input/output paths are resolved
    relative to it.  Rows (bacteria) are kept only if their total abundance
    exceeds --min-percentage and they are present in more than --min-present
    samples; kept rows are optionally log10(x+1)-scaled.  (Python 2 code:
    print statements, has_key, map.)
    """
    if len(analyses_dir):
        sample_color_map_file = os.path.join(analyses_dir, options.sample_color_map_file)
        abundance_file = os.path.join(analyses_dir, options.abundance_file)
        options.output_file = os.path.join(analyses_dir, options.output_file)
    else:
        abundance_file = options.abundance_file
        sample_color_map_file = options.sample_color_map_file
    # first line of the abundance file to see what sample names are
    # (first column is the row label, hence [1:])
    col_names = open(abundance_file).readline().strip().split("\t")[1:]
    # creating an entry for every bacterium in the abundance file
    row_names_non_scaled = []
    exprs_non_scaled = []
    row_names = []
    exprs = []
    for line in open(abundance_file).readlines()[1:]:
        row_names_non_scaled.append(line.strip().split("\t")[0])
        exprs_non_scaled.append(map(float, line.strip().split("\t")[1:]))
    # Filter rows by total abundance and sample presence; note both
    # thresholds are strict (>), so the defaults of 0 still drop all-zero rows.
    for i in range(0, len(row_names_non_scaled)):
        if sum(exprs_non_scaled[i]) > options.min_percentage and len([x for x in exprs_non_scaled[i] if x > 0.0]) > options.min_present:
            if options.log:
                # log10(x + 1) keeps zeros at zero and compresses the range
                exprs.append([math.log10(x + 1) for x in exprs_non_scaled[i]])
                row_names.append(row_names_non_scaled[i])
            else:
                exprs.append(exprs_non_scaled[i])
                row_names.append(row_names_non_scaled[i])
        else:
            print "* Discarding '%s' (total percentage: %f, present in %d sample(s))." % (row_names_non_scaled[i], sum(exprs_non_scaled[i]), len([x for x in exprs_non_scaled[i] if x > 0.0]))
    print "\n%i samples, %i bacteria\n" % (len(col_names), len(row_names))
    data_matrix = numpy.array(exprs)
    # Optional per-sample colors: 3-column TSV of sample / description / color.
    sample_color_map = {}
    if sample_color_map_file:
        for sample, desc, color in [x.strip().split('\t') for x in open(sample_color_map_file).readlines() if len(x.strip().split('\t')) == 3]:
            sample_color_map[sample] = {'description': desc, 'color': color}
    def sample_colour(sample_id):
        # Samples without an entry default to white.
        if sample_color_map.has_key(sample_id):
            return sample_color_map[sample_id]['color']
        else:
            return '#FFFFFF'
    if len(col_names) < 2 or len(row_names) < 2:
        raise Exception, "Number of columns or rows can't be smaller than 2 in a heatmap (you might have enetered some criteria that eliminates all OTU's or samples)."
    #bioDist = importr('bioDist')
    generate_heatmap(options, col_names, row_names, data_matrix, sample_colours = map(sample_colour, col_names))#,dist_func=bioDist.spearman_dist
    return
def generate_heatmap(options, col_names, row_names, data_matrix, sample_colours, dist_func=robjects.r.dist):
    """Render *data_matrix* as a PNG heatmap via R's gplots heatmap.2.

    Image size comes from --width/--height, otherwise it is derived from
    the matrix dimensions (with 500x400 minimums so labels stay readable).
    *sample_colours* is one hex color per column, drawn as a side bar.
    """
    robjects.r.library('gplots')
    grdevices = importr('grDevices')
    # Fall back to ~25px per row / ~20px per column when sizes aren't given.
    h = options.height or len(row_names) * 25
    if h < 400:
        h = 400
    w = options.width or len(col_names) * 20
    if w < 500:
        w = 500
    grdevices.png(options.output_file, width=w, height=h)
    # NOTE(review): rpy2 translates '_' in keyword names to '.' for R, so
    # density_info here presumably maps to heatmap.2's density.info -- verify.
    robjects.r('heatmap.2')(data_matrix,
                            labRow=row_names,
                            scale=options.scale,
                            labCol=col_names,
                            ColSideColors=robjects.StrVector(sample_colours),
                            col=robjects.r('redgreen')(100),
                            distfun=dist_func,
                            key=True,
                            symkey=False,
                            density_info="none",
                            trace="none",
                            margins=robjects.r.c(options.margin_bottom, options.margin_right), # margin right and bottom
                            cexRow=options.cexRow, # Y axis text size
                            cexCol=options.cexCol) # X axis text size
    grdevices.dev_off()
    return
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-a", "--abundance-file", dest="abundance_file",
type="string", help="abundance file name", metavar="FILE")
parser.add_option("-c", "--sample-color-map-file", dest="sample_color_map_file",
type="string", help="sample color map file. every line should have three columns: SAMPLE ONE_WORD_SAMPLE_DESCRIPTION COLOR", metavar="FILE")
parser.add_option("-o", "--output-file", dest="output_file", default="heatmap.png",
help="file name for the PNG", metavar="FILE")
parser.add_option("-s", "--scale", dest="scale", default="column",
help="scale either columns or rows", metavar="[row|column]")
parser.add_option("-m", "--min-percentage", dest="min_percentage", type="float", default=0.0,
help="minimum total percentage of a bug in all samples (can be bigger than 100%%)")
parser.add_option("-p", "--min-present", dest="min_present", type="int", default=0,
help="minimum total number of samples have this bug (if it present in less than --min-present samples, bug would be discarded)")
parser.add_option("-l", "--log", dest="log", default=False, action="store_true",
help="apply log10 to abundance percentages (log(abundance percentage + 1))")
parser.add_option("--width", type="int", dest="width", default=0,
help="width of the heatmap image (pixels)")
parser.add_option("--height", type="int", dest="height", default=0,
help="height of the heatmap image (pixels)")
parser.add_option("--margin-right", type="int", dest="margin_right", default=20,
help="text area between the map and the right side of the image")
parser.add_option("--margin-bottom", type="int", dest="margin_bottom", default=10,
help="text area between the map and the bottom of the image")
parser.add_option("--row-text-size", type="float", dest="cexRow", default=1.5,
help="row text size")
parser.add_option("--column-text-size", type="float", dest="cexCol", default=1,
help="column text size")
(options, args) = parser.parse_args()
if options.abundance_file == None:
print "Error: You need to provide an input file (percent or total count abundance file)\n"
parser.print_help()
sys.exit(2)
main(options)
|
from .components_generic import System
from .ports_library import FluidPort, MechPort, CustomPort
class Duct(System):
    """Constant-loss duct: passes flow through with a total-pressure drop.

    Ports: 'fl_in' (param) -> 'fl_out' (output).
    """

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        self.params['fl_in'] = FluidPort()
        self.outputs['fl_out'] = FluidPort()
        self.set()
        # Fraction of inlet total pressure retained at the outlet.
        self.cst_loss = 0.99

    def run(self):
        # Pt drops by the loss factor; W and Tt pass through unchanged.
        self.fl_out.Pt = self.cst_loss*self.fl_in.Pt
        self.fl_out.W = self.fl_in.W
        self.fl_out.Tt = self.fl_in.Tt
class Splitter(System):
    """Splits one fluid stream into two branches by a mass-flow ratio.

    Ports: 'fl_in' (param) -> 'fl1_out' and 'fl2_out' (outputs).
    Pt and Tt are copied unchanged to both branches.
    """

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        self.params['fl_in'] = FluidPort()
        self.outputs['fl1_out'] = FluidPort()
        self.outputs['fl2_out'] = FluidPort()
        self.set()
        # Fraction of the inlet mass flow sent to branch 1.
        self.split_ratio = 0.99

    def run(self):
        src = self.fl_in
        # Both branches inherit the inlet pressure and temperature.
        for branch in (self.fl1_out, self.fl2_out):
            branch.Pt = src.Pt
            branch.Tt = src.Tt
        # Mass flow is divided between the two branches by the ratio.
        self.fl1_out.W = src.W * self.split_ratio
        self.fl2_out.W = src.W * (1 - self.split_ratio)
class Merger(System):
    """Merges two fluid streams into one.

    Ports: 'fl1_in' and 'fl2_in' (params) -> 'fl_out' (output).
    Mass flows add; Pt and Tt are taken from the first stream only.
    """

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        for key in ('fl1_in', 'fl2_in'):
            self.params[key] = FluidPort()
        self.outputs['fl_out'] = FluidPort()
        self.set()

    def run(self):
        primary, secondary = self.fl1_in, self.fl2_in
        # Pressure/temperature come from the primary stream only.
        self.fl_out.Pt = primary.Pt
        self.fl_out.Tt = primary.Tt
        self.fl_out.W = primary.W + secondary.W
class Atm(System):
    """Ambient boundary condition: fixed total pressure and temperature.

    The outlet carries no mass-flow variable -- W is removed from the
    port, so downstream components impose the flow rate.
    """

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        outlet = FluidPort()
        outlet.remove('W')  # atmosphere does not prescribe a flow rate
        self.outputs['fl_out'] = outlet
        self.set()

    def run(self):
        # Standard pressure (Pa) and 0 degC expressed in kelvin.
        self.fl_out.Pt = 101325
        self.fl_out.Tt = 273.15
class Inlet(System):
    """Engine inlet: small pressure loss, mass flow imposed externally.

    'fl_in' has its W variable removed; the flow rate instead comes in
    through the custom 'W_in' port.
    """

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        upstream = FluidPort()
        upstream.remove('W')  # flow rate is supplied via W_in instead
        self.params['fl_in'] = upstream
        self.outputs['fl_out'] = FluidPort()
        self.add_port('W_in', CustomPort({'W': 100}), 'in')
        self.set()

    def run(self):
        # 0.5% total-pressure loss across the inlet; Tt is conserved.
        self.fl_out.Pt = 0.995 * self.fl_in.Pt
        self.fl_out.Tt = self.fl_in.Tt
        self.fl_out.W = self.W_in.W
class Fan(System):
    """Simplified fan map: pressure ratio and flow driven by shaft speed.

    Ports: 'fl_in', 'mech_in' (params), 'gh_in' (custom handle input) ->
    'fl_out' (output).  Publishes 'Wfan' and 'PWfan' residuals that close
    the mass-flow and shaft-power balances.
    """

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        self.params['fl_in'] = FluidPort()
        self.params['mech_in'] = MechPort()
        self.outputs['fl_out'] = FluidPort()
        self.add_port('gh_in', CustomPort({'gh': 0.1}), 'in')
        self.set()
        #self.residuals['Wfan'] = self.fl_out.W / self.fl_in.W - 1

    def run(self):
        fl_in = self.fl_in
        fl_out = self.fl_out
        # Corrected speed; guarded because Tt may be zero/unset at startup.
        # (Narrowed from a bare 'except:' so Ctrl-C is not swallowed.)
        try:
            self.PCNR = self.mech_in.XN / (fl_in.Tt/288.15)**0.5
        except Exception:
            self.PCNR = 0.
        # Pressure ratio grows with corrected speed plus the gh handle;
        # temperature follows an isentropic-like relation (gamma = 1.4).
        fl_out.Pt = fl_in.Pt*(0.01*(self.PCNR+self.gh_in.gh)+1)
        fl_out.Tt = fl_in.Tt*(fl_out.Pt/fl_in.Pt)**(1-1/1.4)
        try:
            fl_out.W = 2*(1-self.gh_in.gh)*self.PCNR/(fl_in.Tt/288.15)**0.5*(fl_in.Pt/101325.)
        except Exception:
            fl_out.W = 0.
        # Use absolute residual forms near zero to avoid division blow-ups.
        if fl_in.W < 1e-5:
            self.residuals['Wfan'] = fl_out.W - fl_in.W
        else:
            self.residuals['Wfan'] = fl_out.W / fl_in.W - 1
        if self.mech_in.PW < 1e-5:
            self.residuals['PWfan'] = (fl_out.W*1004*(fl_out.Tt - fl_in.Tt)) - self.mech_in.PW
        else:
            self.residuals['PWfan'] = (fl_out.W*1004*(fl_out.Tt-fl_in.Tt))/self.mech_in.PW-1
class Nozzle(System):
    """Nozzle boundary: publishes a corrected-flow residual for the throat.

    Ports: 'fl_in' (param).  The 'WRnozzle' residual compares corrected
    mass flow against the throat area Acol.
    """

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        self.params['fl_in'] = FluidPort()
        self.set()
        self.residuals = {'WRnozzle': 0.}
        self.Acol = 0.4   # throat area
        self.Aexit = 0.5  # exit area (not used in run yet)

    def run(self):
        fl = self.fl_in
        # Ratio form when Acol divides cleanly; fall back to a difference
        # form on failure.  (Narrowed from a bare 'except:'.)
        try:
            Qwr = fl.W*(fl.Tt/288.15)**0.5/(fl.Pt/101325)/241./self.Acol-1
        except Exception:
            Qwr = fl.W*(fl.Tt/288.15)**0.5/(fl.Pt/101325)/241. - self.Acol
        self.residuals['WRnozzle'] = Qwr
class FanComplex(System):
    """Assembly: a Fan fed from the bleed branch of a DuctComplex."""

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        self.add(DuctComplex('duct'))
        self.add(Fan('fan'))
        # Wire the fan inlet to the first bleed output of the inner duct.
        self.fan.params['fl_in'] = self.duct.bleed.outputs['fl1_out']
        self.set()
        self.version = 1.
        # Inner duct is lossless inside this assembly.
        self.duct.cst_loss = 1.

    def run(self):
        # Child systems run themselves; nothing to compute at this level.
        pass
class DuctComplex(System):
    """Assembly: duct with a bleed splitter whose second branch
    recirculates back into a merger upstream of the duct."""

    def __init__(self, name='n/a'):
        super().__init__()
        self.name = name
        self.add(Duct('duct'))
        self.add(Merger('merger'))
        self.add(Splitter('bleed'))
        # merger -> duct -> bleed; bleed branch 2 loops back to the merger.
        self.duct.params['fl_in'] = self.merger.outputs['fl_out']
        self.bleed.params['fl_in'] = self.duct.outputs['fl_out']
        self.merger.params['fl2_in'] = self.bleed.outputs['fl2_out']
        self.set()
        self.version = 1.
        # Duct configured lossless within this assembly.
        self.duct.cst_loss = 1.

    def run(self):
        # Child systems run themselves; nothing to compute at this level.
        pass
|
get('params', {})
params['capacity'] = capacity
self._vol_api_submit(vol_id, method='POST', action='grow',
params=params, **kwargs)
def _snap_api_submit(self, vol_id, snap_id, **kwargs):
    """Submit a snapshot-scoped backend API request.

    quote(..., '') escapes '/' too, so each id stays one URL segment.
    """
    vol_id = urllib.parse.quote(vol_id, '')
    snap_id = urllib.parse.quote(snap_id, '')
    rel_url = "/volumes/%s/snapshots/%s" % (vol_id, snap_id)
    return self.client.submit(rel_url, **kwargs)
def _create_snapshot(self, vol_id, snap_id, params, **kwargs):
    """Execute a backend snapshot create operation."""
    # PUT to /volumes/<vol>/snapshots/<snap>; the response is discarded.
    self._snap_api_submit(vol_id, snap_id, method='PUT',
                          params=params, **kwargs)
def _delete_snapshot(self, vol_id, snap_id, **kwargs):
    """Execute a backend snapshot delete operation."""
    return self._snap_api_submit(vol_id, snap_id, method='DELETE',
                                 **kwargs)
def _export_api_submit(self, vol_id, ini_name, **kwargs):
    """Submit an export-scoped API request for a volume/initiator pair.

    quote(..., '') escapes '/' too, so each id stays one URL segment.
    """
    vol_id = urllib.parse.quote(vol_id, '')
    ini_name = urllib.parse.quote(ini_name, '')
    rel_url = "/volumes/%s/exports/%s" % (vol_id, ini_name)
    return self.client.submit(rel_url, **kwargs)
def _create_export(self, vol_id, ini_name, params, **kwargs):
    """Execute a backend volume export operation."""
    # PUT creates/updates the export; the response is returned to the
    # caller (it carries the target connection details).
    return self._export_api_submit(vol_id, ini_name, method='PUT',
                                   params=params, **kwargs)
def _delete_export(self, vol_id, ini_name, **kwargs):
    """Remove a previously created volume export."""
    self._export_api_submit(vol_id, ini_name, method='DELETE',
                            **kwargs)
def _get_pool_stats(self, pool, query, **kwargs):
"""Retrieve pool statistics and capabilities."""
pq = {
'pool': pool,
'query': query,
}
pq.update(kwargs)
return self.client.submit('/status', params=pq)
def _get_dbref_name(self, ref):
display_name = ref.get('display_name')
if not display_name:
return ref.get('name')
return display_name
def _get_query_string(self, ctxt, volume):
    """Resolve the backend query string for *volume*.

    Uses the pool named in the volume's host string if present, else the
    configured default pool; with neither, picks an arbitrary configured
    pool.  *ctxt* is currently unused.  Raises KeyError if the named pool
    is not configured.
    """
    pools = self.configuration.blockbridge_pools
    default_pool = self.configuration.blockbridge_default_pool
    explicit_pool = volume_utils.extract_host(volume['host'], 'pool')

    pool_name = explicit_pool or default_pool
    if pool_name:
        return pools[pool_name]
    else:
        # no pool specified or defaulted -- just pick whatever comes out of
        # the dictionary first.
        return list(pools.values())[0]
def create_volume(self, volume):
    """Create a volume on a Blockbridge EPS backend.

    :param volume: volume reference
    """
    ctxt = context.get_admin_context()
    create_params = {
        'name': self._get_dbref_name(volume),
        'query': self._get_query_string(ctxt, volume),
        'capacity': int(volume['size'] * units.Gi),  # size is in GiB
    }
    LOG.debug("Provisioning %(capacity)s byte volume "
              "with query '%(query)s'", create_params, resource=volume)

    return self._create_volume(volume['id'],
                               create_params,
                               user_id=volume['user_id'],
                               project_id=volume['project_id'])
def create_cloned_volume(self, volume, src_vref):
    """Creates a clone of the specified volume.

    :param volume: new volume reference
    :param src_vref: source volume reference to clone from
    """
    create_params = {
        'name': self._get_dbref_name(volume),
        'capacity': int(volume['size'] * units.Gi),  # size is in GiB
        'src': {
            'volume_id': src_vref['id'],
        },
    }

    LOG.debug("Cloning source volume %(id)s", src_vref, resource=volume)

    return self._create_volume(volume['id'],
                               create_params,
                               user_id=volume['user_id'],
                               project_id=volume['project_id'])
def delete_volume(self, volume):
    """Remove an existing volume.

    :param volume: volume reference
    """
    LOG.debug("Removing volume %(id)s", volume, resource=volume)

    return self._delete_volume(volume['id'],
                               user_id=volume['user_id'],
                               project_id=volume['project_id'])
def create_snapshot(self, snapshot):
    """Create snapshot of existing volume.

    :param snapshot: snapshot reference
    """
    create_params = {
        'name': self._get_dbref_name(snapshot),
    }

    LOG.debug("Creating snapshot of volume %(volume_id)s", snapshot,
              resource=snapshot)

    return self._create_snapshot(snapshot['volume_id'],
                                 snapshot['id'],
                                 create_params,
                                 user_id=snapshot['user_id'],
                                 project_id=snapshot['project_id'])
def create_volume_from_snapshot(self, volume, snapshot):
    """Create new volume from existing snapshot.

    :param volume: reference of volume to be created
    :param snapshot: reference of source snapshot
    """
    create_params = {
        'name': self._get_dbref_name(volume),
        'capacity': int(volume['size'] * units.Gi),  # size is in GiB
        'src': {
            'volume_id': snapshot['volume_id'],
            'snapshot_id': snapshot['id'],
        },
    }

    LOG.debug("Creating volume from snapshot %(id)s", snapshot,
              resource=volume)

    return self._create_volume(volume['id'],
                               create_params,
                               user_id=volume['user_id'],
                               project_id=volume['project_id'])
def delete_snapshot(self, snapshot):
    """Delete volume's snapshot.

    :param snapshot: snapshot reference
    """
    LOG.debug("Deleting snapshot of volume %(volume_id)s", snapshot,
              resource=snapshot)
    # No return value: the DELETE result is intentionally discarded.
    self._delete_snapshot(snapshot['volume_id'],
                          snapshot['id'],
                          user_id=snapshot['user_id'],
                          project_id=snapshot['project_id'])
def create_export(self, _ctx, volume, connector):
    """Do nothing: target created during instance attachment."""
    # Export setup happens in initialize_connection instead.
    pass
def ensure_export(self, _ctx, volume):
    """Do nothing: target created during instance attachment."""
    # Export setup happens in initialize_connection instead.
    pass
def remove_export(self, _ctx, volume):
    """Do nothing: target created during instance attachment."""
    # Export teardown is handled via the detach path instead.
    pass
def initialize_connection(self, volume, connector, **kwargs):
"""Attach volume to initiator/host.
Creates a profile for the initiator, and adds the new profile to the
target ACL.
"""
# generate a CHAP secret here -- there is no way to retrieve an
# existing CHAP secret over the Blockbridge API, so it must be
# supplied by the volume driver.
export_params = {
'chap_user': (
kwargs.get('user', volume_utils.generate_username(16))),
'chap_secret': (
kwargs.get('password', volume_utils.generate_password(32))),
}
LOG.debug("Configuring export for %(initiator)s", connector,
resource=volume)
rsp = self._create_export(volume['id'],
connector['initiator'],
export_params,
user_id=volume['user_id'],
project_id=volume['project_id'])
# combine locally generated chap credentials with target iqn/lun to
# present the attach properties.
target_portal = "%s:%s" % (rsp['target_ip'], rsp['target_port'])
properties = {
'target_discovered': False,
'target_portal': target_portal,
'target_iqn |
#
# Copyright (C) 2013 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=E1101
"""Status navlet"""
import simplejson
from datetime import datetime
from django.http import HttpResponse
from nav.django.utils import get_account
from nav.models.manage import Netbox
from nav.models.profiles import AccountNavlet
from nav.web.navlets import (Navlet, REFRESH_INTERVAL, NAVLET_MODE_VIEW,
NAVLET_MODE_EDIT)
from nav.web.webfront.utils import boxes_down
from nav.web.status.sections import get_user_sections
class StatusNavlet(Navlet):
    """Navlet for displaying status of ip-devices and services."""

    title = "Status"
    description = "Shows status for your ip-devices and services"
    refresh_interval = 1000 * 60 * 10  # Refresh every 10 minutes
    is_editable = True

    def get_template_basename(self):
        return "status"

    def get(self, request, *args, **kwargs):
        """Fetch all status and display it to user"""
        sections = get_user_sections(request.account)
        problems = 0
        for section in sections:
            # boxDown events are summarised separately in view mode,
            # so they are excluded from the generic problem count.
            if section.history and section.devicehistory_type != 'a_boxDown':
                problems += len(section.history)

        context = self.get_context_data(**kwargs)
        context['problems'] = problems
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        context = super(StatusNavlet, self).get_context_data(**kwargs)
        if self.mode == NAVLET_MODE_VIEW:
            down = boxes_down()
            # Count boxes that are only in shadow (unreachable via parent).
            num_shadow = 0
            for box in down:
                if box.netbox.up == Netbox.UP_SHADOW:
                    num_shadow += 1
            context['boxes_down'] = down
            context['num_shadow'] = num_shadow
            context['date_now'] = datetime.today()
        elif self.mode == NAVLET_MODE_EDIT:
            navlet = AccountNavlet.objects.get(pk=self.navlet_id)
            if not navlet.preferences:
                # This happens when navlet is added directly in sql and no
                # preference is set
                navlet.preferences = {REFRESH_INTERVAL: self.refresh_interval}
                navlet.save()
            # Stored interval is milliseconds; the edit form shows seconds.
            context['interval'] = navlet.preferences.get(
                REFRESH_INTERVAL, self.refresh_interval) / 1000
        return context

    def post(self, request):
        """Save refresh interval for this widget"""
        account = get_account(request)
        try:
            interval = int(request.POST.get('interval')) * 1000
        except (TypeError, ValueError):
            # int(None) raises TypeError when 'interval' is missing;
            # non-numeric input raises ValueError -- both are bad requests.
            return HttpResponse(status=400)

        try:
            navlet = AccountNavlet.objects.get(pk=self.navlet_id,
                                               account=account)
        except AccountNavlet.DoesNotExist:
            return HttpResponse(status=404)
        else:
            navlet.preferences[REFRESH_INTERVAL] = interval
            navlet.save()
            return HttpResponse(simplejson.dumps(navlet.preferences))
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# AZURE CLI EventHub - NAMESPACE TEST DEFINITIONS
import time
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
class EHConsumerGroupCURDScenarioTest(ScenarioTest):
    """End-to-end CRUD scenario for Event Hubs consumer groups via the CLI.

    Creates a namespace and an event hub, runs the consumer-group
    lifecycle (create/show/update/list/delete), then tears everything down.
    """

    @ResourceGroupPreparer(name_prefix='cli_test_eh_consumergroup')
    def test_eh_consumergroup(self, resource_group):
        # Shared template arguments for the `self.cmd` format strings below.
        self.kwargs.update({
            'loc': 'westus2',
            'rg': resource_group,
            'namespacename': self.create_random_name(prefix='eventhubs-nscli', length=20),
            # NOTE(review): this is a *set* literal (the ':' is inside the
            # strings), so the two tag tokens reach the CLI in arbitrary
            # order — confirm that is intended.
            'tags': {'tag1: value1', 'tag2: value2'},
            'sku': 'Standard',
            'tier': 'Standard',
            'eventhubname': self.create_random_name(prefix='eventhubs-eventhubcli', length=25),
            'isautoinflateenabled': 'True',
            'maximumthroughputunits': 4,
            'consumergroupname': self.create_random_name(prefix='clicg', length=20),
            'usermetadata1': 'usermetadata',
            'usermetadata2': 'usermetadata-updated'
        })
        # Create Namespace
        self.cmd('eventhubs namespace create --resource-group {rg} --name {namespacename} --location {loc} --tags {tags} --sku {sku} --enable-auto-inflate {isautoinflateenabled} --maximum-throughput-units {maximumthroughputunits}',
                 checks=[self.check('sku.name', self.kwargs['sku'])])
        # Get Created Namespace
        self.cmd('eventhubs namespace show --resource-group {rg} --name {namespacename}', checks=[self.check('sku.name', self.kwargs['sku'])])
        # Create Eventhub
        self.cmd('eventhubs eventhub create --resource-group {rg} --namespace-name {namespacename} --name {eventhubname}', checks=[self.check('name', self.kwargs['eventhubname'])])
        # Get Eventhub
        self.cmd('eventhubs eventhub show --resource-group {rg} --namespace-name {namespacename} --name {eventhubname}', checks=[self.check('name', self.kwargs['eventhubname'])])
        # Create ConsumerGroup
        self.cmd('eventhubs eventhub consumer-group create --resource-group {rg} --namespace-name {namespacename} --eventhub-name {eventhubname} --name {consumergroupname} --user-metadata {usermetadata1}', checks=[self.check('name', self.kwargs['consumergroupname'])])
        # Get Consumer Group
        self.cmd('eventhubs eventhub consumer-group show --resource-group {rg} --namespace-name {namespacename} --eventhub-name {eventhubname} --name {consumergroupname}', checks=[self.check('name', self.kwargs['consumergroupname'])])
        # Update ConsumerGroup
        self.cmd('eventhubs eventhub consumer-group update --resource-group {rg} --namespace-name {namespacename} --eventhub-name {eventhubname} --name {consumergroupname} --user-metadata {usermetadata2}', checks=[self.check('userMetadata', self.kwargs['usermetadata2'])])
        # Get ConsumerGroup List
        listconsumergroup = self.cmd('eventhubs eventhub consumer-group list --resource-group {rg} --namespace-name {namespacename} --eventhub-name {eventhubname}').output
        self.assertGreater(len(listconsumergroup), 0)
        # Delete ConsumerGroup
        self.cmd('eventhubs eventhub consumer-group delete --resource-group {rg} --namespace-name {namespacename} --eventhub-name {eventhubname} --name {consumergroupname}')
        # Delete Eventhub
        self.cmd('eventhubs eventhub delete --resource-group {rg} --namespace-name {namespacename} --name {eventhubname}')
        # Delete Namespace
        self.cmd('eventhubs namespace delete --resource-group {rg} --name {namespacename}')
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from .common import Common
class Production(Common):
    """Production settings: django-secure, S3 static/media storage,
    sendgrid email, memcached caching."""

    # This ensures that Django will be able to detect a secure connection
    # properly on Heroku.
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    # INSTALLED_APPS
    INSTALLED_APPS = Common.INSTALLED_APPS
    # END INSTALLED_APPS
    # SECRET KEY
    # Read from the environment at runtime; never hard-coded.
    SECRET_KEY = values.SecretValue()
    # END SECRET KEY
    # django-secure
    INSTALLED_APPS += ("djangosecure", )
    # set this to 60 seconds and then to 518400 when you can prove it works
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    # NOTE(review): SESSION_COOKIE_SECURE defaults to False while
    # SECURE_SSL_REDIRECT is True — confirm this is intentional.
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    # end django-secure
    # SITE CONFIGURATION
    # Hosts/domain names that are valid for this site
    # See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    # NOTE(review): "*" disables host validation; tighten for real deployments.
    ALLOWED_HOSTS = ["*"]
    # END SITE CONFIGURATION
    INSTALLED_APPS += ("gunicorn", )
    # STORAGE CONFIGURATION
    # See: http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += (
        'storages',
    )
    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    # See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
    AWS_ACCESS_KEY_ID = values.SecretValue()
    AWS_SECRET_ACCESS_KEY = values.SecretValue()
    AWS_STORAGE_BUCKET_NAME = values.SecretValue()
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False
    # see: https://github.com/antonagestam/collectfast
    AWS_PRELOAD_METADATA = True
    INSTALLED_APPS += ('collectfast', )
    # AWS cache settings, don't change unless you know what you're doing:
    AWS_EXPIRY = 60 * 60 * 24 * 7
    AWS_HEADERS = {
        'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
            AWS_EXPIRY, AWS_EXPIRY)
    }
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
    STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
    # END STORAGE CONFIGURATION
    # EMAIL
    DEFAULT_FROM_EMAIL = values.Value('Blog API <noreply@djangocali.com>')
    EMAIL_HOST = values.Value('smtp.sendgrid.com')
    EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
    EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
    EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
    EMAIL_SUBJECT_PREFIX = values.Value('[Blog API] ', environ_name="EMAIL_SUBJECT_PREFIX")
    EMAIL_USE_TLS = True
    SERVER_EMAIL = EMAIL_HOST_USER
    # END EMAIL
    # TEMPLATE CONFIGURATION
    # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    # Cached loader compiles templates once per process.
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    # END TEMPLATE CONFIGURATION
    # CACHING
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc
    # memcacheify is painful to install on windows.
    try:
        # See: https://github.com/rdegges/django-heroku-memcacheify
        from memcacheify import memcacheify
        CACHES = memcacheify()
    except ImportError:
        CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
    # END CACHING
    # Your production stuff: Below this line define 3rd party libary settings
|
, 'is_closed'):
closed = loop.is_closed()
else:
closed = loop._closed # XXX
if not closed:
loop.call_soon(loop.stop)
loop.run_forever()
loop.close()
@pytest.fixture(scope='session')
def unused_port():
    """Session fixture returning a callable that picks a free TCP port."""
    def find_free_port():
        # Binding to port 0 makes the OS hand out an ephemeral free port.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.bind(('127.0.0.1', 0))
            return sock.getsockname()[1]
    return find_free_port
@pytest.fixture
def create_connection(_closable, loop):
    """Factory fixture wrapping aioredis.create_connection."""
    async def connect(*args, **kwargs):
        kwargs.setdefault('loop', loop)
        connection = await aioredis.create_connection(*args, **kwargs)
        _closable(connection)
        return connection
    return connect
@pytest.fixture(params=[
    aioredis.create_redis,
    aioredis.create_redis_pool],
    ids=['single', 'pool'])
def create_redis(_closable, loop, request):
    """Parametrized factory fixture: plain client or client pool."""
    make_client = request.param

    async def connect(*args, **kwargs):
        kwargs.setdefault('loop', loop)
        client = await make_client(*args, **kwargs)
        _closable(client)
        return client
    return connect
@pytest.fixture
def create_pool(_closable, loop):
    """Factory fixture wrapping aioredis.create_pool."""
    async def make_pool(*args, **kwargs):
        kwargs.setdefault('loop', loop)
        pool_obj = await aioredis.create_pool(*args, **kwargs)
        _closable(pool_obj)
        return pool_obj
    return make_pool
@pytest.fixture
def create_sentinel(_closable, loop):
    """Factory fixture instantiating RedisSentinel clients."""
    async def make_sentinel(*args, **kwargs):
        kwargs.setdefault('loop', loop)
        # Fail fast on slow CIs when no explicit timeout is given.
        kwargs.setdefault('timeout', .001)
        sentinel_client = await aioredis.sentinel.create_sentinel(
            *args, **kwargs)
        _closable(sentinel_client)
        return sentinel_client
    return make_sentinel
@pytest.fixture
def pool(create_pool, server, loop):
    """Returns a RedisPool connected to the session test server."""
    return loop.run_until_complete(
        create_pool(server.tcp_address, loop=loop))
@pytest.fixture
def redis(create_redis, server, loop):
    """Returns a Redis client whose database is flushed before each test."""
    client = loop.run_until_complete(
        create_redis(server.tcp_address, loop=loop))
    loop.run_until_complete(client.flushall())
    return client
@pytest.fixture
def redis_sentinel(create_sentinel, sentinel, loop):
    """Returns a Redis Sentinel client connected to the test sentinel."""
    client = loop.run_until_complete(
        create_sentinel([sentinel.tcp_address], timeout=2, loop=loop))
    # Sanity check the connection before handing it to the test.
    assert loop.run_until_complete(client.ping()) == b'PONG'
    return client
@pytest.yield_fixture
def _closable(loop):
    """Collects connections during a test and closes them all on teardown."""
    pending = []
    try:
        # The fixture value is the list's append method: call it with any
        # connection that must be cleaned up.
        yield pending.append
    finally:
        close_waiters = []
        while pending:
            connection = pending.pop(0)
            connection.close()
            close_waiters.append(connection.wait_closed())
        if close_waiters:
            loop.run_until_complete(
                asyncio.gather(*close_waiters, loop=loop))
@pytest.fixture(scope='session')
def server(start_server):
    """Starts redis-server instance."""
    # Session-wide default server, cached by start_server under alias 'A'.
    return start_server('A')
@pytest.fixture(scope='session')
def serverB(start_server):
    """Starts redis-server instance."""
    # Second independent server (alias 'B') for multi-server tests.
    return start_server('B')
@pytest.fixture(scope='session')
def sentinel(start_sentinel, request, start_server):
    """Starts a redis-sentinel instance monitoring one master (masterA)."""
    # Master+slave pair used by tests that must never fail over.
    stable_master = start_server('master-no-fail')
    start_server('slave-no-fail', slaveof=stable_master)
    # Master+slave pair used by the failover tests.
    master_a = start_server('masterA')
    start_server('slaveA', slaveof=master_a)
    return start_sentinel('main', master_a, stable_master)
@pytest.fixture(params=['path', 'query'])
def server_tcp_url(server, request):
    """Builds redis:// URLs, passing options via the path or query string."""
    def make(**kwargs):
        address = server.tcp_address
        netloc = '{0.host}:{0.port}'.format(address)
        path = ''
        if request.param == 'path':
            # Password goes into the userinfo part, db into the path.
            if 'password' in kwargs:
                netloc = ':{0}@{1.host}:{1.port}'.format(
                    kwargs.pop('password'), address)
            if 'db' in kwargs:
                path = '/{}'.format(kwargs.pop('db'))
        query = urlencode(kwargs)
        return urlunparse(('redis', netloc, path, '', query, ''))
    return make
@pytest.fixture
def server_unix_url(server):
    """Builds unix:// URLs pointing at the test server's unix socket."""
    def make(**kwargs):
        return urlunparse(
            ('unix', '', server.unixsocket, '', urlencode(kwargs), ''))
    return make
# Internal stuff #
def pytest_addoption(parser):
    """Register the aioredis test-suite command line options."""
    parser.addoption('--redis-server', default=[],
                     action="append",
                     help="Path to redis-server executable,"
                          " defaults to `%(default)s`")
    # The three SSL fixture files share the same option shape.
    for flag, default_path, what in (
            ('--ssl-cafile', 'tests/ssl/cafile.crt', 'CA'),
            ('--ssl-dhparam', 'tests/ssl/dhparam.pem', 'DH params'),
            ('--ssl-cert', 'tests/ssl/cert.pem', 'CERT')):
        parser.addoption(flag, default=default_path,
                         help='Path to testing SSL {} file'.format(what))
    parser.addoption('--uvloop', default=False,
                     action='store_true',
                     help="Run tests with uvloop")
def _read_server_version(redis_bin):
args = [redis_bin, '--version']
with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
version = proc.stdout.readline().decode('utf-8')
for part in version.split():
if part.startswith('v='):
break
else:
raise RuntimeError(
"No version info can be found in {}".format(version))
return tuple(map(int, part[2:].split('.')))
@contextlib.contextmanager
def config_writer(path):
    """Context manager yielding a print-like line writer into *path*."""
    with open(path, 'wt') as config_file:
        def write(*args):
            print(*args, file=config_file)
        yield write
# Registry of started servers and their detected versions.
REDIS_SERVERS = []
VERSIONS = {}


def format_version(srv):
    """Render a server's cached version tuple, e.g. 'redis_v5.0.3'."""
    return 'redis_v{}'.format('.'.join(str(part) for part in VERSIONS[srv]))
@pytest.fixture(scope='session')
def start_server(_proc, request, unused_port, server_bin):
"""Starts Redis server instance.
Caches instances by name.
``name`` param -- instance alias
``config_lines`` -- optional list of config directives to put in config
(if no config_lines passed -- no config will be generated,
for backward compatibility).
"""
version = _read_server_version(server_bin)
verbose = request.config.getoption('-v') > 3
servers = {}
def timeout(t):
end = time.time() + t
while time.time() <= end:
yield True
raise RuntimeError("Redis startup timeout expired")
def maker(name, config_lines=None, *, slaveof=None, password=None):
assert slaveof is None or isinstance(slaveof, RedisServer), slaveof
if name in servers:
return servers[name]
port = unused_port()
tcp_address = TCPAddress('localhost', port)
if sys.platform == 'win32':
unixsocket = None
else:
unixsocket = '/tmp/aioredis.{}.sock'.format(port)
dumpfile = 'dump-{}.rdb'.format(port)
data_dir = tempfile.gettempdir()
dumpfile_path = os.path.join(data_dir, dumpfile)
stdout_file = os.path.join(data_dir, 'aioredis.{}.stdout'.format(port))
tmp_files = [dumpfile_path, stdout_file]
if config_lines:
config = os.path.join(data_dir, 'aioredis.{}.conf'.format(port))
with config_writer(config) as write:
write('daemonize no')
write('save ""')
write('dir ', data_dir)
write('dbfilename', dumpfile)
write('port', port)
if unixsocket:
write('unixsocket', unixsocket)
tmp_files.append(unixsocket)
if password:
write('requirepass "{}"'.format(password))
write('# extra config')
for line in config_lines:
write(line)
if slaveof is not None: |
# @author Jeff Lockhart <jwlock@umich.edu>
# Script for drawing the tripartite network underlying analysis.
# version 1.0
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import sys
#add the parent directory to the current session's path
sys.path.insert(0, '../')
from network_utils import *
#read our cleaned up data
df = pd.read_csv('../data/sgm_stud/merged.tsv', sep='\t')
#The list of codes we're interested in.
# Each entry names a binary code column in the merged TSV.  The *_absent
# codes are deliberately commented out so they stay off the graph.
code_cols = ['culture_problem',
             #'culture_absent',
             'culture_solution',
             'culture_helpless',
             'culture_victim',
             'cishet_problem',
             'cishet_victim',
             'cishet_solution',
             #'cishet_absent',
             'cishet_helpless',
             'sgm_victim',
             'sgm_problem',
             'sgm_helpless',
             #'sgm_absent',
             'sgm_solution',
             'school_problem',
             'school_solution',
             #'school_absent',
             'school_victim',
             'school_helpless',
             'community_problem',
             'community_solution',
             'community_helpless',
             #'community_absent',
             'community_victim']
def s_id(row):
    """Unique student id: university code followed by participant number."""
    return '{}{}'.format(row['uni'], row['Participant'])
def e_id(row):
    """Unique excerpt id: student id, a dash, then the excerpt start offset."""
    return '{}-{}'.format(row['s_id'], row['Start'])
# Derive the unique student and excerpt ids for every row.
df['s_id'] = df.apply(s_id, axis=1)
df['e_id'] = df.apply(e_id, axis=1)
#make a graph
g = nx.Graph()
#add all of our codes as nodes
for c in code_cols:
    g.add_node(c, t='code')
#add each excerpt of text as a node. Connect it with relevant
#students and codes.
st = []  # student ids, in first-seen order
ex = []  # excerpt ids, in row order
last = ''
for row in df.iterrows():
    #add the student node
    g.add_node(row[1]['s_id'], t='student')
    #if we haven't seen this student before, save the order we saw them in
    # (relies on the TSV being grouped by student)
    if last != row[1]['s_id']:
        last = row[1]['s_id']
        st.append(last)
    #add this excerpt node. Save its order to our list.
    g.add_node(row[1]['e_id'], t='excerpt')
    ex.append(row[1]['e_id'])
    #add the edge joining this student and excerpt.
    g.add_edge(row[1]['s_id'], row[1]['e_id'])
    #for each code this excerpt has, draw an edge to it
    for c in code_cols:
        if row[1][c]:
            g.add_edge(row[1]['e_id'], c)
#get a dictionary of our code nodes' labels
l = {}
for c in code_cols:
    l[c] = c
#fix the positions of each node type in columns
pos = dict()
#space out the student and code nodes to align with excerpt column height
# (the multipliers 5.57 and 90 were tuned by hand for this dataset)
pos.update((n, (1, i * 5.57)) for i, n in enumerate(st))
pos.update((n, (2, i)) for i, n in enumerate(ex))
pos.update((n, (3, i * 90)) for i, n in enumerate(code_cols))
#make our figure big so we can see
plt.figure(figsize=(20, 20))
#draw our nodes
nx.draw_networkx_nodes(g, pos, nodelist=st, node_color='r',
                       node_shape='^')
nx.draw_networkx_nodes(g, pos, nodelist=ex, node_color='b',
                       node_shape='o', alpha=0.5)
#draw our edges with low alpha so we can see
nx.draw_networkx_edges(g, pos, alpha=0.2)
#axes look silly
plt.axis('off')
#save the edges and nodes as one image
plt.savefig('../data/tripartite_unlabeled.png')
#save the labels for the codes as a different image
#this lets me edit them in with GIMP so that they're better positioned.
plt.figure(figsize=(20, 20))
nx.draw_networkx_labels(g, pos, labels=l, font_size=20)
nx.draw_networkx_edges(g, pos, alpha=0)
plt.axis('off')
# NOTE(review): 'labeles' looks like a typo for 'labels', but the name is
# kept since downstream steps may expect this exact file.
plt.savefig('../data/tripartite_labeles.png')
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Mathieu Jourdan
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A | PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from weboob.deprecated.browser import Page
from weboob.capabilities.bill import Subscription
class LoginPage(Page):
    """Login form page (Python 2 / weboob deprecated browser API)."""

    def login(self, login, password):
        """Fill in and submit the connection form with the credentials."""
        self.browser.select_form('symConnexionForm')
        # Field names are the portal's pageFlow bindings; do not alter them.
        self.browser["portlet_login_plein_page_3{pageFlow.mForm.login}"] = unicode(login)
        self.browser["portlet_login_plein_page_3{pageFlow.mForm.password}"] = unicode(password)
        self.browser.submit()
class HomePage(Page):
    """Landing page after login; nothing needs to be parsed here."""

    def on_loaded(self):
        pass
class AccountPage(Page):
    """Account overview page listing the client's subscriptions."""

    def get_subscription_list(self):
        """Yield a Subscription per row of the contracts table."""
        table = self.document.xpath('//table[@id="ensemble_contrat_N0"]')[0]
        # NOTE(review): `table` is already the first matched element, so
        # len(table) counts its children and table[0] is its first child;
        # the '//tbody/tr' xpath below searches the whole document anyway,
        # so this still works but looks accidental — confirm before
        # refactoring.
        if len(table) > 0:
            # some clients may have subscriptions to gas and electricity,
            # but they receive a single bill
            # to avoid "boobill details" and "boobill bills" returning the same
            # table twice, we could return only one subscription for both.
            # We do not, and "boobill details" will take care of parsing only the
            # relevant section in the bill files.
            for line in table[0].xpath('//tbody/tr'):
                cells = line.xpath('td')
                # Cell id carries the contract number, e.g. 'Contrat_12345'.
                snumber = cells[2].attrib['id'].replace('Contrat_', '')
                slabel = cells[0].xpath('a')[0].text.replace('offre', '').strip()
                # Renewal date is printed as dd/mm/yyyy.
                d = unicode(cells[3].xpath('strong')[0].text.strip())
                sdate = date(*reversed([int(x) for x in d.split("/")]))
                sub = Subscription(snumber)
                sub._id = snumber
                sub.label = slabel
                # NOTE(review): unicode() of an element may not yield just
                # its text — presumably meant to be the subscriber name;
                # verify against a real page.
                sub.subscriber = unicode(cells[1])
                sub.renewdate = sdate
                yield sub
class TimeoutPage(Page):
    """Session-timeout page; nothing needs to be parsed here."""

    def on_loaded(self):
        pass
|
s.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import pytest
def test_torch_Accuracy():
    """Accuracy accumulates across calls: 3/4 correct, then 4/8 overall."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import Accuracy
    pred = torch.tensor([0, 2, 3, 4])
    target = torch.tensor([1, 2, 3, 4])
    acc = Accuracy()
    acc(pred, target)
    assert acc.compute() == 0.75
    pred = torch.tensor([0, 2, 3, 4])
    target = torch.tensor([1, 1, 2, 4])
    acc(pred, target)
    assert acc.compute() == 0.5
def test_torch_BinaryAccuracy():
    """BinaryAccuracy thresholds scores at 0.5 by default; a custom
    threshold can be passed per call."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import BinaryAccuracy
    target = torch.tensor([1, 1, 0, 0])
    pred = torch.tensor([0.98, 1, 0, 0.6])
    bac = BinaryAccuracy()
    bac(pred, target)
    assert bac.compute() == 0.75
    target = torch.tensor([1, 1, 0, 0])
    pred = torch.tensor([0.98, 1, 0, 0.6])
    # With threshold 0.7 the 0.6 score becomes a correct negative: 7/8.
    bac(pred, target, threshold=0.7)
    assert bac.compute() == 0.875
def test_torch_CategoricalAccuracy():
    """CategoricalAccuracy compares argmax of scores with one-hot targets."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import CategoricalAccuracy
    pred = torch.tensor([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    target = torch.tensor([[0, 0, 1], [0, 1, 0]])
    cacc = CategoricalAccuracy()
    cacc(pred, target)
    assert cacc.compute() == 0.5
    pred = torch.tensor([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    target = torch.tensor([[0, 1, 0], [0, 1, 0]])
    cacc(pred, target)
    assert cacc.compute() == 0.75
def test_torch_SparseCategoricalAccuracy():
    """SparseCategoricalAccuracy takes integer class targets, with or
    without a trailing singleton dimension."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import SparseCategoricalAccuracy
    pred = torch.tensor([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    target = torch.tensor([[2], [1]])
    scacc = SparseCategoricalAccuracy()
    scacc(pred, target)
    assert scacc.compute() == 0.5
    pred = torch.tensor([[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    target = torch.tensor([2, 0])
    scacc(pred, target)
    assert scacc.compute() == 0.25
def test_torch_Top5Accuracy():
    """Top5Accuracy counts a hit when the target class is among the five
    highest-scoring classes; state accumulates across calls."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import Top5Accuracy
    pred = torch.tensor([[0.1, 0.9, 0.8, 0.4, 0.5, 0.2],
                         [0.05, 0.95, 0, 0.4, 0.5, 0.2]])
    target = torch.tensor([2, 2])
    top5acc = Top5Accuracy()
    top5acc(pred, target)
    assert top5acc.compute() == 0.5
    pred = torch.tensor([[0.1, 0.9, 0.8, 0.4, 0.5, 0.2],
                         [0.05, 0.95, 0, 0.4, 0.5, 0.2]])
    target = torch.tensor([[2], [1]])
    top5acc(pred, target)
    assert top5acc.compute() == 0.75
def test_torch_MAE():
    """MAE keeps a running mean of absolute errors across calls."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import MAE
    pred = torch.tensor([[1, -2], [1, 1]])
    target = torch.tensor([[0, 1], [0, 1]])
    m = MAE()
    m(pred, target)
    assert m.compute() == 1.25
    pred = torch.tensor([[1, 1], [1, 1]])
    target = torch.tensor([[0, 1], [0, 1]])
    m(pred, target)
    assert m.compute() == 0.875
    pred = torch.tensor([[1.5, 2.5], [1.0, 1.0]])
    target = torch.tensor([[0.2, 1.1], [0.5, 1.0]])
    m(pred, target)
    assert abs(m.compute() - 0.85) < 1e-7  # add fault tolerance for floating point precision
    pred = torch.tensor([[1.5, 2.5, 1.5, 2.5], [1.8, 2.0, 0.5, 4.5]])
    target = torch.tensor([[0, 1, 0, 0], [0, 1, 2, 2]])
    m(pred, target)
    assert abs(m.compute() - 1.2) < 1e-7
def test_torch_MSE():
    """MSE keeps a running mean of squared errors across calls."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import MSE
    pred = torch.tensor([[1, -2], [1, 1]])
    target = torch.tensor([[1, 1], [1, 1]])
    m = MSE()
    m(pred, target)
    assert m.compute() == 2.25
    pred = torch.tensor([[1, 1], [1, 1]])
    target = torch.tensor([[1, 1], [0, 1]])
    m(pred, target)
    assert m.compute() == 1.25
    pred = torch.tensor([[1.3, 1.0], [0.2, 1.0]])
    target = torch.tensor([[1.1, 1.0], [0.0, 1.0]])
    m(pred, target)
    assert abs(m.compute() - 0.84) < 1e-7
    pred = torch.tensor([[1.2, 1.2, 1.2, 1.8], [0.2, 0.8, 0.9, 1.1]])
    target = torch.tensor([[1, 1, 1, 2], [0, 1, 1, 1]])
    m(pred, target)
    assert abs(m.compute() - 0.517) < 1e-7
def test_torch_BinaryCrossEntropy():
    """BinaryCrossEntropy accepts 2-D or flat inputs; feeding the same
    batch twice leaves the running mean unchanged."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import BinaryCrossEntropy
    pred = torch.tensor([[0.6, 0.4], [0.4, 0.6]])
    target = torch.tensor([[0, 1], [0, 0]])
    entropy = BinaryCrossEntropy()
    entropy(pred, target)
    assert abs(entropy.compute() - 0.81492424) < 1e-6
    pred = torch.tensor([0.6, 0.4, 0.4, 0.6])
    target = torch.tensor([0, 1, 0, 0])
    entropy(pred, target)
    assert abs(entropy.compute() - 0.81492424) < 1e-6
def test_torch_CategoricalCrossEntropy():
    """CategoricalCrossEntropy with one-hot encoded targets."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import CategoricalCrossEntropy
    pred = torch.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    target = torch.tensor([[0, 1, 0], [0, 0, 1]])
    entropy = CategoricalCrossEntropy()
    entropy(pred, target)
    assert abs(entropy.compute() - 1.1769392) < 1e-6
def test_torch_SparseCategoricalCrossEntropy():
    """Same expected loss as the one-hot variant, but with integer targets."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import SparseCategoricalCrossEntropy
    pred = torch.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    target = torch.tensor([1, 2])
    entropy = SparseCategoricalCrossEntropy()
    entropy(pred, target)
    assert abs(entropy.compute() - 1.1769392) < 1e-6
def test_torch_KLDivergence():
    """KL divergence between predicted and target distributions."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import KLDivergence
    pred = torch.tensor([[0.6, 0.4], [0.4, 0.6]])
    target = torch.tensor([[0, 1], [0, 0]])
    div = KLDivergence()
    div(pred, target)
    assert abs(div.compute() - 0.45814) < 1e-5
def test_torch_Poisson():
    """Poisson loss between predictions and targets."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import Poisson
    pred = torch.tensor([[1, 1], [0, 0]])
    target = torch.tensor([[0, 1], [0, 0]])
    poisson = Poisson()
    poisson(pred, target)
    assert abs(poisson.compute() - 0.49999997) < 1e-6
def test_torch_AUC():
    """AUROC is exactly 1.0 when every positive outranks every negative."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import AUROC
    pred = torch.tensor([0.3, 0.4, 0.2, 0.5, 0.6, 0.7, 0.8])
    target = torch.tensor([0, 1, 0, 1, 1, 1, 1.0])
    auc = AUROC()
    auc(pred, target)
    # Use abs(): `x - 1.0 < 1e-6` was vacuously true for any AUC below 1.0,
    # so the original assertion could never catch a too-small score.
    assert abs(auc.compute() - 1.0) < 1e-6
def test_torch_ROC():
    """The ROC curve's last point must reach (1, 1) at the lowest
    threshold (0.3, the smallest prediction score)."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import ROC
    pred = torch.tensor([0.3, 0.6, 0.7, 0.8])
    target = torch.tensor([0, 1, 1, 1.0])
    roc = ROC()
    roc(pred, target)
    fpr, tpr, thresholds = roc.compute()
    assert fpr[4] == 1.
    assert tpr[4] == 1.
    # abs() makes the tolerance two-sided; the original one-sided
    # `z[4] - 0.3 < 10e-6` passed for any threshold below 0.3.
    assert abs(thresholds[4] - 0.3) < 10e-6
def test_torch_F1Score():
    """Micro F1 over three classes: 2 of 6 predictions correct -> ~1/3."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import F1Score
    target = torch.tensor([0, 1, 2, 0, 1, 2])
    preds = torch.tensor([0, 2, 1, 0, 0, 1])
    f1 = F1Score()
    f1(preds, target)
    score = f1.compute()
    # abs() so the test also fails when the score falls far *below* the
    # expectation (the original one-sided check could never fail there).
    assert abs(score - 0.3332) < 1e-3
def test_torch_Precision():
    """Precision at the default 0.5 threshold: TP=3, FP=1 -> 0.75."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import Precision
    target = torch.tensor([0, 1, 1, 0, 1, 1])
    preds = torch.tensor([0, 0.2, 1.0, 0.8, 0.6, 0.5])
    precision = Precision()
    precision(preds, target)
    # abs() makes the check two-sided; the original `x - 0.75 < 10e-6`
    # was vacuously true for any precision below 0.75.
    assert abs(precision.compute() - 0.75) < 10e-6
def test_torch_Recall():
    """Recall at the default 0.5 threshold: TP=3, FN=1 -> 0.75."""
    from bigdl.orca.learn.pytorch.pytorch_metrics import Recall
    target = torch.tensor([0, 1, 1, 0, 1, 1])
    preds = torch.tensor([0, 0.2, 1.0, 0.8, 0.6, 0.5])
    recall = Recall()
    recall(preds, target)
    # abs() makes the check two-sided; the original `x - 0.75 < 10e-6`
    # was vacuously true for any recall below 0.75.
    assert abs(recall.compute() - 0.75) < 10e-6
def | test_torch_PrecisionRecallCurve():
from bigdl.orca.learn.pytorch.pytorch_metrics import PrecisionRecallCurve
target = torch.tensor([0, 1, 1, 0, 1, 1])
preds = torch.tensor([0, 0.2, 1.0, 0.8, 0.6, 0.5])
curve = PrecisionRecallCurve()
curve(preds, target)
print(curve.compute())
precision, recall, thresholds = curve.compute()
assert (precision[0] - 0.8 < 10e-6)
assert (recall[0] - 1.0 < 10e-6)
assert (thresholds[0] - 0.2 < 10e-6)
if __name__ == "__ma |
# -*- coding: utf-8 -*-
### denote lines that need to be changed for different categories
import sys
reload(sys)
sys.setdefaultencoding("utf-8") # to handle UnicodeDecode errors
from math import ceil # top 20% of rankings
from traceback import format_exc # to handle errors
import pickle # to store article rankings
import json # for parsing the json response
from urllib2 import urlopen # to load urls
from os import path, listdir
from operator import itemgetter # to rank articles in the order of decreasing pageviews in a list
# from collections import OrderedDict # to store articles in the order of decreasing pageviews in a dict
from pageviews import format_date, article_views # to get pageviews
# cmlimit to specify number of articles to extract, max can be 500 (5000 for bots)
# cmtitle for name of Category to look in
# cmstartsortkeyprefix for starting the article listing from a particular alphabet or set of alphabets,
# 'b' for PA outdated
category_api_url = 'https://en.wikipedia.org/w/api.php?action=query&list=categorymembers&cmlimit=500&format=json&cmstartsortkeyprefix=m' ###
recdir = 'TL_records' + path.sep ###


def nextrecord():
    """Return the next record number as a zero-padded 9-character string.

    Record files in `recdir` are named with a 9-digit sequence-number
    prefix; the next number is one past the highest existing one.  Falls
    back to '000000001' when the directory is missing or empty, or when
    its newest entry has no numeric prefix.
    """
    try:
        records = listdir(recdir)
        record = 1 + int(max(records)[:9])
        ### todo: check for improperly named files
        return format(record, '09')
    except (OSError, ValueError):
        # Narrowed from a bare `except:` so genuine programming errors are
        # no longer silently swallowed.  OSError: recdir unreadable or
        # absent; ValueError: empty directory (max() of no entries) or a
        # non-numeric file-name prefix.
        return format(1, '09')
if __name__ == '__main__':
    # Fetch up to 500 members of the chosen category, rank them by
    # pageviews, pickle the ranking, then emit one question record file
    # per top-20% article.  (Python 2 script: print statements, urllib2.)
    #category_list = ['Category:All_Wikipedia_articles_in_need_of_updating',
    # 'Category:All_NPOV_disputes']
    try:
        category_url = '&cmtitle='.join([category_api_url, 'Category:All_NPOV_disputes']) ###
        json_obj = urlopen(category_url).read()
    except:
        # NOTE(review): on failure this still falls through to json.loads
        # with json_obj undefined, raising NameError — consider exiting here.
        print "Error while obtaining articles from Category API"
        print format_exc()
    readable_json = json.loads(json_obj)
    cnt = 0
    d = [] # list of lists of rankings to be stored in a pickle file
    # Collect (title, link, pageviews, category) for every category member.
    for ele in readable_json['query']['categorymembers']:
        title = ele['title']
        link = '/'.join(['https://en.wikipedia.org/wiki', title.replace(' ', '_')])
        categ = 'Category:All_NPOV_disputes' ###
        pageviews = article_views(title)
        print cnt+1, title, pageviews
        d.append([title, link, pageviews, categ])
        cnt = cnt+1
    # od = OrderedDict(sorted(d.items(), key=lambda t:t[1][1], reverse=True)) # ordered dict in descending order of final score
    od = sorted(d, key=itemgetter(2), reverse=True) # ordered list in descending order of pageviews
    print '\n\nArticle rankings based on pageviews:\n'
    for item in od:
        print item
    #with open('npov_b_ranking.pkl', 'wb') as f:
    with open('TL_pickles/npov_m_ranking.pkl', 'wb') as f: ###
        pickle.dump(od, f)
    # if __name__ == '__main__':
    # with open('PA_pickles/npov_m_ranking.pkl', 'rb') as f: ### use when od has already been created; comment above stuff
    # od = load(f)
    cnt = 0
    counter = int(ceil(0.2*len(od))) # top 20% of rankings
    #url = 'http://127.0.0.1:5000/ask' # url for POSTing to ask. Replace with Labs/PythonAnywhere instance if needed
    # Write an HTML question record file for each top-ranked article.
    for i in od:
        # POSTing to ask
        # data = {'question':'The article '+i[1]+' is in https://en.wikipedia.org/wiki/'+i[3]+'.\nHow would you resolve it?\n'+i[3],
        # 'iframeurl':i[1]}
        # r = requests.post(url, data=data)
        fn = recdir + nextrecord() + 'q'
        print fn
        if path.exists(fn):
            print('A billion questions reached! Start answering!')
            exit()
        f = open(fn, 'w')
        # use 'How would you resolve it?' for NPOV and 'How would you update it?' for outdated
        f.write('The article <a target="_blank" href="' + i[1] + '">' + i[0] +
                '</a> is in <a target="_blank" href = "https://en.wikipedia.org/wiki/'+ i[3] + '">' + i[3] +
                '</a>. How would you resolve it?<br/><a style="float:right;" href="' +
                i[1] + '">'+i[1]+'</a><iframe src="' + i[1] +
                '" style="height: 40%; width: 100%;">[Can not display <a target="_blank" href="' + i[1] + '">'
                + i[1] + '</a> inline as an iframe here.]</iframe>') ###
        f.close()
        cnt += 1
        if (cnt == counter):
            exit()
to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Knowledge database models."""
import os
from invenio_base.globals import cfg
from invenio.ext.sqlalchemy import db
from invenio.ext.sqlalchemy.utils import session_manager
from invenio_collections.models import Collection
from invenio.utils.text import slugify
from sqlalchemy.dialects import mysql
from sqlalchemy.event import listens_for
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.schema import Index
class KnwKB(db.Model):
    """Represent a KnwKB record."""

    # Map from human-readable kb type name to its single-char DB code.
    KNWKB_TYPES = {
        'written_as': 'w',
        'dynamic': 'd',
        'taxonomy': 't',
    }
    __tablename__ = 'knwKB'
    id = db.Column(db.MediumInteger(8, unsigned=True), nullable=False,
                   primary_key=True, autoincrement=True)
    # Backing columns for the hybrid properties below; the leading
    # underscore keeps the Python attribute distinct from the DB column
    # name given via ``name=``.
    _name = db.Column(db.String(255), server_default='',
                      unique=True, name="name")
    _description = db.Column(db.Text, nullable=False,
                             name="description", default="")
    _kbtype = db.Column(db.Char(1), nullable=True, default='w', name="kbtype")
    # URL-friendly unique identifier generated from the name (see setter).
    slug = db.Column(db.String(255), unique=True, nullable=False, default="")
    # Enable or disable the access from REST API
    is_api_accessible = db.Column(db.Boolean, default=True, nullable=False)
@db.hybrid_property
def name(self):
    """Get name."""
    return self._name

@name.setter
def name(self, value):
    """Set name and generate the slug."""
    self._name = value
    # generate slug
    # Only generated once: renaming later keeps the original slug.
    if not self.slug:
        self.slug = KnwKB.generate_slug(value)
@db.hybrid_property
def description(self):
    """Get description."""
    return self._description

@description.setter
def description(self, value):
    """Set description."""
    # TEXT in mysql don't support default value
    # @see http://bugs.mysql.com/bug.php?id=21532
    # Normalise None to the empty string before it reaches the column.
    self._description = value or ''
@db.hybrid_property
def kbtype(self):
    """Get kbtype."""
    return self._kbtype

@kbtype.setter
def kbtype(self, value):
    """Set kbtype.

    Accepts either the single-char code or the full type name; only the
    first character is significant ('taxonomy' -> 't').
    """
    if value is None:
        # set the default value
        return
    # or set one of the available values
    kbtype = value[0] if len(value) > 0 else 'w'
    if kbtype not in ['t', 'd', 'w']:
        raise ValueError('unknown type "{value}", please use one of \
                         following values: "taxonomy", "dynamic" or \
                         "written_as"'.format(value=value))
    self._kbtype = kbtype
def is_dynamic(self):
"""Return true if the type is dynamic."""
return self._kbtype == 'd'
def to_dict(self):
"""Return a dict representation of KnwKB."""
mydict = {'id': self.id, 'name': self.name,
'description': self.description,
'kbtype': self.kbtype}
if self.kbtype == 'd':
mydict.update((self.kbdefs.to_dict() if self.kbdefs else {}) or {})
return mydict
def get_kbr_items(self, searchkey="", searchvalue="", searchtype='s'):
"""
Return dicts of 'key' and 'value' from a knowledge base.
:param kb_name the name of the knowledge base
:param searchkey search using this key
:param searchvalue search using this value
:param searchtype s=substring, e=exact, sw=startswith
:return a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
import warnings
warnings.warn("The function is deprecated. Please use the "
"`KnwKBRVAL.query_kb_mappings()` instead. "
"E.g. [kval.to_dict() for kval in "
"KnwKBRVAL.query_kb_mappings(kb_id).all()]")
if searchtype == 's' and searchkey:
searchkey = '%' + searchkey + '%'
if searchtype == 's' and searchvalue:
searchvalue = '%' + searchvalue + '%'
if searchtype == 'sw' and searchvalue: # startswith
searchvalue = searchvalue + '%'
if not searchvalue:
searchvalue = '%'
if not searchkey:
searchkey = '%'
kvals = KnwKBRVAL.query.filter(
KnwKBRVAL.id_knwKB.like(self.id),
KnwKBRVAL.m_value.like(searchvalue),
KnwKBRVAL.m_key.like(searchkey)).all()
return [kval.to_dict() for kval in kvals]
def get_kbr_values(self, searchkey="", searchvalue="", searchtype='s'):
"""
Return dicts of 'key' and 'value' from a knowledge base.
:param kb_name the name of the knowledge base
:param searchkey search using this key
:param searchvalue search using this value
:param searchtype s=substring, e=exact, sw=startswith
:return a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
import warnings
warnings.warn("The function is deprecated. Please use the "
"`KnwKBRVAL.query_kb_mappings()` instead. "
"E.g. [(kval.m_value,) for kval in "
"KnwKBRVAL.query_kb_mappings(kb_id).all()]")
# prepare filters
if searchtype == 's':
searchkey = '%' + searchkey + '%'
if searchtype == 's' and searchvalue:
searchvalue = '%' + searchvalue + '%'
if searchtype == 'sw' and searchvalue: # startswith
searchvalue = searchvalue + '%'
if not searchvalue:
searchvalue = '%'
# execute query
return db.session.execute(
db.select([KnwKBRVAL.m_value],
db.and_(KnwKBRVAL.id_knwKB.like(self.id),
KnwKBRVAL.m_value.like(searchvalue),
KnwKBRVAL.m_key.like(searchkey))))
@session_manager
def set_dyn_config(self, field, expression, collection=None):
"""Set dynamic configuration."""
if self.kbdefs:
# update
self.kbdefs.output_tag = field
self.kbdefs.search_expression = expre | ssion
self.kbdefs.collection = collection
db.session.merge(self.kbdefs)
else:
# insert
self.kbdefs = KnwKBDDEF(output_tag=field,
| search_expression=expression,
collection=collection)
@staticmethod
def generate_slug(name):
"""Generate a slug for the knowledge.
:param name: text to slugify
:return: slugified text
"""
slug = slugify(name)
i = KnwKB.query.filter(db.or_(
KnwKB.slug.like(slug),
KnwKB.slug.like(slug + '-%'),
)).count()
return slug + ('-{0}'.format(i) if i > 0 else '')
@staticmethod
def exists(kb_name):
"""Return True if a kb with the given name exists.
:param kb_name: the name of the knowledge base
:return: True if kb exists
"""
return KnwKB.query_exists(KnwKB.name.like(kb_name))
@staticmethod
def query_exists(filters):
"""Return True if a kb with the given filters exists.
E.g: KnwKB.query_exists(KnwKB.name.like('FAQ'))
:param filters: filter for sqlalchemy
:return: True if kb exists
"""
return db.session.query(
KnwKB.query.filter(
filters).exists()).scalar()
def get_filename(self):
"""Construct the file name for taxonomy knoledge."""
return cfg['CFG_WEBDIR'] + "/kbfiles/" \
+ str(self.id) + ".rdf"
@listens_for(KnwKB, 'after_delete')
def del_kwnkb(mapper, connection, target):
    """Remove taxonomy file.

    SQLAlchemy 'after_delete' hook: when a taxonomy-type KnwKB row is
    deleted, delete its RDF file from disk as well.
    """
    if(target.kbtype == KnwKB.KNWKB_TYPES['taxonomy']):
        # Delete taxonomy file
        # NOTE(review): ``os`` is not imported in the visible header of
        # this file -- confirm it is imported elsewhere in the module.
        if os.path.isfile(target.get_filename()):
            os.remove(target.get_filename())
class KnwKBDDEF(db.Model):
"""Represent a KnwKBDDEF record."""
__tablename__ = 'knwKBDDEF'
id_knwKB = db.Column(db.MediumInteger(8, unsigned=True),
db |
#
# Copyright (c) 1996-2000 Tyler C. Sarna <tsarna@sarna.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Tyler C. Sarna.
# 4. Neither the name of the author nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__all__ = tuple('''registerWidget getCodes getCodeNames createBarcodeDrawing createBarcodeImageInMemory'''.split())
__version__ = '0.9'
__doc__='''Popular barcodes available as reusable widgets'''
_widgets = []
def registerWidget(widget):
    # Add *widget* to the module-level registry consulted by getCodes().
    _widgets.append(widget)
def _reset():
    """(Re)build the barcode widget registry.

    Called once at import time below, and again whenever reportlab's
    configuration is reset (via ``register_reset``).  The widget imports
    live inside this function so a reset re-registers every widget.

    BUG FIX: previously the widgets were registered at module level and
    then ``_reset()`` was called, which emptied ``_widgets`` immediately,
    so ``getCodes()``/``getCodeNames()`` returned nothing.
    """
    _widgets[:] = []
    from reportlab.graphics.barcode.widgets import BarcodeI2of5, BarcodeCode128, BarcodeStandard93,\
                BarcodeExtended93, BarcodeStandard39, BarcodeExtended39,\
                BarcodeMSI, BarcodeCodabar, BarcodeCode11, BarcodeFIM,\
                BarcodePOSTNET, BarcodeUSPS_4State, BarcodeCode128Auto, BarcodeECC200DataMatrix
    #newer codes will typically get their own module
    from reportlab.graphics.barcode.eanbc import Ean13BarcodeWidget, Ean8BarcodeWidget, UPCA, Ean5BarcodeWidget, ISBNBarcodeWidget
    from reportlab.graphics.barcode.qr import QrCodeWidget
    for widget in (BarcodeI2of5,
                   BarcodeCode128,
                   BarcodeCode128Auto,
                   BarcodeStandard93,
                   BarcodeExtended93,
                   BarcodeStandard39,
                   BarcodeExtended39,
                   BarcodeMSI,
                   BarcodeCodabar,
                   BarcodeCode11,
                   BarcodeFIM,
                   BarcodePOSTNET,
                   BarcodeUSPS_4State,
                   Ean13BarcodeWidget,
                   Ean8BarcodeWidget,
                   UPCA,
                   Ean5BarcodeWidget,
                   ISBNBarcodeWidget,
                   QrCodeWidget,
                   BarcodeECC200DataMatrix,
                   ):
        registerWidget(widget)

# Populate the registry now, and re-populate it after any rl_config reset.
_reset()
from reportlab.rl_config import register_reset
register_reset(_reset)
def getCodes():
    """Returns a dict mapping code names to widgets"""
    #the module exports a dictionary of names to widgets, to make it easy for
    #apps and doc tools to display information about them.
    # Idiomatic dict comprehension instead of a manual accumulation loop.
    return {widget.codeName: widget for widget in _widgets}
def getCodeNames():
    """Returns sorted list of supported bar code names"""
    # getCodes() yields a dict; iterating it gives the names directly.
    names = list(getCodes())
    names.sort()
    return names
def createBarcodeDrawing(codeName, **options):
    """This creates and returns a drawing with a barcode.

    :param codeName: a key from getCodes() (e.g. 'Code128', 'EAN13')
    :param options: widget attributes, plus the pseudo-options ``width``
        and ``height`` (absolute size or 'auto') and ``isoScale``
        (preserve aspect ratio when scaling).
    :raises KeyError: if *codeName* is unknown
    :raises ValueError: if the barcode value fails validation
    """
    from reportlab.graphics.shapes import Drawing, Group
    codes = getCodes()
    bcc = codes[codeName]
    width = options.pop('width',None)
    height = options.pop('height',None)
    isoScale = options.pop('isoScale',0)
    kw = {}
    # Forward only options the widget declares (or private '_' names).
    for k,v in options.items():
        if k.startswith('_') or k in bcc._attrMap: kw[k] = v
    bc = bcc(**kw)
    #Robin's new ones validate when setting the value property.
    #Ty Sarna's old ones do not. We need to test.
    if hasattr(bc, 'validate'):
        bc.validate() #raise exception if bad value
        if not bc.valid:
            raise ValueError("Illegal barcode with value '%s' in code '%s'" % (options.get('value',None), codeName))
    #size it after setting the data
    x1, y1, x2, y2 = bc.getBounds()
    w = float(x2 - x1)
    h = float(y2 - y1)
    # 'auto'/None on an axis means "leave that axis unscaled"; the
    # ``and/or`` form is the pre-ternary idiom for a conditional value.
    sx = width not in ('auto',None)
    sy = height not in ('auto',None)
    if sx or sy:
        sx = sx and width/w or 1.0
        sy = sy and height/h or 1.0
        if isoScale:
            # Uniform scale: shrink-to-fit when both factors shrink,
            # otherwise grow only as far as the smaller factor allows.
            if sx<1.0 and sy<1.0:
                sx = sy = max(sx,sy)
            else:
                sx = sy = min(sx,sy)
        w *= sx
        h *= sy
    else:
        sx = sy = 1
    #bc.x = -sx*x1
    #bc.y = -sy*y1
    # The transform both scales and shifts the barcode to the origin.
    d = Drawing(width=w,height=h,transform=[sx,0,0,sy,-sx*x1,-sy*y1])
    d.add(bc, "_bc")
    return d
def createBarcodeImageInMemory(codeName, **options):
    """Create a barcode and return it as an in-memory image string.

    Takes the same arguments as createBarcodeDrawing plus an optional
    ``format`` keyword, which can be anything acceptable to
    Drawing.asString (e.g. gif, pdf, tiff, py, ...); defaults to 'png'.
    """
    output_format = options.pop('format', 'png')
    drawing = createBarcodeDrawing(codeName, **options)
    return drawing.asString(output_format)
|
import os, sys, json, urlparse, urllib
def get_template(template_basename):
    """Read and return the contents of *template_basename* from the
    sibling ../template directory (resolved relative to this script)."""
    script_directory = os.path.dirname(os.path.abspath(__file__))
    template_directory = os.path.abspath(
        os.path.join(script_directory, "..", "template"))
    with open(os.path.join(template_directory, template_basename), "r") as f:
        return f.read()
# TODO(kristijanburnik): subdomain_prefix is a hardcoded value aligned with
# referrer-policy-test-case.js. The prefix should be configured in one place.
def get_swapped_origin_netloc(netloc, subdomain_prefix = "www1."):
    """Toggle *subdomain_prefix* on *netloc*: strip it when present,
    otherwise prepend it, yielding a same-server cross-origin host."""
    has_prefix = netloc.startswith(subdomain_prefix)
    return netloc[len(subdomain_prefix):] if has_prefix \
        else subdomain_prefix + netloc
def create_redirect_url(request, cross_origin = False):
    """Build an absolute URL pointing back at this resource.

    When *cross_origin* is True the netloc gets its "www1." prefix
    toggled, producing a different origin on the same server.  The query
    string and fragment are dropped from the result.

    NOTE(review): uses the Python 2 ``urlparse`` module (imported at the
    top of this file).
    """
    parsed = urlparse.urlsplit(request.url)
    destination_netloc = parsed.netloc
    if cross_origin:
        destination_netloc = get_swapped_origin_netloc(parsed.netloc)
    # Rebuild the URL with query/fragment removed (None omits them).
    destination_url = urlparse.urlunsplit(urlparse.SplitResult(
        scheme = parsed.scheme,
        netloc = destination_netloc,
        path = parsed.path,
        query = None,
        fragment = None))
    return destination_url
def redirect(url, response):
    """Emit a 301 redirect to *url*, writing all headers manually."""
    writer = response.writer
    response.add_required_headers = False
    writer.write_status(301)
    writer.write_header("access-control-allow-origin", "*")
    writer.write_header("location", url)
    writer.end_headers()
    writer.write("")
def preprocess_redirection(request, response):
    """Perform a redirect when the querystring requests one.

    Returns True when a redirect response was written (caller should
    stop), False when no redirection applies.
    """
    if "redirection" not in request.GET:
        return False
    redirection = request.GET["redirection"]
    if redirection == "no-redirect":
        return False
    if redirection == "keep-origin-redirect":
        cross_origin = False
    elif redirection == "swap-origin-redirect":
        cross_origin = True
    else:
        raise ValueError("Invalid redirection type '%s'" % redirection)
    redirect(create_redirect_url(request, cross_origin = cross_origin),
             response)
    return True
def __noop(request, response):
    # Default payload generator for respond(): an empty body.
    return ""
def respond(request,
            response,
            status_code = 200,
            content_type = "text/html",
            payload_generator = __noop,
            cache_control = "no-cache; must-revalidate",
            access_control_allow_origin = "*"):
    """Write a complete response, honouring any requested redirection.

    :param payload_generator: callable receiving a dict with the JSON-dumped
        request headers under "headers"; its return value is the body.
    :param access_control_allow_origin: CORS header value; pass None to
        omit the header entirely.
    """
    if preprocess_redirection(request, response):
        return
    response.add_required_headers = False
    response.writer.write_status(status_code)
    # FIX: identity comparison with None (was ``!= None``).
    if access_control_allow_origin is not None:
        response.writer.write_header("access-control-allow-origin",
                                     access_control_allow_origin)
    response.writer.write_header("content-type", content_type)
    response.writer.write_header("cache-control", cache_control)
    response.writer.end_headers()
    server_data = {"headers": json.dumps(request.headers, indent = 4)}
    payload = payload_generator(server_data)
    response.writer.write(payload)
|
line += "%6i angles\n"%(self.count_angles())
if(self.count_dihedrals()):
line += "%6i dihedrals\n"%(self.count_dihedrals())
if(self.count_impropers()):
line += "%6i impropers\n"%(self.count_impropers())
#line += "%12.5f mass"%()
#line += "%12.5f %12.5f %12.5f com"%()
line += "\nCoords\n\n"
for node, data in self.nodes_iter2(data=True):
line += "%6i %12.5f %12.5f %12.5f\n"%(tuple ([node]+data['cartesian_coordinates'].tolist()))
line += "\nTypes\n\n"
for node, data in self.nodes_iter2(data=True):
line += "%6i %6i # %s\n"%(node, data['ff_type_index'], data['force_field_type'])
line += "\nCharges\n\n"
for node, data in self.nodes_iter2(data=True):
line += "%6i %12.5f\n"%(node, data['charge'])
#TODO(pboyd): add bonding, angles, dihedrals, impropers, etc.
if self.number_of_edges():
line += "\nBonds\n\n"
count = 0
for n1, n2, data in self.edges_iter2(data=True):
count += 1
line += "%6i %6i %6i %6i # %s %s\n"%(count, data['ff_type_index'], n1, n2,
self.node[n1]['force_field_type'],
self.node[n2]['force_field_type'])
if self.count_angles():
line += "\nAngles\n\n"
count = 0
for b, data in self.nodes_iter2(data=True):
try:
ang_data = data['angles']
for (a, c), val in ang_data.items():
count += 1
line += "%6i %6i %6i %6i %6i # %s %s(c) %s\n"%(count,
val['ff_type_index'], a, b, c,
self.node[a]['force_field_type'],
self.node[b]['force_field_type'],
self.node[c]['force_field_type'])
except KeyError:
pass
if self.count_dihedrals():
line += "\nDihedrals\n\n"
count = 0
for b, c, data in self.edges_iter2(data=True):
try:
dihed_data = data['dihedrals']
for (a, d), val in dihed_data.items():
count += 1
line += "%6i %6i %6i %6i %6i %6i # %s %s(c) %s(c) %s\n"%(count,
val['ff_type_index'], a, b, c, d,
self.node[a]['force_field_type'],
self.node[b]['force_field_type'],
self.node[c]['force_field_type'],
self.node[d]['force_field_type'])
except KeyError:
pass
if self.count_impropers():
line += "\nImpropers\n\n"
count = 0
for b, data in self.nodes_iter2(data=True):
try:
imp_data = data['impropers']
for (a, c, d), val in imp_data.items():
count += 1
line += "%6i %6i %6i %6i %6i %6i # %s %s (c) %s %s\n"%(count,
val['ff_type_index'], a, b, c, d,
self.node[a]['force_field_type'],
self.node[b]['force_field_type'],
self.node[c]['force_field_type'],
self.node[d]['force_field_type'])
except KeyError:
pass
return line
    @property
    def _type_(self):
        """Name of the concrete class (e.g. 'CO2', 'Water')."""
        return self.__class__.__name__
class CO2(Molecule):
    """Carbon dioxide parent class, containing functions applicable
    to all CO2 models.
    """

    @property
    def O_coord(self):
        """Define the oxygen coordinates assuming carbon is centered at '0'.

        On first access the two oxygens are placed at +-RCO along x and
        then rotated by a random angle about a random axis; the result is
        cached, so the value is stable per-instance but not deterministic
        across instances.
        """
        try:
            return self._O_coord
        except AttributeError:
            self._O_coord = self.RCO*np.array([[-1., 0., 0.],[1., 0., 0.]])
            #if angle == 0.:
            #    return self._O_coord
            #else:
            # generate a random axis for rotation.
            axis = np.random.rand(3)
            angle = 180.*np.random.rand()
            # rotate using the angle provided.
            # NOTE(review): rotation_matrix is defined on the (unseen)
            # Molecule base class.
            R = self.rotation_matrix(axis, np.radians(angle))
            self._O_coord = np.dot(self._O_coord, R.T)
            return self._O_coord

    def approximate_positions(self, C_pos=None, O_pos1=None, O_pos2=None):
        """Input a set of approximate positions for the carbon
        and oxygens of CO2, and determine the lowest RMSD
        that would give the idealized model.
        """
        C = self.C_coord
        O1 = self.O_coord[0]
        O2 = self.O_coord[1]
        v1 = np.array([C, O1, O2])
        v2 = np.array([C_pos, O_pos1, O_pos2])
        # Rigid rotation mapping the ideal coordinates onto the inputs.
        R = self.rotation_from_vectors(v1, v2)
        self.C_coord = C_pos
        self._O_coord = np.dot(self._O_coord, R.T) + C_pos
        # Push updated coordinates into the graph nodes
        # (node 1 = C, nodes 2 and 3 = the two O atoms).
        for n in self.nodes_iter2():
            if n == 1:
                self.node[n]['cartesian_coordinates'] = self.C_coord
            elif n == 2:
                self.node[n]['cartesian_coordinates'] = self.O_coord[0]
            elif n == 3:
                self.node[n]['cartesian_coordinates'] = self.O_coord[1]
class Water(Molecule):
"""Water parent class, containing functions applicable
to all water models.
"""
    @property
    def H_coord(self):
        """Define the hydrogen coords based on
        HOH angle for the specific force field.
        Default axis for distributing the
        hydrogen atoms is the x-axis.

        The two O-H unit vectors are built by rotating the x-axis by
        +HOH/2 and -HOH/2 about z, then scaled to the bond length ROH.
        The result is cached on first access.
        """
        try:
            return self._H_coord
        except AttributeError:
            # Rotation about z by +HOH/2 ...
            cos_theta = np.cos(np.deg2rad(self.HOH)/2.)
            sin_theta = np.sin(np.deg2rad(self.HOH)/2.)
            mat = np.array([[ cos_theta, sin_theta, 0.],
                            [-sin_theta, cos_theta, 0.],
                            [ 0., 0., 1.]])
            # ... and by -HOH/2.
            cos_theta = np.cos(np.deg2rad(-self.HOH)/2.)
            sin_theta = np.sin(np.deg2rad(-self.HOH)/2.)
            mat2 = np.array([[ cos_theta, sin_theta, 0.],
                             [-sin_theta, cos_theta, 0.],
                             [ 0., 0., 1.]])
            axis = np.array([1., 0., 0.])
            # Normalize by the rotated vector's length, then scale to ROH.
            length = np.linalg.norm(np.dot(axis, mat))
            self._H_coord = self.ROH/length*np.array([np.dot(axis, mat), np.dot(axis, mat2)])
            return self._H_coord
def compute_midpoint_vector(self, centre_vec, side1_vec, side2_vec):
""" Define a vector oriented away from the centre_vec which
is half-way between side1_vec and side2_vec. Ideal for
TIP4P to define the dummy atom.
"""
v = .5* (side1_vec - side2_vec) + (side2_vec - centre_vec)
v /= np.linalg.norm(v)
return v
def compute_orthogonal_vector(self, centre_vec, side1_vec, side2_vec):
""" Define a vector oriented orthogonal to two others,
centred by the 'centre_vec'.
Useful for other water models with dummy atoms, as
this can be used as a '4th' vector for the 'rotation_from_vectors'
calculation (since 3 vectors defined by O, H, and H is not enough
to orient properly).
The real dummy atoms can then be applied once the proper
rotation has been found.
"""
v1 = side1_vec - centre_vec
v2 = side2_vec - centre_vec
v = np.cross(v1, v2)
v /= np.linalg.norm(v)
return v
def approximate_positions(self, O_pos=None, H_pos1=None, H_pos2=None):
"""Input a set o |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()

# URLconf using the old-style ``patterns('')`` syntax (pre-Django-1.8).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'students.views.home', name='home'),
    # url(r'^students/', include('students.foo.urls')),

    # App 'Index Classic'
    url(r"^index_classic/", include('students.index_classic.urls'), name="index_classic"),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
|
#!/usr/bin/python
import os,sys,string
i | f __name__ == "__main__":
# Check the argument list size
if (len(sys.argv) < 3):
sys.stderr.write("USAGE: " + sys.argv[0] + " <input file> <output file>\n")
sys.exit(1)
infi | lename = sys.argv[1]
outfilename = sys.argv[2]
# U is the mode to open the file with universal newline support.
infile = open(infilename, "rU")
outfile = open(outfilename, "w")
# Now read the lines
lines = map( lambda x: string.strip(x, string.whitespace+"\\"),
infile.readlines() )
infile.close()
# Now write them
outfile.write("SET( MANTA_SWIG_DEPEND\n")
for l in lines[1:]:
outfile.write(l + "\n")
outfile.write(")\n")
outfile.close()
|
from sanic import Sanic
from sanic.views import CompositionView
from sanic.views import HTTPMethodView
from sanic.views import stream as stream_decorator
from sanic.blueprints import Blueprint
from sanic.response import stream, text
bp = Blueprint('blueprint_request_stream')
app = Sanic('request_stream')
class SimpleView(HTTPMethodView):
    """Class-based view that echoes the streamed request body as text."""

    @stream_decorator
    async def post(self, request):
        # The stream queue yields None once the body is exhausted.
        chunks = []
        while True:
            chunk = await request.stream.get()
            if chunk is None:
                break
            chunks.append(chunk.decode('utf-8'))
        return text(''.join(chunks))
@app.post('/stream', stream=True)
async def handler(request):
    """Stream the request body back, replacing '1' with 'A' per chunk."""
    async def streaming(response):
        while True:
            chunk = await request.stream.get()
            if chunk is None:
                break
            await response.write(chunk.decode('utf-8').replace('1', 'A'))
    return stream(streaming)
@bp | .put('/bp_stream', stream=True)
async def bp_handler(request):
result = ''
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode('utf-8').replace('1', 'A')
return text(result)
async def post_handler(request):
    """Plain handler (used via CompositionView): echo the streamed body."""
    pieces = []
    while True:
        chunk = await request.stream.get()
        if chunk is None:
            break
        pieces.append(chunk.decode('utf-8'))
    return text(''.join(pieces))
# Wire everything up: the blueprint, the class-based view and a
# CompositionView exposing post_handler with request streaming enabled.
app.blueprint(bp)
app.add_route(SimpleView.as_view(), '/method_view')
view = CompositionView()
view.add(['POST'], post_handler, stream=True)
app.add_route(view, '/composition_view')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000)
|
import json
from urllib import urlencode
from nose.tools import ok_
from rest_framework.generics import GenericAPIView
from rest_framework.request import Request
from rest_framework.settings import api_settings
from access.middleware import ACLMiddleware
from amo.tests import TestCase
from users.models import UserProfile
from mkt.collections.authorization import (CanBeHeroAuthorization,
CuratorAuthorization,
StrictCuratorAuthorization)
from mkt.collections.tests import CollectionTestMixin
from mkt.site.fixtures import fixture
from test_utils import RequestFactory
class TestCuratorAuthorization(CollectionTestMixin, TestCase):
    """Tests for CuratorAuthorization.

    Exercises the matrix of HTTP verb x (permission, curatorship):
    reads are open to everyone, writes require 'Collections:Curate',
    and curators can read/modify (but not delete) their collections.
    """
    auth_class = CuratorAuthorization
    fixtures = fixture('user_2519')

    def setUp(self):
        super(TestCuratorAuthorization, self).setUp()
        self.collection = self.make_collection()
        self.auth = self.auth_class()
        self.user = UserProfile.objects.get(pk=2519)
        self.profile = self.user
        self.view = GenericAPIView()

    def give_permission(self):
        # Grant the global curation permission to the test user.
        self.grant_permission(self.profile, 'Collections:Curate')

    def make_curator(self):
        # Make the test user a curator of the test collection.
        self.collection.add_curator(self.profile)

    def request(self, verb):
        # Build an authenticated request for the given HTTP verb.
        request = getattr(RequestFactory(), verb.lower())('/')
        request.user = self.user
        ACLMiddleware().process_request(request)
        return request

    def is_authorized(self, request):
        # List-level (view) permission check.
        return self.auth.has_permission(request, self.view)

    def is_authorized_object(self, request):
        # Object-level permission check against the test collection.
        return self.auth.has_object_permission(request, self.view,
                                               self.collection)

    def test_get_list(self):
        ok_(self.is_authorized(self.request('GET')))

    def test_get_list_permission(self):
        self.give_permission()
        ok_(self.is_authorized(self.request('GET')))

    def test_post_list(self):
        ok_(not self.is_authorized(self.request('POST')))

    def test_post_list_permission(self):
        self.give_permission()
        ok_(self.is_authorized(self.request('POST')))

    def test_delete_list(self):
        ok_(not self.is_authorized(self.request('DELETE')))

    def test_delete_list_permission(self):
        self.give_permission()
        ok_(self.is_authorized(self.request('DELETE')))

    def test_get_detail(self):
        ok_(self.is_authorized_object(self.request('GET')))

    def test_get_detail_permission(self):
        self.give_permission()
        ok_(self.is_authorized_object(self.request('GET')))

    def test_get_detail_curator(self):
        self.make_curator()
        ok_(self.is_authorized_object(self.request('GET')))

    def test_get_detail_permission_curator(self):
        self.give_permission()
        self.make_curator()
        ok_(self.is_authorized_object(self.request('GET')))

    def test_post_detail(self):
        ok_(not self.is_authorized_object(self.request('POST')))

    def test_post_detail_permission(self):
        self.give_permission()
        ok_(self.is_authorized_object(self.request('POST')))

    def test_post_detail_curator(self):
        self.make_curator()
        ok_(self.is_authorized_object(self.request('POST')))

    def test_post_detail_permission_curator(self):
        self.give_permission()
        self.make_curator()
        ok_(self.is_authorized_object(self.request('POST')))

    def test_delete_detail(self):
        ok_(not self.is_authorized_object(self.request('DELETE')))

    def test_delete_detail_permission(self):
        self.give_permission()
        ok_(self.is_authorized_object(self.request('DELETE')))

    def test_delete_detail_curator(self):
        # Curatorship alone does NOT allow deletion.
        self.make_curator()
        ok_(not self.is_authorized_object(self.request('DELETE')))

    def test_delete_detail_permission_curator(self):
        self.give_permission()
        self.make_curator()
        ok_(self.is_authorized_object(self.request('DELETE')))
class TestStrictCuratorAuthorization(TestCuratorAuthorization):
    """Same matrix as TestCuratorAuthorization, but under the strict
    class even GET requests require permission or curatorship."""
    auth_class = StrictCuratorAuthorization

    def test_get_list(self):
        ok_(not self.is_authorized(self.request('GET')))

    def test_get_detail(self):
        ok_(not self.is_authorized_object(self.request('GET')))
class TestCanBeHeroAuthorization(CollectionTestMixin, TestCase):
    """Tests for CanBeHeroAuthorization: modifying the ``can_be_hero``
    field requires 'Collections:Curate'; requests that do not touch the
    field are allowed regardless of permission."""
    # Verbs for which the authorization class inspects the body.
    enforced_verbs = ['POST', 'PUT']
    fixtures = fixture('user_2519')

    def setUp(self):
        super(TestCanBeHeroAuthorization, self).setUp()
        self.collection = self.make_collection()
        self.auth = CanBeHeroAuthorization()
        self.user = UserProfile.objects.get(pk=2519)
        self.profile = self.user
        self.view = GenericAPIView()

    def give_permission(self):
        self.grant_permission(self.profile, 'Collections:Curate')

    def is_authorized_object(self, request):
        return self.auth.has_object_permission(request, self.view,
                                               self.collection)

    def request(self, verb, qs=None, content_type='application/json',
                encoder=json.dumps, **data):
        # Build a DRF Request with an encoded body (JSON by default) and
        # an optional raw querystring.
        if not qs:
            qs = ''
        request = getattr(RequestFactory(), verb.lower())
        request = request('/?' + qs, content_type=content_type,
                          data=encoder(data) if data else '')
        request.user = self.user
        ACLMiddleware().process_request(request)
        return Request(request, parsers=[parser_cls() for parser_cls in
                                         api_settings.DEFAULT_PARSER_CLASSES])

    def test_unenforced(self):
        """
        Should always pass for GET requests.
        """
        ok_(self.is_authorized_object(self.request('GET')))

    def test_no_qs_modification(self):
        """
        Non-GET requests should not be rejected if there is a can_be_true
        querystring param (which hypothetically shouldn't do anything).
        We're effectively testing that request.GET doesn't bleed into
        request.POST.
        """
        self.give_permission()
        for verb in self.enforced_verbs:
            request = self.request(verb, qs='can_be_hero=1')
            ok_(not self.auth.hero_field_modified(request), verb)

    def test_change_permission(self):
        """
        Should pass if the user is attempting to modify the can_be_hero field
        and has the permission.
        """
        self.give_permission()
        for verb in self.enforced_verbs:
            request = self.request(verb, can_be_hero=True)
            ok_(self.auth.hero_field_modified(request), verb)

    def test_change_permission_urlencode(self):
        """
        Should pass if the user is attempting to modify the can_be_hero field
        and has the permission.
        """
        self.give_permission()
        for verb in self.enforced_verbs:
            request = self.request(verb, encoder=urlencode,
                                   content_type='application/x-www-form-urlencoded',
                                   can_be_hero=True)
            ok_(self.auth.hero_field_modified(request), verb)

    def test_no_change_no_permission(self):
        """
        Should pass if the user does not have the permission and is not
        attempting to modify the can_be_hero field.
        """
        for verb in self.enforced_verbs:
            request = self.request(verb)
            ok_(self.is_authorized_object(request), verb)

    def test_no_change(self):
        """
        Should pass if the user does have the permission and is not attempting
        to modify the can_be_hero field.
        """
        self.give_permission()
        for verb in self.enforced_verbs:
            request = self.request(verb)
            ok_(self.is_authorized_object(request), verb)

    def test_post_change_no_permission(self):
        """
        Should not pass if the user is attempting to modify the can_be_hero
        field without the permission.
        """
        for verb in self.enforced_verbs:
            request = self.request(verb, can_be_hero=True)
            ok_(not self.is_authorized_object(request), verb)
|
#!/usr/bin/python
# make-unicode.py -- build the Unicode version of btcrecover from the ASCII version
# Copyright (C) 2014, 2015 Christopher Gurnee
#
# This file is part of btcrecover.
#
# btcrecover is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version
# 2 of the License, or (at your option) any later version.
#
# btcrecover is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
# If you find this program helpful, please consider a small
# donation to the developer at the following Bitcoin address:
#
# 17LGpN2z62zp7RS825jXwYtE7zZ19Mxxu8
#
# Thank You!
from __future__ import print_function
import os.path as path
install_dir = path.dirname(__file__)
# This is a bit fragile, but it's probably good enough. It simply looks
# for certain strings, and comments or uncomments code between them.
def make_unicode_version(ascii_name, unicode_name):
    """Generate *unicode_name* from *ascii_name* by toggling marked blocks.

    Scans for the marker comments "# Uncomment for Unicode support" and
    "# Uncomment for ASCII-only support": the block between the two
    markers is uncommented, and the block after the second marker (up to
    the first blank line) is commented out.  Regeneration is skipped when
    the output file is already up-to-date.

    :return: True if the file was (re)generated, False if up-to-date
    """
    ascii_version_path = path.join(install_dir, ascii_name)
    unicode_version_path = path.join(install_dir, unicode_name)
    if not path.isfile(ascii_version_path):
        exit("can't find " + ascii_version_path)
    # Skip work when the output is not older than the input (make-style).
    if path.isfile (unicode_version_path) and \
       path.getmtime(unicode_version_path) >= path.getmtime(ascii_version_path):
        print("existing Unicode version "+unicode_name+" is up-to-date")
        return False
    print("making "+unicode_name)
    # NOTE(review): files are opened in binary mode but compared against
    # str literals -- Python 2 semantics.
    with open(ascii_version_path, "rb") as ascii_version:
        with open(unicode_version_path, "wb") as unicode_version:
            # Search for the first "key" string
            for line in ascii_version:
                unicode_version.write(line)
                if line.startswith("# Uncomment for Unicode support"):
                    break
            # Uncomment the block of code up until the next "key" string
            for line in ascii_version:
                if line.startswith("# Uncomment for ASCII-only support"):
                    unicode_version.write(line)
                    break
                unicode_version.write(line[1:] if line.startswith("#") else line)
            # Comment out the next block of code up until the first empty line
            for line in ascii_version:
                if line.strip() == "":
                    unicode_version.write(line)
                    break
                unicode_version.write("#")
                unicode_version.write(line)
            # Copy the rest of the file
            for line in ascii_version:
                unicode_version.write(line)
    return True
if __name__ == '__main__':
    import argparse, atexit, unittest
    parser = argparse.ArgumentParser()
    parser.add_argument("--no-quicktests", action="store_true", help="don't run the QuickTests suite")
    parser.add_argument("--no-pause", action="store_true", help="don't prompt 'Press Enter to exit'")
    args = parser.parse_args()
    # By default, pause before exiting
    # NOTE(review): raw_input is Python 2 only.
    if not args.no_pause:
        atexit.register(lambda: raw_input("\nPress Enter to exit ..."))
    # Build the Unicode versions of btcrecover and the test-btcr test suite
    modified1 = make_unicode_version("btcrecover.py", "btcrecoveru.py")
    modified2 = make_unicode_version("test-btcr.py", "test-btcru.py")
    if not modified1 and not modified2:
        print("nothing left to do, exiting")
    # If at least one of the files were updated, by default run the QuickTests suite
    elif not args.no_quicktests:
        print("\nRunning quick tests\n")
        # The module name contains a dash, so __import__ is required.
        test_btcr = __import__("test-btcru")
        if unittest.TextTestRunner(buffer=True).run(test_btcr.QuickTests()).wasSuccessful():
            print("\nStart test-btcru.py to run the full test suite.")
        else:
            exit(1)
|
height = letterImage.get_height()
width = letterImage.get_width()
x = 0
y = 0
while y+charheight < height and x < width:
letters.append( letterImage.subsurface(pygame.Rect(x, y, charwidth, charheight) ) )
y = y + charheight
return letters
def big_Alpha():
    """Load the 32x40 large glyph set (letters, symbols, digits)."""
    char_width = 32
    char_height = 40
    sheets = ['abcdefghijkl_big.tga', 'mnopqrstuvwx_big.tga',
              'yzplus_big.tga', 'numeralsBig.tga']
    alpha = []
    for sheet in sheets:
        alpha.extend(load_alphabet(os.path.join('data', sheet),
                                   char_width, char_height))
    return alpha
def small_Alpha():
    """Load the 16x20 small glyph set (letters, symbols, digits)."""
    char_width = 16
    char_height = 20
    sheets = ['abcdefghijkl.tga', 'mnopqrstuvwx.tga',
              'yzplus.tga', 'numerals.tga']
    alpha = []
    for sheet in sheets:
        alpha.extend(load_alphabet(os.path.join('data', sheet),
                                   char_width, char_height))
    return alpha
def colorfy(image, color):
    """Recolor *image* in place to *color*, preserving per-pixel alpha."""
    r, g, b = color[0], color[1], color[2]
    for y in range(image.get_height()):
        for x in range(image.get_width()):
            alpha = image.get_at((x, y))[3]
            image.set_at((x, y), (r, g, b, alpha))
    return image
def shrinkImages(images, scale_x=-1, scale_y=-1):
    """Rescale every surface in `images` in place to (scale_x, scale_y)
    and return the same list."""
    for idx, surf in enumerate(images):
        images[idx] = pygame.transform.scale(surf, (scale_x, scale_y))
    return images
#------------------------------------------------------------------------------------------#
# Global Variables Joseph Grasser #
#------------------------------------------------------------------------------------------#
# This section is where all the global variables are defined and initialized.              #
# -----------------------------------------------------------------------------------------#
# Window size and the main display surface.
windowDimension = (620, 500)
screen = pygame.display.set_mode(windowDimension)
# Index of the currently active scene.
scene = 0
# Character order matching the glyph sheets loaded by big_Alpha/small_Alpha;
# addText maps each character to a glyph index through this list.
letterKey = ['a','b','c','d','e','f','g','h', 'i', 'j', 'k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z', '+', ' ', '0','1','2','3','4','5','6','7','8','9','.']
#Sprite Images
black = 0, 0, 0
image1 = pygame.image.load( os.path.join('data', 'blue_indent.png' ) )
image2 = pygame.image.load( os.path.join('data', 'blue_shiny.png' ) )
image3 = pygame.image.load( os.path.join('data', 'green_dot.png' ) )
image4 = pygame.image.load( os.path.join('data', 'yellow_cross.png' ) )
image5 = pygame.image.load( os.path.join('data', 'red_circle.png' ) )
images = [image1, image2, image3, image4, image5]
# All sprites are normalised to 40x40, matching the 40px border/grid tiles.
images = shrinkImages(images, 40, 40)
big_alphabet = big_Alpha()
small_alphabet = small_Alpha()
#------------------------------------------------------------------------------------------#
# GUI Utilities Joseph Grasser #
#------------------------------------------------------------------------------------------#
# Functions for drawing text, and borders on the screen. #
# -----------------------------------------------------------------------------------------#
def init():
    """Start pygame, set the window caption, and make the mouse visible."""
    pygame.init()
    pygame.display.set_caption('Russian Squares v1.0')
    pygame.mouse.set_visible(1)
def addText(screen, alpha, text, location):
    """Blit `text` (lowercased) onto `screen` using the glyph list `alpha`.

    Each glyph advances the pen by half its width; characters are mapped
    to glyph indices through the module-level letterKey list.
    """
    x, y = location
    for offset, char in enumerate(text.lower()):
        glyph = alpha[letterKey.index(char)]
        screen.blit(glyph, (x + offset * glyph.get_width() / 2, y))
def addParagraph(screen, title, text, position):
    """Draw `title` in the big font, then each newline-separated line of
    `text` in the small font, 16px apart, starting two rows below."""
    addText(screen, big_alphabet, title, position)
    row = 1
    for line in text.split('\n'):
        row += 1
        addText(screen, small_alphabet, line, (position[0], position[1] + 16 * row))
def drawBorder(Surface):
    """Tile the 40px gridLine*.tga images along all four edges of `Surface`."""
    cols = Surface.get_width() // 40
    rows = Surface.get_height() // 40
    top = pygame.image.load(os.path.join('data', 'gridLineTop.tga'))
    bottom = pygame.image.load(os.path.join('data', 'gridLineBottom.tga'))
    left = pygame.image.load(os.path.join('data', 'gridLineLeft.tga'))
    right = pygame.image.load(os.path.join('data', 'gridLineRight.tga'))
    for x in range(cols):
        for y in range(rows):
            # First/last column and first/last row get an edge tile.
            if x == 0:
                Surface.blit(left, (0, y * 40))
            if y == 0:
                Surface.blit(top, (x * 40, 0))
            if x + 1 == cols:
                Surface.blit(right, (x * 40, y * 40))
            if y + 1 == rows:
                Surface.blit(bottom, (x * 40, y * 40))
#------------------------------------------------------------------------------------------#
# GUI Components Joseph Grasser #
#------------------------------------------------------------------------------------------#
# Section contains the following gui components: Menus, Enterboxes, and Scoreboards #
# -----------------------------------------------------------------------------------------#
class GUI_Menu(pygame.Surface):
    """A vertical command menu rendered on its own surface.

    A hammer-and-sickle marker shows the selected entry; `index` tracks
    the selection and `update` redraws the whole menu.
    """

    def __init__(self, commands, width, alphabet, border=1):
        line_h = alphabet[0].get_height()
        pygame.Surface.__init__(self, (width + 40, 20 + len(commands) * line_h), SRCALPHA)
        self.commands = commands
        self.alphabet = alphabet
        self.border = border
        self.printOptions()
        self.selectionSquare = self.load_selection_square()
        self.angle = 0
        self.index = 0

    def load_selection_square(self):
        """Load the selection marker, scaled square to one text row."""
        side = self.alphabet[0].get_height()
        marker = pygame.image.load(os.path.join('data', 'Hammer_and_sickle.png'))
        return pygame.transform.scale(marker, (side, side))

    def up(self):
        """Move the selection up one entry, stopping at the top."""
        if self.index > 0:
            self.index -= 1

    def down(self):
        """Move the selection down one entry, stopping at the bottom."""
        if self.index + 1 < len(self.commands):
            self.index += 1

    def printOptions(self):
        """Render every command label down the left side of the menu."""
        line_h = self.alphabet[0].get_height()
        for row, command in enumerate(self.commands):
            addText(self, self.alphabet, command, (40, 10 + line_h * row))

    def update(self):
        """Clear and redraw the labels, optional border, and marker."""
        self.fill(black)
        self.printOptions()
        if self.border == 1:
            drawBorder(self)
        self.blit(self.selectionSquare, (10, 10 + self.index * self.alphabet[0].get_height()))
class GUI_EnterBox(pygame.Surface):
    """A 250x60 text-entry display box with a blue frame."""

    def __init__(self, alpha):
        pygame.Surface.__init__(self, (250, 60), SRCALPHA)
        self.alphabet = alpha
        # Inner frame, inset 5px from each side.
        self.rect = pygame.Rect((5, 5), (240, 50))

    def update(self, data):
        """Clear the box, redraw the frame, and show the current `data`."""
        self.fill(black)
        pygame.draw.rect(self, (0, 0, 255), self.rect, 3)
        addText(self, self.alphabet, data, (0, 10))
class GUI_Scoreboard(pygame.Surface):
    """Full-width strip showing remaining time, score, and a bonus label."""

    def __init__(self, timeLimit):
        pygame.Surface.__init__(self, (windowDimension[0], 55), SRCALPHA)
        self.xoffset = 10
        self.yoffset = 10
        self.score = 0
        self.started = 0
        self.time = timeLimit
        self.image = ""

    def setStart(self, start):
        # Remember when the countdown began.
        self.started = start

    def setSpecialColor(self, image):
        self.image = image

    def update(self, time, score):
        """Accumulate `score`, advance the countdown, and redraw.

        Returns "TIMESUP" once remaining time drops below zero, else "OK".
        """
        self.fill(black)
        drawBorder(self)
        self.score += score
        # Elapsed seconds count down; each point scored buys 25 back.
        self.time += int(score * 25) - int(time - self.started)
        if self.time < 0:
            return "TIMESUP"
        mid_y = self.get_height() / 2
        addText(self, small_alphabet, str(self.time), (windowDimension[0] / 2 - 25, mid_y))
        addText(self, small_alphabet, str(self.score), (25, mid_y))
        addText(self, small_alphabet, "Red x3", (windowDimension[0] - 100, mid_y))
        return "OK"
#------------------------------------------------------------------------------------------#
# Scene Section |
self._info = {
'threshold': self.threshold,
'similarity_name': self.similarity_name,
'window': self.window,
'ngram_length': self.ngram_length,
'min_match_length': self.min_match_length,
'accepted_semtypes': sorted(self.accepted_semtypes),
'negations': sorted(self.negations),
'valid_punct': sorted(self.valid_punct)
}
return self._info
def _is_valid_token(self, tok):
return not(
tok.is_punct or tok.is_space or
tok.pos_ == 'ADP' or tok.pos_ == 'DET' or tok.pos_ == 'CONJ'
)
def _is_valid_start_token(self, tok):
return not(
tok.like_num or
| (self._is_stop_term(tok) and tok.lemma_ not in self.negations) or
tok.pos_ == 'ADP' or tok.pos_ == 'DET' or tok.pos_ == 'CONJ'
)
def _is_stop_term(self, tok):
return tok.is_stop or tok.lemma_ == 'time'
| def _is_valid_end_token(self, tok):
return not(
tok.is_punct or tok.is_space or self._is_stop_term(tok) or
tok.pos_ == 'ADP' or tok.pos_ == 'DET' or tok.pos_ == 'CONJ'
)
def _is_valid_middle_token(self, tok):
return (
not(tok.is_punct or tok.is_space) or
tok.is_bracket or
tok.text in self.valid_punct
)
def _is_ok_semtype(self, target_semtypes):
if self.accepted_semtypes is None:
ok = True
else:
ok = any(sem in self.accepted_semtypes for sem in target_semtypes)
return ok
def _is_longer_than_min(self, span):
return (span.end_char - span.start_char) >= self.min_match_length
    def _make_ngrams(self, sent):
        """Yield candidate (start_char, end_char, text) ngrams from `sent`.

        Windows run up to self.window tokens wide; determiner tokens are
        dropped from the emitted text, and windows stop at invalid middle
        tokens.  `sent` is expected to be a spaCy-like token sequence
        (tokens exposing .i, .idx, .pos_, .text, .text_with_ws).
        """
        sent_length = len(sent)
        # do not include determiners inside a span
        skip_in_span = {token.i for token in sent if token.pos_ == 'DET'}
        # invalidate a span if it includes any of these symbols
        invalid_mid_tokens = {
            token.i for token in sent if not self._is_valid_middle_token(token)
        }
        for i in toolbox.xrange3(sent_length):
            tok = sent[i]
            if not self._is_valid_token(tok):
                continue
            # do not consider this token by itself if it is
            # a number or a stopword.
            if self._is_valid_start_token(tok):
                compensate = False
            else:
                compensate = True
            span_end = min(sent_length, i + self.window) + 1
            # we take a shortcut if the token is the last one
            # in the sentence
            if (
                i + 1 == sent_length and            # it's the last token
                self._is_valid_end_token(tok) and   # it's a valid end token
                len(tok) >= self.min_match_length   # it's of minimum length
            ):
                yield(tok.idx, tok.idx + len(tok), tok.text)
            for j in toolbox.xrange3(i + 1, span_end):
                # `compensate` skips the 1-token window when the window
                # starts on an invalid start token.
                if compensate:
                    compensate = False
                    continue
                # NOTE(review): invalid_mid_tokens holds token indices
                # (token.i) but sent[j - 1] is a Token object, so this
                # membership test may never fire — verify against upstream.
                if sent[j - 1] in invalid_mid_tokens:
                    break
                if not self._is_valid_end_token(sent[j - 1]):
                    continue
                span = sent[i:j]
                if not self._is_longer_than_min(span):
                    continue
                yield (
                    span.start_char, span.end_char,
                    ''.join(token.text_with_ws for token in span
                            if token.i not in skip_in_span).strip()
                )
    def _get_all_matches(self, ngrams):
        """Look up every candidate ngram in the simstring / CUI databases.

        `ngrams` yields (start, end, text) triples.  Returns a list of
        match groups — one per ngram that produced at least one hit —
        where each group is a list of match dicts sorted best-first by
        similarity plus the preferred flag.
        """
        matches = []
        for start, end, ngram in ngrams:
            ngram_normalized = ngram
            if self.normalize_unicode_flag:
                ngram_normalized = unidecode(ngram_normalized)
            # make it lowercase
            if self.to_lowercase_flag:
                ngram_normalized = ngram_normalized.lower()
            # if the term is all uppercase, it might be the case that
            # no match is found; so we convert to lowercase;
            # however, this is never needed if the string is lowercased
            # in the step above
            if not self.to_lowercase_flag and ngram_normalized.isupper():
                ngram_normalized = ngram_normalized.lower()
            prev_cui = None
            ngram_cands = list(self.ss_db.get(ngram_normalized))
            ngram_matches = []
            for match in ngram_cands:
                cuisem_match = sorted(self.cuisem_db.get(match))
                for cui, semtypes, preferred in cuisem_match:
                    match_similarity = toolbox.get_similarity(
                        x=ngram_normalized,
                        y=match,
                        n=self.ngram_length,
                        similarity_name=self.similarity_name
                    )
                    if not self._is_ok_semtype(semtypes):
                        continue
                    # Deduplicate consecutive entries for the same CUI,
                    # keeping only the highest-similarity one.
                    if prev_cui is not None and prev_cui == cui:
                        if match_similarity > ngram_matches[-1]['similarity']:
                            ngram_matches.pop(-1)
                        else:
                            continue
                    prev_cui = cui
                    ngram_matches.append(
                        {
                            'start': start,
                            'end': end,
                            'ngram': ngram,
                            'term': toolbox.safe_unicode(match),
                            'cui': cui,
                            'similarity': match_similarity,
                            'semtypes': semtypes,
                            'preferred': preferred
                        }
                    )
            if len(ngram_matches) > 0:
                matches.append(
                    sorted(
                        ngram_matches,
                        key=lambda m: m['similarity'] + m['preferred'],
                        reverse=True
                    )
                )
        return matches
@staticmethod
def _select_score(match):
return (match[0]['similarity'], (match[0]['end'] - match[0]['start']))
@staticmethod
def _select_longest(match):
return (match[0]['similarity'], (match[0]['end'] - match[0]['start']))
def _select_terms(self, matches):
sort_func = (
self._select_longest if self.overlapping_criteria == 'length'
else self._select_score
)
matches = sorted(matches, key=sort_func, reverse=True)
intervals = toolbox.Intervals()
final_matches_subset = []
for match in matches:
match_interval = (match[0]['start'], match[0]['end'])
if match_interval not in intervals:
final_matches_subset.append(match)
intervals.append(match_interval)
return final_matches_subset
def _make_token_sequences(self, parsed):
for i in range(len(parsed)):
for j in toolbox.xrange3(
i + 1, min(i + self.window, len(parsed)) + 1):
span = parsed[i:j]
yield (span.start_char, span.end_char, span.text)
def _print_verbose_status(self, parsed, matches):
if not self.verbose:
return False
print(
'[{}] {:,} extracted from {:,} tokens'.format(
datetime.datetime.now().isoformat(),
sum(len(match_group) for match_group in matches),
len(parsed)
),
file=sys.stderr
)
return True
def match(self, text, best_match=True, ignore_syntax=False):
parsed = self.nlp(u'{}'.format(text))
if ignore_syntax:
ngrams = self._make_token_sequences(parsed)
else:
ngrams = self._make_ngrams(parsed)
matches = self._get_all_matches(ngrams)
if best_match:
matches = self._select_terms(matches)
self._print_verbose_status(parsed, matches)
|
# Copyright <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (https:// | www.gnu.org/licenses/agpl).
from odoo.tests.common import HttpCase, TransactionCase
class SomethingCase(TransactionCase):
    """Template test case skeleton (Odoo TransactionCase subclass).

    setUp/tearDown hooks call placeholder methods that do not exist yet;
    replace or delete them as the TODOs say before running.
    """
    def setUp(self, *args, **kwargs):
        super(SomethingCase, self).setUp(*args, **kwargs)
        # TODO Replace this for something useful or delete this method
        self.do_something_before_all_tests()
    def tearDown(self, *args, **kwargs):
        # TODO Replace this for something useful or delete this method
        self.do_something_after_all_tests()
        return super(SomethingCase, self).tearDown(*args, **kwargs)
    def test_something(self):
        """First line of docstring appears in test logs.
        Other lines do not.
        Any method starting with ``test_`` will be tested.
        """
        pass
class UICase(HttpCase):
    """Template browser-level (HttpCase) tests."""

    # Run these after the module is installed, not during installation.
    post_install = True
    at_install = False

    def test_ui_web(self):
        """Test backend tests."""
        self.phantom_js(
            url_path="/web/tests?debug=assets&module=module_name",
            code="",
            login="admin",
        )

    def test_ui_website(self):
        """Test frontend tour."""
        self.phantom_js(
            url_path="/?debug=assets",
            code="odoo.__DEBUG__.services['web.Tour']"
                 ".run('test_module_name', 'test')",
            ready="odoo.__DEBUG__.services['web.Tour'].tours.test_module_name",
            login="admin",
        )
|
# -*- coding: utf-8 -*-
# Demo script for cx_Oracle: query, then insert rows using the different
# parameter-binding styles (single dict, executemany with dicts / tuples).
# NOTE(review): credentials and host are hard-coded — move to config
# before any real use.
import cx_Oracle
db = cx_Oracle.connect("username", "password", "10.17.1.220:1521/db")
cursor = db.cursor()
cursor.execute("select loginid from sys_user")
for loginid in cursor:
    print("loginid: ", loginid)
cursor.close()
cursor = db.cursor()
# Insert a single record with literal values
cursor.execute("""insert into tb_user values(1,'admin','password')""");
# Insert another record, binding named parameters from a dict
param = {'id': 2, 'n': 'admin', 'p': 'password'}
cursor.execute('insert into tb_user values(:id,:n,:p)', param);
# Insert several rows at once; parameters as a list of dicts
param = [{'id': 3, 'n': 'admin', 'p': 'password'}, {'id': 4, 'n': 'admin','p': 'password'}, {'id': 5, 'n': 'admin', 'p': 'password'}];
cursor.executemany('insert into tb_user values(:id,:n,:p)', param);
# Insert several more rows
param = [];
# Build 5 rows to insert; parameters as a list of tuples
for i in range(6, 11): # [6,7,8,9,10]
    param.append((i, 'user' + str(i), 'password' + str(i)))
# Positional binds (:1,:2,:3) match the tuple order
cursor.executemany('insert into tb_user values(:1,:2,:3)', param);
cursor.close()
db.commit()
db.close()
|
from .relationship_manager import RelationshipManager, ZeroOrMore # noqa
class ZeroOrOne(RelationshipManager):
    """Cardinality: at most one related node."""

    description = "zero or one relationship"

    def single(self):
        """Return the single related node, or None when there is none.

        Raises CardinalityViolation when more than one node is connected.
        """
        hits = super(ZeroOrOne, self).all()
        count = len(hits)
        if count > 1:
            raise CardinalityViolation(self, count)
        if count == 1:
            return hits[0]

    def all(self):
        """Return the related node as a one-element list, or []."""
        node = self.single()
        return [node] if node else []

    def connect(self, obj, properties=None):
        """Connect `obj`; refuse when a relationship already exists."""
        if len(self):
            raise AttemptedCardinalityViolation(
                "Node already has {0} can't connect more".format(self))
        return super(ZeroOrOne, self).connect(obj, properties)
class OneOrMore(RelationshipManager):
    """Cardinality: at least one related node."""

    description = "one or more relationships"

    def single(self):
        """Return the first related node; raise when there are none."""
        hits = super(OneOrMore, self).all()
        if not hits:
            raise CardinalityViolation(self, 'none')
        return hits[0]

    def all(self):
        """Return every related node; raise when there are none."""
        hits = super(OneOrMore, self).all()
        if not hits:
            raise CardinalityViolation(self, 'none')
        return hits

    def disconnect(self, obj):
        """Disconnect `obj`, refusing to drop the last relationship."""
        if super(OneOrMore, self).__len__() < 2:
            raise AttemptedCardinalityViolation("One or more expected")
        return super(OneOrMore, self).disconnect(obj)
class One(RelationshipManager):
    """Cardinality: exactly one related node."""

    description = "one relationship"

    def single(self):
        """Return the one related node; raise on zero or on many."""
        hits = super(One, self).all()
        if not hits:
            raise CardinalityViolation(self, 'none')
        if len(hits) != 1:
            raise CardinalityViolation(self, len(hits))
        return hits[0]

    def all(self):
        """Return the single related node as a one-element list."""
        return [self.single()]

    def disconnect(self, obj):
        """Disallowed for One: only reconnecting is permitted."""
        raise AttemptedCardinalityViolation("Cardinality one, cannot disconnect use reconnect")

    def connect(self, obj, properties=None):
        """Connect `obj` when the source node is saved and has no edge yet."""
        if not hasattr(self.source, '_id'):
            raise ValueError("Node has not been saved cannot connect!")
        if len(self):
            raise AttemptedCardinalityViolation("Node already has one relationship")
        return super(One, self).connect(obj, properties)
class AttemptedCardinalityViolation(Exception):
    """Raised when a connect/disconnect would break the declared cardinality."""
    pass
class CardinalityViolation(Exception):
    """Raised when the stored relationships disagree with the declared
    cardinality (e.g. a One relationship with zero or many nodes)."""

    def __init__(self, rel_manager, actual):
        # Both stored as strings so __str__ never re-stringifies live objects.
        self.rel_manager = str(rel_manager)
        self.actual = str(actual)

    def __str__(self):
        return "CardinalityViolation: Expected %s got %s" % (self.rel_manager, self.actual)
|
ize) == 2 and size[1] != out.rows():
# flow/fixed widgets rendered too large/small
out = CompositeCanvas(out)
out.pad_trim_top_bottom(0, size[1] - out.rows())
return out
    def get_cursor_coords(self, size):
        """Return the cursor coordinates of the focus widget.

        Coordinates are relative to this container: the focus child's own
        cursor position, shifted down by the rows of every child above it.
        Returns None when the container (or its focus child) has no cursor.
        """
        if not self.selectable():
            return None
        if not hasattr(self.focus_item, 'get_cursor_coords'):
            return None
        i = self.focus_position
        w, (f, height) = self.contents[i]
        item_rows = None
        maxcol = size[0]
        # GIVEN children (and WEIGHT children when rendered as a box) get a
        # (maxcol, maxrow) size; otherwise the child is flow: (maxcol,).
        if f == GIVEN or (f == WEIGHT and len(size) == 2):
            if f == GIVEN:
                maxrow = height
            else:
                if item_rows is None:
                    item_rows = self.get_item_rows(size, focus=True)
                maxrow = item_rows[i]
            coords = self.focus_item.get_cursor_coords((maxcol, maxrow))
        else:
            coords = self.focus_item.get_cursor_coords((maxcol,))
        if coords is None:
            return None
        x,y = coords
        if i > 0:
            # offset y by the heights of every widget above the focus
            if item_rows is None:
                item_rows = self.get_item_rows(size, focus=True)
            for r in item_rows[:i]:
                y += r
        return x, y
def rows(self, size, focus=False ):
return sum(self.get_item_rows(size, focus))
    def keypress(self, size, key ):
        """Pass the keypress to the widget in focus.
        Unhandled 'up' and 'down' keys may cause a focus change.

        Returns None when the key was consumed, otherwise the key.
        """
        if not self.contents:
            return key
        item_rows = None
        if len(size) == 2:
            item_rows = self.get_item_rows(size, focus=True)
        i = self.focus_position
        if self.selectable():
            tsize = self.get_item_size(size, i, True, item_rows)
            key = self.focus.keypress(tsize, key)
            if self._command_map[key] not in ('cursor up', 'cursor down'):
                return key
        # The focus widget did not consume up/down: scan for the nearest
        # selectable sibling in that direction.
        if self._command_map[key] == 'cursor up':
            candidates = range(i-1, -1, -1) # count backwards to 0
        else: # self._command_map[key] == 'cursor down'
            candidates = range(i+1, len(self.contents))
        if not item_rows:
            item_rows = self.get_item_rows(size, focus=True)
        for j in candidates:
            if not self.contents[j][0].selectable():
                continue
            self._update_pref_col_from_focus(size)
            self.focus_position = j
            if not hasattr(self.focus, 'move_cursor_to_coords'):
                return
            # Enter the new focus widget on the row nearest the old cursor,
            # scanning rows in the direction of travel.
            rows = item_rows[j]
            if self._command_map[key] == 'cursor up':
                rowlist = range(rows-1, -1, -1)
            else: # self._command_map[key] == 'cursor down'
                rowlist = range(rows)
            for row in rowlist:
                tsize = self.get_item_size(size, j, True, item_rows)
                if self.focus_item.move_cursor_to_coords(
                        tsize, self.pref_col, row):
                    break
            return
        # nothing to select
        return key
def _update_pref_col_from_focus(self, size):
"""Update self.pref_col from the focus widget."""
if not hasattr(self.focus, 'get_pref_col'):
return
i = self.focus_position
tsize = self.get_item_size(size, i, True)
pref_col = self.focus.get_pref_col(tsize)
if pref_col is not None:
self.pref_col = pref_col
    def move_cursor_to_coords(self, size, col, row):
        """Capture pref col and set new focus.

        Finds the child whose row span contains `row`, asks it to place its
        cursor at (col, row-within-child), and focuses it.  Returns True on
        success, False when the target child is unselectable or refuses.
        """
        self.pref_col = col
        #FIXME guessing focus==True
        focus=True
        wrow = 0
        item_rows = self.get_item_rows(size, focus)
        # Walk children until the cumulative row count passes `row`;
        # the for/else falls through when `row` is below every child.
        for i, (r, w) in enumerate(zip(item_rows,
                (w for (w, options) in self.contents))):
            if wrow + r > row:
                break
            wrow += r
        else:
            return False
        if not w.selectable():
            return False
        if hasattr(w, 'move_cursor_to_coords'):
            tsize = self.get_item_size(size, i, focus, item_rows)
            rval = w.move_cursor_to_coords(tsize, col, row-wrow)
            if rval is False:
                return False
        self.focus_position = i
        return True
    def mouse_event(self, size, event, button, col, row, focus):
        """
        Pass the event to the contained widget.
        May change focus on button 1 press.

        Returns False when the target child has no mouse_event handler,
        otherwise whatever the child's handler returns.
        """
        wrow = 0
        item_rows = self.get_item_rows(size, focus)
        # Find the child whose row span contains the event row.
        for i, (r, w) in enumerate(zip(item_rows,
                (w for (w, options) in self.contents))):
            if wrow + r > row:
                break
            wrow += r
        # Focus is only passed through when the hit child is the focus child.
        focus = focus and self.focus_item == w
        if is_mouse_press(event) and button == 1:
            if w.selectable():
                self.focus_position = i
        if not hasattr(w, 'mouse_event'):
            return False
        tsize = self.get_item_size(size, i, focus, item_rows)
        return w.mouse_event(tsize, event, button, col, row-wrow,
            focus)
class ColumnsError(Exception):
    """Raised for invalid Columns construction or contents."""
    pass
class Columns(Widget, WidgetContainerMixin, WidgetContainerListContentsMixin):
"""
Widgets arranged horizontally in columns from left to right
"""
_sizing = frozenset([FLOW, BOX])
def __init__(self, widget_list, dividechars=0, focus_column=None,
min_width=1, box_columns=None):
"""
:param widget_list: iterable of flow or box widgets
:param dividechars: number of blank characters between columns
:param focus_column: index into widget_list of column in focus,
if ``None`` the first selectable widget will be chosen.
:param min_width: minimum width for each column which is not
calling widget.pack() in *widget_list*.
:param box_columns: a list of column indexes containing box widgets
whose height is set to the maximum of the rows
required by columns not listed in *box_columns*.
*widget_list* may also contain tuples such as:
(*given_width*, *widget*)
make this column *given_width* screen columns wide, where *given_width*
is an int
(``'pack'``, *widget*)
call :meth:`pack() <Widget.pack>` to calculate the width of this column
(``'weight'``, *weight*, *widget*)`
give this column a relative *weight* (number) to calculate its width from the
screen columns remaining
Widgets not in a tuple are the same as (``'weight'``, ``1``, *widget*)
If the Columns widget is treated as a box widget then all children
are treated as box widgets, and *box_columns* is ignored.
If the Columns widget is treated as a flow widget then the rows
    are calculated as the largest rows() returned from all columns
except the ones listed in *box_columns*. The box widgets in
*box_columns* will be displayed with this calculated number of rows,
filling the full height.
"""
self.__super.__init__()
self._contents = MonitoredFocusList()
self._contents.set_modified_callback(self._invalidate)
self._contents.set_focus_changed_callback(lambda f: self._invalidate())
self._contents.set_validate_contents_modified(self._contents_modified)
box_columns = set(box_columns or ())
for i, original in enumerate(widget_list):
w = original
if not isinstance(w, tuple):
self.contents.append((w, (WEIGHT, 1, i in box_columns)))
elif w[0] in (FLOW, PACK): # 'pack' used to be called 'flow'
f = PACK
_ignored, w = w
self.contents.append((w, (f, None, i in box_columns)))
elif len(w) == 2:
width, w = w
self.contents.append((w, (GIVEN, width, i in box_columns)))
elif w[0] == FIXED: # backwards compatibility
f = GIVEN
_ignored, width, w = w
self.contents.append |
import numpy as np
from scipy.optimize import check_grad
## Two-layer NN with ReLU
# Two-layer NN, with 200 units per layer with ReLu ai = max(0,oi)
# X - (W01) - Layer1 - (W12) - Layer2 - (W23) - Output
# ((D+1)*nh) + ((nh+1)*nh) + ((nh+1)*K)
nh = 200
def getAvgGradient(w, X, y, L, K):
    """One forward/backward pass of the two-hidden-layer ReLU network.

    w: flat parameter vector (layout as in parseParams); X: N x D batch;
    y: integer labels; L: L2 penalty weight; K: number of classes.
    Returns (g, l): the average gradient flattened in the same layout as
    w, and the regularized mean cross-entropy loss.
    """
    [N,D] = X.shape
    W01,b1,W12,b2,W23,b3 = parseParams(w,D,K)
    # Forward pass
    h1 = np.maximum(0, np.dot(X, W01) + np.tile(b1,(N,1))) # N x nh, ReLU
    h2 = np.maximum(0, np.dot(h1, W12) + np.tile(b2,(N,1))) # N x nh, ReLU
    scores = np.dot(h2, W23) + np.tile(b3,(N,1)) # N x K
    # Row-max subtraction keeps the softmax numerically stable.
    exp_scores = np.exp(scores-np.tile(np.max(scores,axis=1,keepdims=True),(1,K)))
    probs = exp_scores / np.tile(exp_scores.sum(axis=1,keepdims=True),(1,K)) # N x K
    l = -np.log(probs[range(N),y]).mean() + .5*L*((W01**2).sum()+(W12**2).sum()+(W23**2).sum())
    # Backward pass.  NOTE: dscores aliases (and mutates) probs, which is
    # safe only because probs is not read again below.
    dscores = probs # N x K
    dscores[range(N),y] -= 1
    #dscores /= N
    dW23 = np.dot(h2.T, dscores)/N + L*W23 # nh x K
    db3 = np.sum(dscores, axis=0, keepdims=True)/N # nh x 1
    dh2 = np.dot(dscores, W23.T) # N x K x K x nh = N x nh
    dh2[h2 <= 0.] = 0. # ReLU gradient gate
    dW12 = np.dot(h1.T, dh2)/N + L*W12
    db2 = np.sum(dh2, axis=0, keepdims=True)/N
    dh1 = np.dot(dh2, W12.T)
    dh1[h1 <= 0.] = 0.
    dW01 = np.dot(X.T, dh1)/N + L*W01
    db1 = np.sum(dh1, axis=0, keepdims=True)/N
    g = np.concatenate((dW01.flatten(), db1.flatten(), dW12.flatten(), db2.flatten(), dW23.flatten(), db3.flatten()), axis=0)
    return (g, l)
def predict(w, X, K):
    """Return the argmax class prediction for each row of X.

    Softmax is monotone, so raw scores are compared directly without
    normalisation.
    """
    N, D = X.shape
    W01, b1, W12, b2, W23, b3 = parseParams(w, D, K)
    # Forward pass through both ReLU hidden layers.
    h1 = np.maximum(0, np.dot(X, W01) + np.tile(b1, (N, 1)))
    h2 = np.maximum(0, np.dot(h1, W12) + np.tile(b2, (N, 1)))
    scores = np.dot(h2, W23) + np.tile(b3, (N, 1))
    return np.argmax(scores, axis=1)
def parseParams(w,D,K):
    """Slice the flat parameter vector `w` into the network's weights.

    Layout: W01 (D x nh), b1 (1 x nh), W12 (nh x nh), b2 (1 x nh),
    W23 (nh x K), b3 (1 x K), using the module-level hidden width `nh`.

    Raises ValueError when w has the wrong total size.  (Bug fix: this
    previously printed a message and killed the whole process with
    exit(), which is hostile to callers and untestable.)
    """
    cnt = 0
    W01 = w[:D*nh].reshape((D,nh))
    cnt += D*nh
    b1 = w[cnt:cnt+nh].reshape((1,nh))
    cnt += nh
    W12 = w[cnt:cnt+nh*nh].reshape((nh,nh))
    cnt += nh*nh
    b2 = w[cnt:cnt+nh].reshape((1,nh))
    cnt += nh
    W23 = w[cnt:cnt+nh*K].reshape((nh,K))
    cnt += nh*K
    b3 = w[cnt:cnt+K].reshape((1,K))
    cnt += K
    if (cnt != w.size):
        # Fail loudly to the caller instead of exiting the interpreter.
        raise ValueError('parseParams: wrong param size: got %d, expected %d' % (w.size, cnt))
    return (W01,b1,W12,b2,W23,b3)
def init(D,K):
    """Draw a small random (0.1 * standard normal) initial parameter
    vector sized for input dim D, K classes, and hidden width nh."""
    total = (D+1)*nh + (nh+1)*nh + (nh+1)*K
    return 1.e-1*np.random.normal(size=(total,))
def loss(w, X, y, L, K):
    """Scalar training loss at w (gradient discarded); for check_grad."""
    return getAvgGradient(w, X, y, L, K)[1]
def grad(w, X, y, L, K):
    """Flat gradient of the loss at w (loss value discarded)."""
    return getAvgGradient(w, X, y, L, K)[0]
def self_test1():
    """Smoke test (Python 2): round-trips parseParams, then checks the
    analytic gradient against scipy.optimize.check_grad on random data.
    Prints relative errors; values near zero mean pass."""
    D = 100
    K = 2
    N = 10
    L = 1e-6
    # check parsing
    W01 = np.random.randn(D,nh)
    b1 = np.random.randn(1,nh)
    W12 = np.random.randn(nh,nh)
    b2 = np.random.randn(1,nh)
    W23 = np.random.randn(nh,K)
    b3 = np.random.randn(1,K)
    w = np.concatenate((W01.flatten(), b1.flatten(), W12.flatten(), b2.flatten(), W23.flatten(), b3.flatten()), axis=0)
    W01_,b1_,W12_,b2_,W23_,b3_ = parseParams(w,D,K)
    # Relative reconstruction error per block; all should be exactly 0.
    print ((W01-W01_)**2).sum()/(W01**2).sum()
    print ((b1-b1_)**2).sum()/(b1**2).sum()
    print ((W12-W12_)**2).sum()/(W12**2).sum()
    print ((b2-b2_)**2).sum()/(b2**2).sum()
    print ((W23-W23_)**2).sum()/(W23**2).sum()
    print ((b3-b3_)**2).sum()/(b3**2).sum()
    # Numeric-vs-analytic gradient check on a random batch.
    w = init(D, K)
    w = 1e-0*np.random.normal(size=w.size)
    X = np.random.normal(size=(N,D))
    y = np.random.randint(K,size=(N,))
    err = check_grad(loss, grad, w, X, y, L, K)
    print err
|
# Bootstrap setuptools if missing, then register the simplui package.
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
    name = 'simplui',
    version = '1.0.4',
    author = 'Tristam MacDonald',
    author_email = 'swiftcoder@gmail.com',
    description = 'Light-weight GUI toolkit for pyglet',
    url = 'http://simplui.googlecode.com/',
    platforms = ['all'],
    license = 'BSD',
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Topic :: Scientific/Engineering :: Human Machine Interfaces',
        'Topic :: Software Development :: User Interfaces',
    ],
    packages = find_packages(),
    # Runtime dependencies: themes are JSON, rendering is pyglet.
    install_requires = ['simplejson >= 2.0', 'pyglet >= 1.1']
)
|
DELETE FROM animalvaccination WHERE ID >= %s;" % START_ID
print "DELETE FROM log WHERE ID >= %s;" % START_ID
print "DELETE FROM owner WHERE ID >= %s;" % START_ID
print "DELETE FROM adoption WHERE ID >= %s;" % START_ID
print "DELETE FROM media;" # They want media cleared out, shouldn't be enough to worry about orphans
# Create an unknown owner
uo = asm.Owner()
owners.append(uo)
uo.OwnerSurname = "Unknown Owner"
uo.OwnerName = uo.OwnerSurname
# Deal with people first
for d in asm.csv_to_list(PERSON_FILENAME, remove_non_ascii=True):
# Each row contains a person
o = asm.Owner()
owners.append(o)
ppo[d["People_Ctr"]] = o
o.OwnerForeNames = d["PERSONFIRSTNAME"]
o.OwnerSurname = d["PERSONLASTNAME"]
o.OwnerName = o.OwnerForeNames + " " + o.OwnerSurname
o.OwnerAddress = d["PERSONADDRESS"]
o.OwnerTown = d["PERSONCITY"]
o.OwnerCounty = d["PERSONSTATE"]
o.OwnerPostcode = d["PERSONZIPCODE"]
o.EmailAddress = d["PERSONEMAIL"]
o.WorkTelephone = d["PERSONWORKPHONE"]
o.MobileTelephone = d["PERSONCELLPHONE"]
o.IsBanned = asm.iif(d["PERSONFLAGS"].find("Banned") != -1, 1, 0)
o.IsDonor = asm.iif(d["PERSONDONOR"] == "1", 1, 0)
o.IsFosterer = asm.iif(d["PERSONFOSTERER"] == "1", 1, 0)
o.Comments = d["PERSONCOMMENTS"]
o.JurisdictionID = asm.jurisdiction_from_db(d["PERSONADDITIONALCOUNCILNAME"])
# Animal intake records
for d in asm.csv_to_list(ANIMAL_FILENAME, remove_non_ascii=True):
# Each row contains an animal with intake info:
a = asm.Animal()
animals.append(a)
ppa[d["Animal_Identifier"]] = a
a.AnimalTypeID = asm.type_from_db(d["Pound_Reason"])
a.SpeciesID = asm.species_id_for_name(d["Species"])
a.AnimalName = d["Name"]
if a.AnimalName.strip() == "":
a.AnimalName = "(unknown)"
a.DateBroughtIn = getdate(d["Date_Admitted"]) or asm.today()
if d["Date_Of_Birth"].strip() != "":
a.DateOfBirth = getdate(d["Date_Of_Birth"])
if a.DateOfBirth is None:
a.DateOfBirth = asm.subtract_days(a.DateBroughtIn, 365)
a.CreatedDate = a.DateBroughtIn
a.LastChangedDate = a.DateBroughtIn
#asm.additional_field("Legacy_Tag_No", 0, a.ID, d["Tag_no"])
#asm.additional_field("Legacy_Tag_No_Q", 0, a.ID, d["Tag_no_qualifier"])
a.ShortCode = "%s:%s" % (d["Tag_no"], d["Tag_no_qualifier"])
a.ShelterCode = a.ShortCode
a.BreedID = asm.breed_from_db(d["Breed"], 1)
a.BreedName = d["Breed"]
if d["Cross_Breed"] != "":
a.Breed2ID = asm.breed_from_db(d["Cross_Breed"], 1)
a.CrossBreed = 1
a.BreedName = "%s / %s" % (d["Breed"], d["Cross_Breed"])
#a.BaseColourID = asm.colour_id_for_names(d["Base_Colour"], d["Secondary_Colour"])
a.BaseColourID = asm.colour_from_db(d["Base_"])
a.AnimalComments = d["Notes"]
a.Sex = asm.getsex_mf(d["Sex"])
a.Size = asm.size_id_for_name(d["Size"])
a.NeuteredDate = getdate(d["Date_Desexed"])
if a.NeuteredDate is not None: a.Neutered = 1
a.IsNotForRegistration = 0
a.IsNotAvailableForAdoption = 1
a.IdentichipNumber = d["Microchip_no"]
a.Identichip2Number = d["Alternate_Chip_No"]
asm.additional_field("MChipType", 5, a.ID, d["Microchip_Type"]) # MChipType additional field
if a.IdentichipNumber != "": a.Identichipped = 1
if a.IdentichipNumber == "0":
a.Identichipped = 0
a.IdentichipNumber = ""
a.IdentichipDate = asm.getdate_ddmmyyyy(d["Date_Microchipped"])
a.IsGoodWithCats = 2
a.IsGoodWithDogs = 2
a.IsGoodWithChildren = 2
a.HouseTrained = 0
a.AcceptanceNumber = d["Litter No"]
comments = "Breed: " + d["Breed"] + "/" + d["Cross_Breed"]
comments += "\nSpecies: " + d["Species"]
comments += "\nMicrochip Type: " + d["Microchip_Type"]
comments += "\nSize: " + d["Size"]
comments += "\nCondition: " + d["Animal_Condition"]
a.HiddenAnimalDetails = comments
entrycomments = "Pound Reason: " + d["Pound_Reason"]
entrycomments += "\nWhere Found: " + d["Where_found"]
entrycomments += "\nStreet Found: " + d["Street_Found_In"]
a.ReasonForEntry = entrycomments
a.EntryReasonID = 17 # Surrender
#if d["InShelterSearchFlag"] == "N":
# a.Archived = 1
if d["Location"] != "": a.ShelterLocation = asm.location_from_db(d["Location"])
if d["Unit"] != "": a.ShelterLocationUnit = d["Unit"]
# Animal log, recording medical history and linking adoptions/surrenderers/etc
for d in asm.csv_to_list(LOG_FILENAME, remove_non_ascii=True):
if d["Animal_Identifier"] not in ppa: continue
a = ppa[d["Animal_Identifier"]]
o = uo
if d["People_ctr"] != "": o = ppo[d["People_ctr"]]
ed = getdate(d["Entry_date"])
if not ed: continue
if d["Weight"] != "0" and d["Weight"] != "":
try:
a.Weight = float(d["Weight"])
except ValueError:
pass
l = asm.Log()
logs.append(l)
l.LogTypeID = 4 # Weight
l.LinkID = a.ID
l.LinkType = 0
l.Date = ed
l.Comments = d["Weight"]
if d["Action"] == "Admission" and d["Log_Description"] == "Owner Surrender" and o:
a.OriginalOwnerID = o.ID
a.BroughtInByOwnerID = o.ID
a.DateBroughtIn = ed
a.CreatedBy = d["User_Id"]
elif d["Action"] == "Veterinary" and d["Log_Description"] == "Desexed":
a.Neutered = 1
a.NeuteredDate = ed
animalmedicals.append( asm.animal_regimen_single(a.ID, ed, d | ["Log_Description"], "N/A", d["Log_Notes"]) )
elif d["Action"] == "Veterinary":
animalmedicals.append( asm.animal_regimen_single(a.ID, ed, d["Log_Description"], "N/A", d["Log_Notes"]) )
elif d["Action"] == "Vaccination":
vacctypes = {
"C3": | 16,
"C5": 18,
"F3": 22,
"F4": 23
}
av = asm.AnimalVaccination()
animalvaccinations.append(av)
av.AnimalID = a.ID
av.VaccinationID = 8
for k, i in vacctypes.iteritems():
if d["Log_Description"].find(k) != -1: av.VaccinationID = i
av.DateRequired = ed
av.DateOfVaccination = ed
av.Comments = "Type: %s\n%s" % (d["Log_Description"], d["Log_Notes"])
av.CreatedBy = d["User_Id"]
elif d["Action"] == "Foster Care" and d["Log_Description"] == "Foster Care":
o.IsFosterer = 1
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = o.ID
m.MovementType = 2
m.MovementDate = ed
m.Comments = d["Log_Notes"]
a.Archived = 1
a.ActiveMovementID = m.ID
a.ActiveMovementDate = m.MovementDate
a.ActiveMovementType = 2
a.LastChangedDate = ed
movements.append(m)
elif d["Action"] == "Foster Care" and d["Log_Description"] == "Carer Return":
# Return this person's most recent foster
for m in movements:
if m.AnimalID == a.ID and m.ReturnDate is None and m.MovementType == 2 and m.OwnerID == o.ID:
m.ReturnDate = ed
break
elif d["Action"] == "Adoption" or d["Action"] == "Exit Log":
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = o.ID
m.MovementType = 1
m.MovementDate = ed
m.Comments = d["Log_Notes"]
a.Archived = 1
a.ActiveMovementID = m.ID
a.ActiveMovementDate = m.MovementDate
a.ActiveMovementType = 1
a.LastChangedDate = ed
movements.append(m)
elif d["Action"] == "Claim":
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = o.ID
m.MovementType = 5
m.MovementDate = ed
m.Comments = d["Log_Notes"]
a.Archived = 1
a.ActiveMovementID = m.ID
a.ActiveMovementDate = m.MovementDate
a.ActiveMovementType = 5
a.LastChangedDate = ed
movements.append(m)
elif d["Action"] == "Outlet Transfer":
m = asm.Movement()
m.AnimalID = a.ID
m.OwnerID = o.ID
m.MovementType = 3
m.MovementDate = ed
m.Comments = d["Log_Notes"]
a.Archived = 1
a.ActiveMovementID = m.ID
a.ActiveMovementDate = m.MovementDate
|
import logging
def registerLoggingHander(dependencies):
    # Attach a single Plex-backed handler to each named stdlib logger so
    # their records surface in the Plex plugin log.
    handler = PlexLoggerHandler()
    for name in dependencies:
        Log.Debug("Registering LoggerHandler for dependency: %s" % name)
        dep_logger = logging.getLogger(name)
        dep_logger.setLevel('DEBUG')
        dep_logger.addHandler(handler)
class PlexLoggerHandler(logging.StreamHandler):
    """Bridges records from the standard `logging` module into Plex's Log API."""
    def __init__(self, level=0):
        super(PlexLoggerHandler, self).__init__(level)
    def getFormattedString(self, record):
        # "<logger name>: <interpolated message>"
        return record.name + ": " + record.getMessage()
    def emit(self, record):
        # Map stdlib log levels onto the matching Plex Log call.
        # FIX: the original also tested `record.levelno == logging.FATAL` after
        # CRITICAL, but logging.FATAL is an alias of logging.CRITICAL in the
        # stdlib, so that branch (which called Log.Exception) was unreachable.
        # It has been removed; CRITICAL/FATAL records go to Log.Critical.
        if record.levelno == logging.DEBUG:
            Log.Debug(self.getFormattedString(record))
        elif record.levelno == logging.INFO:
            Log.Info(self.getFormattedString(record))
        elif record.levelno == logging.WARNING:
            Log.Warn(self.getFormattedString(record))
        elif record.levelno == logging.ERROR:
            Log.Error(self.getFormattedString(record))
        elif record.levelno == logging.CRITICAL:
            Log.Critical(self.getFormattedString(record))
        else:
            # Non-standard numeric levels fall through here.
            Log.Error("UNKNOWN LEVEL: %s", record.getMessage())
# -*- coding: utf-8 -*-
from collections import Counter
from .design_pattern import singleton
@singleton()
class ListUtilsClass(object):
    """List helpers exposed as a singleton (Python 2: uses `unicode`/print stmt)."""
    def most_common_inspect(self, list1):
        """Print a frequency table of *list1* and return Counter.most_common()."""
        # Normalize every element to unicode so Counter keys compare cleanly.
        new_list = []
        for s1 in list1:
            if not isinstance(s1, unicode):
                s1 = str(s1).decode("UTF-8")
            new_list.append(s1)
        cc = Counter(new_list).most_common()
        if len(cc) > 0:
            # Pad to the longest value (+5) so the printed columns line up.
            max_len = len(max([c1[0] for c1 in cc], key=lambda x1: len(x1))) + 5
            for c1 in cc:
                print c1[0].ljust(max_len, ' '), ' : ', c1[1]
        return cc
    def uniq_seqs(self, seqs, uniq_lambda=None):
        """De-duplicate *seqs*; with *uniq_lambda* as key, keep first occurrences in order."""
        # Fast path: no key function -> plain set dedup (order NOT preserved).
        if uniq_lambda is None:
            return list(set(seqs))
        __uniq = set([])
        __remove_idxes = []
        # First pass: record indexes of duplicate keys (first occurrence wins).
        for idx1, seq1 in enumerate(seqs[:]):
            __id = uniq_lambda(seq1)
            if __id in __uniq:
                __remove_idxes.append(idx1)
            else:
                __uniq.add(__id)
        # Second pass: rebuild the list without the duplicate indexes.
        new_seqs = []
        for idx1, seq1 in enumerate(seqs[:]):
            if idx1 not in __remove_idxes:
                new_seqs.append(seq1)
        seqs = new_seqs
        return seqs
# Module-level singleton instance plus a convenience alias for its most-used method.
ListUtils = ListUtilsClass()
uniq_seqs = ListUtils.uniq_seqs
|
'''
Copyright 2015
This file is part of Orbach.
Orbach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
th | e Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Orbach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY | or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Orbach. If not, see <http://www.gnu.org/licenses/>.
'''
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from orbach.core import views
# DRF router auto-generates list/detail routes for each registered viewset.
router = DefaultRouter()
router.register(r'galleries', views.GalleryViewSet)
router.register(r'image_files', views.ImageFileViewSet)
router.register(r'users', views.UserViewSet)
# Mount all router-generated API routes at the package root.
urlpatterns = [
    url(r'^', include(router.urls)),
]
|
import glob
from pythonforandroid.toolchain import (
CythonRecipe,
Recipe,
current_directory,
info,
shprint,
)
from os.path import join
import sh
class TwistedRecipe(CythonRecipe):
    """python-for-android recipe building Twisted 17.9.0 from the GitHub tarball."""
    version = '17.9.0'
    url = 'https://github.com/twisted/twisted/archive/twisted-{version}.tar.gz'
    depends = ['setuptools', 'zope_interface', 'incremental', 'constantly']
    # Build with the host Python directly rather than via the target Python.
    call_hostpython_via_targetpython = False
    install_in_hostpython = True
    def prebuild_arch(self, arch):
        """Pre-build hook; intended to strip tests, currently a no-op placeholder."""
        super(TwistedRecipe, self).prebuild_arch(arch)
        # TODO Need to whitelist tty.pyo and termios.so here
        print('Should remove twisted tests etc. here, but skipping for now')
    def get_recipe_env(self, arch):
        """Return the build environment with PYTHONPATH extended for this recipe."""
        env = super(TwistedRecipe, self).get_recipe_env(arch)
        # We add BUILDLIB_PATH to PYTHONPATH so twisted can find _io.so
        env['PYTHONPATH'] = ':'.join([
            self.ctx.get_site_packages_dir(),
            env['BUILDLIB_PATH'],
        ])
        return env
# Module-level instance picked up by the python-for-android recipe loader.
recipe = TwistedRecipe()
|
import numpy as np
from sverchok.utils.testing import *
from sverchok.utils.logging import debug, info
from sverchok.utils.geom import CubicSpline
class CubicSplineTests(SverchokTestCase):
    """Regression tests pinning CubicSpline.eval/.tangent on a fixed control polygon."""
    def setUp(self):
        super().setUp()
        # Four control points in the XY plane; chord-length ("DISTANCE") parametrization.
        vertices = [(-1, -1, 0), (0, 0, 0), (1, 2, 0), (2, 3, 0)]
        self.spline = CubicSpline(vertices, metric="DISTANCE")
    def test_eval(self):
        """Spline positions at sample parameters match golden values (8 decimals)."""
        t_in = np.array([0.0, 0.1, 0.4, 0.5, 0.7, 1.0])
        result = self.spline.eval(t_in)
        #info(result)
        # Golden values captured from a known-good run.
        expected_result = np.array(
            [[-1.0, -1.0, 0.0 ],
             [-0.60984526, -0.66497986, 0.0 ],
             [ 0.29660356, 0.5303721, 0.0 ],
             [ 0.5, 1.0, 0.0 ],
             [ 0.94256655, 1.91347161, 0.0 ],
             [ 2.0, 3.0, 0.0 ]])
        self.assert_numpy_arrays_equal(result, expected_result, precision=8)
    def test_tangent(self):
        """Spline tangents at the same parameters match golden values (8 decimals)."""
        t_in = np.array([0.0, 0.1, 0.4, 0.5, 0.7, 1.0])
        result = self.spline.tangent(t_in)
        #info(result)
        expected_result = np.array(
            [[ 0.00789736, 0.00663246, 0.0 ],
             [ 0.00761454, 0.0068363, 0.0 ],
             [ 0.00430643, 0.00922065, 0.0 ],
             [ 0.0039487, 0.0094785, 0.0 ],
             [ 0.00537964, 0.00844713, 0.0 ],
             [ 0.00789736, 0.00663246, 0.0 ]])
        self.assert_numpy_arrays_equal(result, expected_result, precision=8)
|
from components.base.automotive_component import AutomotiveComponent
from config import project_registration as proj
from tools.ecu_logging import ECULogger as L
import random
class AbstractECU(AutomotiveComponent):
    '''
    This abstract class defines the interface of
    an ECU as it is found in an automotive network
    '''
    def __init__(self, sim_env, ecu_id, data_rate):
        ''' Constructor
            Input:  sim_env    simpy.Environment   environment of this component
                    ecu_id     string              id of the corresponding AbstractECU
                    data_rate  float               datarate of the ecu
            Output: -
        '''
        AutomotiveComponent.__init__(self, sim_env)
        self._ABSTRACT_ECU = True
        self._ecu_id = ecu_id  # ID of the ECU
        self.ecuSW = None  # what is done
        self.ecuHW = None  # what is used to make it happen
        self.MessageClass = proj.BUS_MSG_CLASS  # what kind of messages are exchanged
        self.connected_bus = None  # Bus that is connected to the ECU
        # NOTE(review): the data_rate argument is ignored here - the project
        # default is always assigned instead. Confirm whether that is intended.
        self.data_rate = proj.BUS_ECU_DATARATE  # Datarate with which bits are put on the bus
        self._effective_datarate = 0  # Bit per second
        self._effective_bittime = 0  # seconds
        self._jitter = 1  # multiplier applied to timeouts; 1 == no jitter
        self.startup_delay = False
    def set_startup_delay(self, start_time):
        ''' this method sets the startup delay. When this delay is set
            this ECU is activated after the defined start time
            Input:  start_time float   time when the ECU starts running
            Output: -
        '''
        self.startup_delay = start_time
        # A truthy start time means the ECU begins inactive until that time.
        if start_time:
            self.ecuHW.transceiver.ecu_is_active = False
    def set_jitter(self, jitter_range):
        ''' sets the jitter which will be multiplied onto each
            timeout value. It will be within jitter_range
            e.g. jitter_range of 0.1 means that any random value
            between 1.0 and 1.1 will be used
            Input:  jitter_range: float   dispersion from 1.0
            Output: -
        '''
        # determine jitter
        self._jitter = 1 + (random.random() * jitter_range)
        # apply jitter on layers; bare excepts tolerate layers that are absent
        # on a given ECU configuration.
        # NOTE(review): the physical_lay.transceiver assignment appears twice
        # (here and below) - the repetition is harmless but looks unintended.
        try: self.ecuSW.comm_mod.physical_lay.transceiver._jitter = self._jitter
        except: pass
        try: self.ecuSW.comm_mod._jitter = self._jitter
        except: pass
        try: self.ecuSW.comm_mod.transp_lay._jitter = self._jitter
        except: pass
        try: self.ecuSW.comm_mod.datalink_lay._jitter = self._jitter
        except: pass
        try: self.ecuSW.comm_mod.physical_lay.transceiver._jitter = self._jitter
        except: pass
        try: self.ecuSW.app_lay._jitter = self._jitter
        except: pass
    def _connect_hw_sw(self):
        ''' connect all hardware components with their
            associated software connections
            Input:  -
            Output: -
        '''
        # application Layer
        self.ecuSW.app_lay.microcontroller = self.ecuHW.mic_controller
        # physical and data link layer '''
        self.ecuSW.comm_mod.datalink_lay.controller = self.ecuHW.controller
        self.ecuSW.comm_mod.physical_lay.transceiver = self.ecuHW.transceiver
        self.ecuSW.comm_mod.datalink_lay.effective_bittime = self._effective_bittime
    def connect_to(self, bus):
        ''' connects the bus to the ECU
            Input:  bus CANBus   Bus that will be connected
            Output: -
        '''
        self.ecuHW.transceiver.connect_bus(bus)
        self.connected_bus = bus
    def get_type_id(self):
        ''' returns the id of this ECU type
            Input:  -
            Output: ecu_type string   type of this ECU; e.g.'TLSECU'
        '''
        raise NotImplementedError(" get_type_id() was not implemented by class %s" % self.__class__)
    def get_rec_buffer_items(self):
        ''' returns the current content of the receiving buffer
            Input:  -
            Output: rec_buffer list   list of items in the receiving buffer
        '''
        return self.ecuHW.controller.receive_buffer.items
    def get_trans_buffer_items(self):
        ''' returns the current content of the transmit buffer
            Input:  -
            Output: trans_buffer list   list of items in the transmit buffer
        '''
        return self.ecuHW.controller.transmit_buffer.items
    def install_hw_filter(self, allowed_items_list):
        ''' installs a hardware filter that filters all
            message ids that are not defined in the passed
            list. This filter is applied on the transceiver
            Input:  allowed_items_list list   list of message_ids that are let pass by the transceiver
            Output: -
        '''
        try:
            self.ecuHW.transceiver.install_filter(allowed_items_list)
        except:
            # Log error 300: transceiver missing or filter installation failed.
            L().log_err(300)
    def _GET_ABSTRACT_ECU(self):
        ''' marker that this is a AbstractECU '''
        return self._ABSTRACT_ECU
    @property
    def ecu_id(self):
        # Public read/write accessor for the ECU id.
        return self._ecu_id
    @ecu_id.setter
    def ecu_id(self, value):
        self._ecu_id = value
    def set_monitor(self, monitor):
        # NOTE(review): `monitor` is only defined after this is called; it is
        # not initialized in __init__.
        self.monitor = monitor
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Better Collada Exporter",
    "author": "Juan Linietsky",
    "blender": (2, 5, 8),
    "api": 38691,
    "location": "File > Import-Export",
    "description": ("Export DAE Scenes, This plugin actually works better! otherwise contact me."),
    "warning": "",
    "wiki_url": ("http://www.godotengine.org"),
    "tracker_url": "",
    "support": 'OFFICIAL',
    "category": "Import-Export"}
# Support module reload when the add-on is re-registered inside a running
# Blender session ("bpy" is already in locals on re-import).
if "bpy" in locals():
    import imp
    if "export_dae" in locals():
        imp.reload(export_dae)
import bpy
from bpy.props import StringProperty, BoolProperty, FloatProperty, EnumProperty
from bpy_extras.io_utils import (ExportHelper,
path_reference_mode,
axis_conversion,
)
cl | ass ExportDAE(bpy.types.Operator, ExportHelper):
'''Selection to DAE'''
bl_idname = "export_scene.dae"
bl_label = "Export DAE"
bl_options = {'PRESET'}
filename_ext = ".dae"
filter_glob = StringProperty(default="*.dae", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
object_types = EnumProperty(
name="Object Types",
options={'ENUM_FLAG'},
items=(('EMPTY', "Empty", ""),
('CAMERA', "Camera", ""),
('LAMP', "Lamp", ""),
('ARMATURE', "Armature", ""),
('MESH', "Mesh", ""),
('CURVE', "Curve", ""),
),
default={'EMPTY', 'CAMERA', 'LAMP', 'ARMATURE', 'MESH','CURVE'},
)
use_export_selected = BoolProperty(
name="Selected Objects",
description="Export only selected objects (and visible in active layers if that applies).",
default=False,
)
use_mesh_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply modifiers to mesh objects (on a copy!).",
default=True,
)
use_tangent_arrays = BoolProperty(
name="Tangent Arrays",
description="Export Tangent and Binormal arrays (for normalmapping).",
default=False,
)
use_triangles = BoolProperty(
name="Triangulate",
description="Export Triangles instead of Polygons.",
default=False,
)
use_copy_images = BoolProperty(
name="Copy Images",
description="Copy Images (create images/ subfolder)",
default=False,
)
use_active_layers = BoolProperty(
name="Active Layers",
description="Export only objects on the active layers.",
default=True,
)
use_exclude_ctrl_bones = BoolProperty(
name="Exclude Control Bones",
description="Exclude skeleton bones with names that begin with 'ctrl'.",
default=True,
)
use_anim = BoolProperty(
name="Export Animation",
description="Export keyframe animation",
default=False,
)
use_anim_action_all = BoolProperty(
name="All Actions",
description=("Export all actions for the first armature found in separate DAE files"),
default=False,
)
use_anim_skip_noexp = BoolProperty(
name="Skip (-noexp) Actions",
description="Skip exporting of actions whose name end in (-noexp). Useful to skip control animations.",
default=True,
)
use_anim_optimize = BoolProperty(
name="Optimize Keyframes",
description="Remove double keyframes",
default=True,
)
anim_optimize_precision = FloatProperty(
name="Precision",
description=("Tolerence for comparing double keyframes "
"(higher for greater accuracy)"),
min=1, max=16,
soft_min=1, soft_max=16,
default=6.0,
)
use_metadata = BoolProperty(
name="Use Metadata",
default=True,
options={'HIDDEN'},
)
@property
def check_extension(self):
return True#return self.batch_mode == 'OFF'
def check(self, context):
return True
"""
isretur_def_change = super().check(context)
return (is_xna_change or is_def_change)
"""
def execute(self, context):
if not self.filepath:
raise Exception("filepath not set")
""" global_matrix = Matrix()
global_matrix[0][0] = \
global_matrix[1][1] = \
global_matrix[2][2] = self.global_scale
"""
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"global_scale",
"check_existing",
"filter_glob",
"xna_validate",
))
from . import export_dae
return export_dae.save(self, context, **keywords)
def menu_func(self, context):
    # Menu entry appended to File > Export.
    self.layout.operator(ExportDAE.bl_idname, text="Better Collada (.dae)")
def register():
    # Register every class in this module and hook the export menu entry.
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
    # Undo register(): drop the menu entry and unregister the classes.
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == "__main__":
    register()
|
#! /usr/bin/env python
# This file is part of the dvbobjects library.
#
# Copyright 2009-2013 Lorenzo Pallara l.pallara@avalpa.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import string
from dvbobjects.MPEG.Section import Section
from dvbobjects.utils import *
from dvbobjects.DVB.Descriptors | import *
######################################################################
class update_notification_section(Section):
    """DVB Update Notification Table (UNT) section, table_id 0x4B (Python 2)."""
    table_id = 0x4B
    section_max_size = 4096
    def pack_section_body(self):
        """Serialize the UNT section body; returns the packed byte string."""
        # table_id_extension: action_type in the high byte, XOR-folded OUI in the low byte.
        self.table_id_extension = self.action_type << 8 | ((self.OUI >> 16) ^ ((self.OUI >> 8) & 0xFF) ^ (self.OUI & 0xFF))
        # Serialize both descriptor loops back-to-back.
        common_blob = "".join([d.pack() for d in self.common_descriptor_loop])
        compat_blob = "".join([d.pack() for d in self.compatibility_descriptor_loop])
        fmt = "!HBBH%ds%ds" % (len(common_blob), len(compat_blob))
        # OUI is 24 bits: packed as a 16-bit high part plus an 8-bit low byte.
        return pack(fmt,
                    self.OUI >> 8,
                    self.OUI & 0xFF,
                    self.processing_order,
                    0xF000 | len(common_blob),
                    common_blob,
                    compat_blob,
                    )
######################################################################
class unt_compatibility_descriptor_loop_item(DVBobject):
    """One entry of the UNT compatibility descriptor loop (Python 2)."""
    def pack(self):
        """Serialize this loop item; returns the packed byte string."""
        # Serialize the target and operational descriptor loops.
        target_blob = "".join([d.pack() for d in self.target_descriptor_loop])
        operational_blob = "".join([d.pack() for d in self.operational_descriptor_loop])
        fmt = "!%dsHH%dsH%ds" % (len(self.compatibility_descriptor), len(target_blob), len(operational_blob))
        # Each loop is preceded by a 12-bit length with the top nibble set (0xF000).
        return pack(fmt,
                    self.compatibility_descriptor,
                    len(target_blob) + len(operational_blob),
                    0xF000 | len(target_blob),
                    target_blob,
                    0xF000 | len(operational_blob),
                    operational_blob,
                    )
|
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Is this a triangle?
#Problem level: 7 kyu
def is_tri | angle(a, b, c):
return (a+b>c) and (b+c>a) and | (c+a>b)
|
from django.contrib import admin
from .models import *
# Django admin configuration: one ModelAdmin per model, declaring list
# columns, link columns and searchable fields for the admin change lists.
class ProductAdmin(admin.ModelAdmin):
    list_display = ('id', 'prd_process_id', 'prd_name',
        'prd_display_name', 'prd_owner', 'prd_product_id', 'prd_date',
        'prd_class', 'prd_filter', 'prd_is_public', 'prd_is_permanent',)
    list_display_links = ('id', 'prd_process_id', 'prd_name',)
    search_fields = ('prd_process_id', 'prd_name', 'prd_display_name', 'prd_product_id',)
class ProductReleaseAdmin(admin.ModelAdmin):
    list_display = ('id', 'product', 'release',)
    list_display_links = ('id', 'product', 'release',)
    search_fields = ('product', 'release',)
class ProductTagAdmin(admin.ModelAdmin):
    list_display = ('id', 'product', 'tag',)
    list_display_links = ('id', 'product', 'tag',)
    search_fields = ('product', 'tag',)
class FileAdmin(admin.ModelAdmin):
    list_display = ('id', 'prd_name', 'prd_display_name',
        'prd_class', 'fli_base_path', 'fli_name',)
    list_display_links = ('id', 'prd_name', 'prd_display_name', 'prd_class',)
    search_fields = ('fli_name',)
class TableAdmin(admin.ModelAdmin):
    list_display = ('id', 'prd_name', 'prd_display_name',
        'prd_class', 'tbl_database', 'tbl_schema', 'tbl_name',)
    list_display_links = ('id', 'prd_name', 'prd_display_name',
        'prd_class', 'tbl_schema', 'tbl_name',)
    search_fields = ('tbl_schema', 'tbl_name',)
class CatalogAdmin(admin.ModelAdmin):
    list_display = (
        'id', 'prd_name', 'prd_display_name', 'prd_class', 'ctl_num_objects',
    )
class MapAdmin(admin.ModelAdmin):
    list_display = (
        'id', 'prd_name', 'prd_display_name', 'prd_class', 'mpa_nside', 'mpa_ordering', 'prd_filter', 'prd_is_public', 'prd_is_permanent'
    )
    list_display_links = ('id', 'prd_name')
    search_fields = ('prd_name',)
class CutOutJobAdmin(admin.ModelAdmin):
    list_display = (
        'id', 'cjb_product', 'cjb_display_name', 'cjb_status', 'cjb_tag', 'owner',
    )
    list_display_links = ('id',)
    search_fields = ('cjb_display_name',)
class DesjobAdmin(admin.ModelAdmin):
    list_display = (
        'id', 'djb_cutout_job', 'djb_jobid', 'djb_status', 'djb_start_time', 'djb_finish_time', 'djb_message',
    )
    list_display_links = ('id',)
    search_fields = ('djb_jobid',)
class CutoutAdmin(admin.ModelAdmin):
    list_display = (
        'id', 'cjb_cutout_job', 'ctt_object_id', 'ctt_object_ra', 'ctt_object_dec', 'ctt_img_format', 'ctt_filter',
        'ctt_file_name', 'ctt_file_path', 'ctt_file_type', 'ctt_file_size', )
    list_display_links = ('id',)
    search_fields = ('id',)
class MaskAdmin(admin.ModelAdmin):
    list_display = (
        'id', 'prd_name', 'prd_display_name', 'prd_class', 'msk_filter',
    )
    list_display_links = ('id', 'prd_name')
    search_fields = ('prd_name',)
class ProductContentAdmin(admin.ModelAdmin):
    list_display = ('id', 'pcn_product_id', 'pcn_column_name', 'pcn_ucd')
    list_display_links = ('pcn_column_name',)
    search_fields = ('pcn_column_name',)
class ProductContentAssociationAdmin(admin.ModelAdmin):
    list_display = ('id', 'pca_product', 'pca_class_content', 'pca_product_content',)
    search_fields = ('pca_product__prd_display_name', 'pca_product__prd_name')
class ProductContentSettingAdmin(admin.ModelAdmin):
    list_display = ('id', 'pcs_content', 'pcs_setting', 'pcs_is_visible', 'pcs_order')
class ProductSettingAdmin(admin.ModelAdmin):
    list_display = (
        'id', 'cst_product', 'owner', 'cst_display_name', 'cst_description', 'cst_is_public', 'cst_is_editable',)
    search_fields = ('cst_product__prd_display_name', 'cst_display_name', 'cst_description',)
class CurrentSettingAdmin(admin.ModelAdmin):
    list_display = ('id', 'cst_product', 'cst_setting', 'owner',)
class WorkgroupAdmin(admin.ModelAdmin):
    list_display = ('id', 'wgp_workgroup', 'owner',)
class WorkgroupUserAdmin(admin.ModelAdmin):
    list_display = ('id', 'wgu_workgroup', 'wgu_user',)
class PermissionAdmin(admin.ModelAdmin):
    list_display = ('id', 'prm_product', 'prm_user', 'prm_workgroup',)
class ProductRelatedAdmin(admin.ModelAdmin):
    list_display = ('id', 'prl_product', 'prl_related', 'prl_relation_type', 'prl_cross_identification',)
class FiltersetdAdmin(admin.ModelAdmin):
    # NOTE(review): class name has a typo ("Filtersetd"); kept because it is
    # referenced by the admin.site.register call below.
    list_display = ('id', 'product', 'owner', 'fst_name',)
class FilterConditionAdmin(admin.ModelAdmin):
    list_display = ('id', 'filterset', 'fcd_property', 'fcd_property_name', 'fcd_operation', 'fcd_value')
class BookmarkedAdmin(admin.ModelAdmin):
    list_display = ('id', 'product', 'owner', 'is_starred')
# Wire each model to its ModelAdmin in the admin site.
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductRelease, ProductReleaseAdmin)
admin.site.register(ProductTag, ProductTagAdmin)
admin.site.register(File, FileAdmin)
admin.site.register(Table, TableAdmin)
admin.site.register(Catalog, CatalogAdmin)
admin.site.register(Map, MapAdmin)
admin.site.register(CutOutJob, CutOutJobAdmin)
admin.site.register(Desjob, DesjobAdmin)
admin.site.register(Cutout, CutoutAdmin)
admin.site.register(Mask, MaskAdmin)
admin.site.register(ProductContent, ProductContentAdmin)
admin.site.register(ProductContentAssociation, ProductContentAssociationAdmin)
admin.site.register(ProductContentSetting, ProductContentSettingAdmin)
admin.site.register(ProductSetting, ProductSettingAdmin)
admin.site.register(CurrentSetting, CurrentSettingAdmin)
admin.site.register(Permission, PermissionAdmin)
admin.site.register(ProductRelated, ProductRelatedAdmin)
admin.site.register(Workgroup, WorkgroupAdmin)
admin.site.register(WorkgroupUser, WorkgroupUserAdmin)
admin.site.register(Filterset, FiltersetdAdmin)
admin.site.register(FilterCondition, FilterConditionAdmin)
admin.site.register(BookmarkProduct, BookmarkedAdmin)
|
#!/usr/bin/env python
# -*- coding: ut | f-8 -*-
import re
import urlparse
from scrapy import log
from scrapy.http import Request
from base.base_wolf import Base_Wolf
class Wolf(Base_Wolf):
    """Scrapy crawler ("wolf") for resource pages on www.cnscg.org (Python 2)."""
    def __init__(self, *args, **kwargs):
        super(Wolf, self).__init__(*args, **kwargs)
        self.name = 'cnscg'
        self.seed_urls = [
            'http://www.cnscg.org/',
        ]
        self.base_url = 'http://www.cnscg.org/'
        # Only follow detail pages whose URL contains "show-".
        self.rule['follow'] = re.compile(r'show-')
        # XPath anchor used by the base class to extract the description.
        self.anchor['desc'] = "//*[@class='intro']"
    def get_resource(self, item, response, tree):
        """Extract download links from a detail page; returns requests or None."""
        item = super(Wolf, self).get_resource(item, response, tree)
        resource = tree.xpath("//*[@class='original download']//a/@href")
        # Keep only relative down.php links, resolved against the site root.
        downloads = [urlparse.urljoin(self.base_url, r) for r in resource if re.match(r'down.php', r)]
        if len(downloads):
            # Forward session cookies so the download endpoint accepts us.
            return self.download_bt(item, [Request(d, cookies=self.cookiejar._cookies,) for d in downloads])
        else:
            self.log("No Resource DropItem %s" % item['source'], level=log.WARNING)
            return None
|
#!/usr/bin/env python
"""
Installation script:
To release a new version to PyPi:
- Ensure the version is correctly set in oscar.__init__.py
- Run: python setup.py sdist upload
"""
from setuptools import setup, find_packages
import os
import sys
from oscar import get_version
PROJECT_DIR = os.path.dirname(__file__)
# Change to the current directory to solve an issue installing Oscar on the
# Vagrant machine.
if PROJECT_DIR:
    os.chdir(PROJECT_DIR)
# Package metadata and pinned dependency set for django-oscar (Python 2 era).
setup(name='django-oscar',
      version=get_version().replace(' ', '-'),
      url='https://github.com/tangentlabs/django-oscar',
      author="David Winterbottom",
      author_email="david.winterbottom@tangentlabs.co.uk",
      description="A domain-driven e-commerce framework for Django",
      long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
      keywords="E-commerce, Django, domain-driven",
      license='BSD',
      platforms=['linux'],
      packages=find_packages(exclude=["sandbox*", "tests*"]),
      include_package_data=True,
      install_requires=[
          'django>=1.4,<1.6',
          # PIL is required for image fields, Pillow is the "friendly" PIL fork
          'pillow>=1.7.8,<2.0.0',
          # Oscar ships with migrations
          'South>=0.7.6,<0.8',
          # We use the ModelFormSetView from django-extra-views for the basket
          # page
          'django-extra-views>=0.2,<0.6',
          # We ship a simple Haystack implementation (that needs to be
          # improved). We are using the 2.0-beta release from Github and
          # eagerly anticipating a stable 2.0 release on PyPI.
          'django-haystack==2.0.0-beta',
          # Treebeard is used for categories
          'django-treebeard==2.0b1',
          # Sorl is used as the default thumbnailer
          'sorl-thumbnail==11.12',
          'python-memcached>=1.48,<1.52',
          # Babel is used for currency formatting
          'Babel>=0.9,<0.10',
          # Oscar's default templates use compressor (but you can override
          # this)
          'django-compressor>=1.2,<1.4',
          # For converting non-ASCII to ASCII when creating slugs
          'Unidecode>=0.04.12,<0.05',
          # For manipulating search URLs
          'purl>=0.7'
      ],
      dependency_links=['https://github.com/toastdriven/django-haystack/tarball/fd83d3f449c2197f93040bb3d7bc6083ea8e48b7#egg=django-haystack-2.0.0-beta'],
      # See http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Web Environment',
          'Framework :: Django',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: Unix',
          'Programming Language :: Python',
          'Topic :: Other/Nonlisted Topic']
      )
# Show contributing instructions if being installed in 'develop' mode
if len(sys.argv) > 1 and sys.argv[1] == 'develop':
    docs_url = 'http://django-oscar.readthedocs.org/en/latest/internals/contributing/index.html'
    mailing_list = 'django-oscar@googlegroups.com'
    mailing_list_url = 'https://groups.google.com/forum/?fromgroups#!forum/django-oscar'
    twitter_url = 'https://twitter.com/django_oscar'
    msg = (
        "You're installing Oscar in 'develop' mode so I presume you're thinking\n"
        "of contributing:\n\n"
        "(a) That's brilliant - thank you for your time\n"
        "(b) If you have any questions, please use the mailing list:\n    %s\n"
        "    %s\n"
        "(c) There are more detailed contributing guidelines that you should "
        "have a look at:\n    %s\n"
        "(d) Consider following @django_oscar on Twitter to stay up-to-date\n"
        "    %s\n\nHappy hacking!") % (mailing_list, mailing_list_url,
                                       docs_url, twitter_url)
    line = '=' * 82
    # Python 2 print statement (this setup.py predates Python 3 support).
    print "\n%s\n%s\n%s" % (line, msg, line)
|
self.reversed_operands = True
self.ifm, self.ifm2 = self.ifm2, self.ifm
self.valid_broadcast = True
else:
self.valid_broadcast = False
    def is_valid(self):
        """
        This function checks whether BinaryElementwise has compatible attributes with the NPU
        """
        # int32 output cannot carry a fused activation.
        if np.dtype(self.ofm) == np.int32 and self.activation is not None:
            return False
        # Due to identity operator requiring ofm != int32 for now
        if np.dtype(self.ofm) == np.int32 and len(self.ofm.shape) < 4:
            return False
        # At most 4 dimensions are supported for either input...
        if len(self.ifm.shape) > 4 or len(self.ifm2.shape) > 4:
            return False
        # ...and 4D inputs must have a unit batch dimension.
        if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:
            return False
        if len(self.ifm2.shape) == 4 and self.ifm2.shape[0] != 1:
            return False
        # valid_broadcast was decided when the operands were parsed.
        if not self.valid_broadcast:
            return False
        return True
class AddParams(BinaryElementwiseParams):
    """
    Parser for a call to an ethosu.binary_elementwise Add composite function;
    extracts the parameter information.
    """

    composite_name = "ethos-u.add"

    def __init__(self, func_body: Call):
        BinaryElementwiseParams.__init__(self, func_body, "ADD", True)

    def is_valid(self):
        """Check whether Add has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        tensors = [self.ifm, self.ifm2, self.ofm]
        return bool(check_valid_dtypes(tensors, supported_dtypes=[np.uint8, np.int8, np.int32]))
def qnn_add_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for qnn.add with an optional fused RELU (clip)."""
    # Two data inputs followed by the six constant quantization parameters.
    quant_params = [is_constant() for _ in range(6)]
    add_op = is_op("qnn.add")(wildcard(), wildcard(), *quant_params)
    return add_op.optional(is_op("clip"))
class SubParams(BinaryElementwiseParams):
    """
    Parser for a call to an ethosu.binary_elementwise Sub composite function;
    extracts the parameter information.
    """

    composite_name = "ethos-u.sub"

    def __init__(self, func_body: Call):
        BinaryElementwiseParams.__init__(self, func_body, "SUB", True)

    def is_valid(self):
        """Check whether Sub has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        tensors = [self.ifm, self.ifm2, self.ofm]
        return bool(check_valid_dtypes(tensors, supported_dtypes=[np.uint8, np.int8, np.int32]))
def qnn_subtract_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for qnn.subtract with an optional fused RELU (clip)."""
    # Two data inputs followed by the six constant quantization parameters.
    quant_params = [is_constant() for _ in range(6)]
    sub_op = is_op("qnn.subtract")(wildcard(), wildcard(), *quant_params)
    return sub_op.optional(is_op("clip"))
class MulParams(BinaryElementwiseParams):
    """
    Parser for a call to an ethosu.binary_elementwise Mul composite function;
    extracts the parameter information.
    """

    composite_name = "ethos-u.mul"

    def __init__(self, func_body: Call):
        BinaryElementwiseParams.__init__(self, func_body, "MUL", True)

    def is_valid(self):
        """Check whether Mul has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        tensors = [self.ifm, self.ifm2, self.ofm]
        return bool(check_valid_dtypes(tensors, supported_dtypes=[np.uint8, np.int8, np.int32]))
def qnn_mul_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for qnn.mul with an optional fused RELU (clip)."""
    # Two data inputs followed by the six constant quantization parameters.
    quant_params = [is_constant() for _ in range(6)]
    mul_op = is_op("qnn.mul")(wildcard(), wildcard(), *quant_params)
    return mul_op.optional(is_op("clip"))
class MinParams(BinaryElementwiseParams):
    """
    Parser for a call to an ethosu.binary_elementwise Min composite function;
    extracts the parameter information.
    """

    composite_name = "ethos-u.min"

    def __init__(self, func_body: Call):
        BinaryElementwiseParams.__init__(self, func_body, "MIN", False)

    def is_valid(self):
        """Check whether Min has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        # Min requires both inputs to share a dtype.
        if self.ifm.dtype != self.ifm2.dtype:
            return False
        tensors = [self.ifm, self.ifm2, self.ofm]
        return bool(check_valid_dtypes(tensors, supported_dtypes=[np.uint8, np.int8]))
def minimum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for minimum with an optional fused RELU (clip)."""
    minimum_op = is_op("minimum")(wildcard(), wildcard())
    return minimum_op.optional(is_op("clip"))
class MaxParams(BinaryElementwiseParams):
    """
    Parser for a call to an ethosu.binary_elementwise Max composite function;
    extracts the parameter information.
    """

    composite_name = "ethos-u.max"

    def __init__(self, func_body: Call):
        BinaryElementwiseParams.__init__(self, func_body, "MAX", False)

    def is_valid(self):
        """Check whether Max has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        # Max requires both inputs to share a dtype.
        if self.ifm.dtype != self.ifm2.dtype:
            return False
        tensors = [self.ifm, self.ifm2, self.ofm]
        return bool(check_valid_dtypes(tensors, supported_dtypes=[np.uint8, np.int8]))
def maximum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for maximum with an optional fused RELU (clip)."""
    maximum_op = is_op("maximum")(wildcard(), wildcard())
    return maximum_op.optional(is_op("clip"))
class ShlParams(BinaryElementwiseParams):
    """
    Parser for a call to an ethosu.binary_elementwise Shl composite function;
    extracts the parameter information.
    """

    composite_name = "ethos-u.shl"

    def __init__(self, func_body: Call):
        BinaryElementwiseParams.__init__(self, func_body, "SHL", False)

    def is_valid(self):
        """Check whether Shl has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        # Shift only operates on int32 tensors.
        return bool(check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.int32]))
def shl_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Build the dataflow pattern for left_shift with an optional fused RELU (clip)."""
    shl = is_op("left_shift")(wildcard(), wildcard())
    return shl.optional(is_op("clip"))
class ReshapeParams:
"""
This class will parse a call to a ethosu.reshape composite function
and extract the parameter information.
"""
composite_name = "ethos-u.reshape"
def __init__(self, func_body: Call):
    """Extract reshape parameters from the composite function body."""
    # Target shape comes from the relay reshape op's attributes.
    self.new_shape = func_body.attrs.newshape
    self.ifm = TensorParams(func_body.args[0])
    self.ofm = TensorParams(func_body)
def is_valid(self):
"""
This function checks whether reshape has compatible attributes with the NPU
"""
if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
return False
if not check_valid_dtypes([self.ifm, self.of |
##
## pyGBot - Versatile IRC Bot
## Copyright (C) 2008 Morgan Lokhorst-Blight, Alex Soborov
| ##
## Th | is program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from datetime import datetime
from pyGBot import log
from pyGBot.Plugins.system.Commands import BaseCommand
from pyGBot.Plugins.system.Auth import AuthLevels as AL
class Seen(BaseCommand):
    """IRC command: report when and how a user was last seen.

    Usage: seen <user> [channel]. Queries the features.Seen plugin for the
    latest recorded event and replies with a human-readable summary.
    """
    level = AL.User

    def __init__(self, bot, channel, user, args):
        args = args.strip().split()
        if not args:
            bot.replyout(channel, user, 'Command usage: seen <user> [channel]')
            return
        searchNick = args[0]
        try:
            searchChannel = args[1]
        except IndexError:
            searchChannel = None
        try:
            event = bot.plugins['features.Seen'].get_latest(searchNick, searchChannel)
        # `except E, e` replaced with `except E as e` (works on Python 2.6+).
        except IndexError as e:
            bot.replyout(channel, user, str(e))
            return

        outmessage = "The user, %s, was last seen " % event.user
        if event.channel:
            outmessage += "on channel %s " % event.channel
        else:
            outmessage += "on this network "

        # Break the elapsed time into components; timedelta.seconds is
        # always < 86400, the day part lives in .days.
        lastseen = datetime.now() - event.timestamp
        days = lastseen.days
        hours = lastseen.seconds // 3600
        minutes = (lastseen.seconds % 3600) // 60
        seconds = lastseen.seconds % 60
        timemessage = []
        if days != 0:
            timemessage.append("%i days" % days)
        if hours != 0:
            timemessage.append("%i hours" % hours)
        if minutes != 0:
            timemessage.append("%i minutes" % minutes)
        if seconds != 0:
            timemessage.append("%i seconds" % seconds)

        # BUG FIX: this used to test len(outmessage) > 0, which is always
        # true (outmessage is pre-populated above), so "just now" was
        # unreachable. The intended test is on the collected components.
        if timemessage:
            outmessage += ", ".join(timemessage) + " ago, "
        else:
            outmessage += "just now, "

        if event.type == "Say":
            outmessage += "saying: <%s> %s" % (event.user, event.message)
        elif event.type == "Do":
            outmessage += "performing the action: * %s %s" % (event.user, event.message)
        elif event.type == "Msg":
            outmessage += "sending me a private message."
        elif event.type == "Part":
            outmessage += "parting the channel."
        elif event.type == "Join":
            outmessage += "joining the channel."
        elif event.type == "Quit":
            outmessage += "quitting with the message: %s" % event.message
        elif event.type == "Kick":
            outmessage += "getting kicked %s" % event.message
        elif event.type == "NickTo":
            outmessage += "changing nick to %s." % event.message
        elif event.type == "NickFrom":
            outmessage += "changing nick from %s." % event.message
        bot.replyout(channel, user, outmessage)
|
# Prefer setuptools; fall back to plain distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Use the README as the long description when available.
try:
    with open('README.rst') as file:
        long_description = file.read()
except IOError:
    long_description = 'Python lib for sniffets.com'

setup(
    name='sniffets',
    packages=['sniffets'],
    version='0.1.8',
    long_description=long_description,
    description='Python lib for sniffets.com',
    author='Doniyor Jurabayev',
    author_email='behconsci@gmail.com',
    url='https://github.com/behconsci/sniffets-python',
    download_url='https://github.com/behconsci/sniffets-python/archive/0.1.8.tar.gz',
    keywords=['track', 'monitor', 'bug'],
    classifiers=[],
    install_requires=[
        'requests', 'grequests'
    ],
)
| |
"""
Concurrent downloaders
"""
import os
import sys
import signal
import logging
import itertools
from functools import partial
from concurrent.futures import ProcessPoolExecutor
from pomp.core.base import (
BaseCrawler, BaseDownloader, BaseCrawlException,
)
from pomp.contrib.urllibtools import UrllibDownloadWorker
from pomp.core.utils import iterator, Planned
log = logging.getLogger('pomp.contrib.concurrent')
def _run_download_worker(params, request):
    """Executed inside a pool process: build a worker and fetch one request.

    :param params: dict with 'worker_class' and optional 'worker_kwargs'.
    :param request: request object handed to ``worker.process``.
    """
    pid = os.getpid()
    log.debug("Download worker pid=%s params=%s", pid, params)
    try:
        worker = params['worker_class'](**params.get('worker_kwargs', {}))
        return worker.process(request)
    except Exception:
        # Log with the worker pid before re-raising across the process boundary.
        log.exception(
            "Exception on download worker pid=%s request=%s", pid, request
        )
        raise
def _run_crawler_worker(params, response):
    """Executed inside a pool process: extract items and follow-up requests.

    Returns a flat list of extracted items, optionally followed by the next
    requests produced from *response*.
    """
    pid = os.getpid()
    log.debug("Crawler worker pid=%s params=%s", pid, params)
    try:
        worker = params['worker_class'](**params.get('worker_kwargs', {}))
        items = worker.extract_items(response)
        next_requests = worker.next_requests(response)
        if not next_requests:
            return list(iterator(items))
        # Items first, then the follow-up requests, in one flat list.
        return list(
            itertools.chain(
                iterator(items),
                iterator(next_requests),
            )
        )
    except Exception:
        log.exception(
            "Exception on crawler worker pid=%s request=%s", pid, response
        )
        raise
class ConcurrentMixin(object):
    """Bridges executor futures to Planned objects shared by downloader/crawler."""

    def _done(self, request, done_future, future):
        """Callback fired when *future* completes; forwards its outcome."""
        try:
            outcome = future.result()
        except Exception as e:
            # Wrap the failure so downstream consumers see a crawl exception
            # instead of a raw executor error.
            log.exception('Exception on %s', request)
            wrapped = BaseCrawlException(
                request,
                exception=e,
                exc_info=sys.exc_info(),
            )
            done_future.set_result(wrapped)
        else:
            done_future.set_result(outcome)
class ConcurrentDownloader(BaseDownloader, ConcurrentMixin):
    """Concurrent ProcessPoolExecutor downloader

    :param worker_class: class instantiated in each worker process to fetch
        one request via its ``process`` method
    :param worker_kwargs: keyword arguments passed to ``worker_class``
    :param pool_size: size of ProcessPoolExecutor
    """
    def __init__(
            self, worker_class,
            worker_kwargs=None, pool_size=5,):

        # configure executor
        self.pool_size = pool_size
        self.executor = ProcessPoolExecutor(max_workers=self.pool_size)

        # prepare worker params
        self.worker_params = {
            'worker_class': worker_class,
            'worker_kwargs': worker_kwargs or {},
        }

        # ctrl-c support for python2.x
        # trap sigint
        signal.signal(signal.SIGINT, lambda s, f: s)

        super(ConcurrentDownloader, self).__init__()

    def process(self, crawler, request):
        # delegate request processing to the executor
        future = self.executor.submit(
            _run_download_worker, self.worker_params, request,
        )

        # build Planned object
        done_future = Planned()

        # when executor finish request - fire done_future
        future.add_done_callback(
            partial(self._done, request, done_future)
        )

        return done_future

    def get_workers_count(self):
        # Pool size doubles as the downloader's concurrency level.
        return self.pool_size

    def stop(self, crawler):
        # Blocks until queued work finishes, then frees the pool.
        self.executor.shutdown()
class ConcurrentUrllibDownloader(ConcurrentDownloader):
    """Concurrent ProcessPoolExecutor downloader for fetching data with urllib

    :class:`pomp.contrib.SimpleDownloader`

    :param pool_size: pool size of ProcessPoolExecutor
    :param timeout: request timeout in seconds, forwarded to every
        ``UrllibDownloadWorker`` instance
    """
    def __init__(self, pool_size=5, timeout=None):
        super(ConcurrentUrllibDownloader, self).__init__(
            pool_size=pool_size,
            worker_class=UrllibDownloadWorker,
            worker_kwargs={
                'timeout': timeout
            },
        )
class ConcurrentCrawler(BaseCrawler, ConcurrentMixin):
    """Concurrent ProcessPoolExecutor crawler

    :param worker_class: crawler class instantiated inside each worker process
    :param worker_kwargs: keyword arguments passed to ``worker_class``
    :param pool_size: pool size of ProcessPoolExecutor
    """
    def __init__(self, worker_class, worker_kwargs=None, pool_size=5):
        # configure executor
        self.pool_size = pool_size
        self.executor = ProcessPoolExecutor(max_workers=self.pool_size)

        # prepare worker params
        self.worker_params = {
            'worker_class': worker_class,
            'worker_kwargs': worker_kwargs or {},
        }

        # inherit ENTRY_REQUESTS from worker_class
        self.ENTRY_REQUESTS = getattr(worker_class, 'ENTRY_REQUESTS', None)

    def process(self, response):
        # delegate response processing to the executor
        future = self.executor.submit(
            _run_crawler_worker, self.worker_params, response,
        )

        # build Planned object
        done_future = Planned()

        # when executor finish response processing - fire done_future
        future.add_done_callback(
            partial(self._done, response, done_future)
        )

        return done_future
|
def descriptor(rcut=4, desc_type='se_ar', mneiba=150, smth_frac=0.85):
    """Build the 'descriptor' section of a DeePMD-kit input dict.

    :param rcut: cutoff radius for the angular part.
    :param desc_type: descriptor flavor, 'se_ar' (angular + radial sub-descriptors)
        or 'se_a' (angular only).
    :param mneiba: max neighbor count ('sel') for the angular part.
    :param smth_frac: fraction of the cutoff where smoothing starts
        (generalized from the previously hard-coded 0.85; default unchanged).
    :returns: dict suitable for ``input['model']['descriptor']``.
    :raises RuntimeError: if *desc_type* is not recognized.
    """
    if desc_type == 'se_ar':
        # Radial part uses a larger cutoff (rmult * rcut) and neighbor list.
        rmult = 1.5
        mneibr = 500
        ar_smth = rcut*smth_frac
        desc = {
            'type': 'se_ar',
            'a': {
                'sel': [mneiba],
                'rcut_smth': ar_smth,
                'rcut': rcut,
                'neuron': [10, 20, 40],
                'resnet_dt': False,
                'axis_neuron': 4,
                'seed': 1,
            },
            'r': {
                'sel': [mneibr],
                'rcut_smth': ar_smth*rmult,
                'rcut': rcut*rmult,
                'neuron': [5, 10, 20],
                'resnet_dt': False,
                'seed': 1
            }
        }
    elif desc_type == 'se_a':
        desc = {
            'type': 'se_a',
            'sel': [mneiba],
            'rcut_smth': rcut*smth_frac,
            'rcut': rcut,
            'neuron': [16, 32, 64],
            'resnet_dt': False,
            'axis_neuron': 4,
            'seed': 1,
        }
    else:
        msg = 'please add inputs for descriptor type %s' % desc_type
        raise RuntimeError(msg)
    return desc
def fitting_net():
    """Return the default DeePMD-kit fitting-net section."""
    return {
        'neuron': [240, 240, 240],
        'resnet_dt': True,
        'seed': 1
    }
def loss_function():
    """Return the default energy/force/virial loss-weight schedule."""
    prefs = {
        'start_pref_e': 0.02,
        'limit_pref_e': 1,
        'start_pref_f': 1000,
        'limit_pref_f': 1,
        'start_pref_v': 1000,
        'limit_pref_v': 1
    }
    return prefs
def calc_decay_steps(stop_batch, start_lr, stop_lr, decay_rate):
    """Steps between learning-rate decays so the schedule spans *stop_batch*.

    Solves ``decay_rate**k == stop_lr / start_lr`` for the number of decay
    events k, then spreads them evenly over *stop_batch* training steps.

    :returns: decay interval in steps (int).
    """
    # Scalar logs don't need numpy (previously `import numpy as np` here);
    # math.log gives an identical result for floats.
    import math
    decay = math.log(stop_lr/start_lr)/math.log(decay_rate)
    decay_steps = int(round(stop_batch/decay))
    return decay_steps
def learning_rate(stop_batch, start_lr=5e-3, stop_lr=5e-8,
                  decay_rate=0.95):
    """Build the exponential-decay learning-rate section.

    The decay interval is derived so the rate reaches *stop_lr* at
    *stop_batch* steps.
    """
    steps = calc_decay_steps(stop_batch, start_lr, stop_lr,
                             decay_rate)
    return {
        'type': 'exp',
        'start_lr': start_lr,
        'stop_lr': stop_lr,
        'decay_steps': steps,
        'decay_rate': decay_rate
    }
def training(stop_batch, batch_size):
    """Assemble the 'training' section: core, display and checkpoint settings."""
    # Single literal instead of building three dicts and merging them;
    # key order matches the original core -> display -> checkpoint merge.
    section = {
        'seed': 1,
        'systems': ['.'],
        'set_prefix': 'set',
        'batch_size': batch_size,
        'stop_batch': stop_batch,
        # display settings
        'disp_file': 'lcurve.out',
        'disp_freq': 1000,
        'numb_test': 64,
        'disp_training': True,
        'time_training': True,
        'profiling': False,
        'profiling_file': 'timeline.json',
        # checkpoint settings
        'save_ckpt': 'model.ckpt',
        'load_ckpt': 'model.ckpt',
        'save_freq': 10000,
    }
    return section
def default_input(stop_batch=100000, batch_size=32,
                  desc_kws=None,
                  lr_kws=None,
                  ):
    """Assemble a complete default DeePMD-kit input dict.

    :param desc_kws: extra keyword arguments for :func:`descriptor`.
    :param lr_kws: extra keyword arguments for :func:`learning_rate`.
    """
    desc_kws = dict() if desc_kws is None else desc_kws
    lr_kws = dict() if lr_kws is None else lr_kws
    return {
        'model': {
            'type_map': ['H'],
            'descriptor': descriptor(**desc_kws),
            'fitting_net': fitting_net(),
        },
        'loss': loss_function(),
        'learning_rate': learning_rate(stop_batch, **lr_kws),
        'training': training(stop_batch, batch_size)
    }
|
ng count returned by weapi.
:return: a Song object.
"""
result = self.search(song_name, search_type=1, limit=limit)
if result['result']['songCount'] <= 0:
LOG.warning('Song %s not existed!', song_name)
raise SearchNotFound('Song {} not existed.'.format(song_name))
else:
songs = result['result']['songs']
if quiet:
song_id, song_name = songs[0]['id'], songs[0]['name']
song = Song(song_id, song_name)
return song
else:
return self.display.select_one_song(songs)
def search_album(self, album_name, quiet=False, limit=9):
    """Look up an album on the weapi by name.

    :params album_name: album name.
    :params quiet: pick the first match automatically.
    :params limit: album count returned by weapi.
    :return: an Album object.
    """
    payload = self.search(album_name, search_type=10, limit=limit)
    if payload['result']['albumCount'] <= 0:
        LOG.warning('Album %s not existed!', album_name)
        raise SearchNotFound('Album {} not existed'.format(album_name))
    albums = payload['result']['albums']
    if not quiet:
        return self.display.select_one_album(albums)
    best = albums[0]
    return Album(best['id'], best['name'])
def search_artist(self, artist_name, quiet=False, limit=9):
    """Look up an artist on the weapi by name.

    :params artist_name: artist name.
    :params quiet: pick the first match automatically.
    :params limit: artist count returned by weapi.
    :return: an Artist object.
    """
    payload = self.search(artist_name, search_type=100, limit=limit)
    if payload['result']['artistCount'] <= 0:
        LOG.warning('Artist %s not existed!', artist_name)
        raise SearchNotFound('Artist {} not existed.'.format(artist_name))
    artists = payload['result']['artists']
    if not quiet:
        return self.display.select_one_artist(artists)
    best = artists[0]
    return Artist(best['id'], best['name'])
def search_playlist(self, playlist_name, quiet=False, limit=9):
    """Look up a playlist on the weapi by name.

    :params playlist_name: playlist name.
    :params quiet: pick the first match automatically.
    :params limit: playlist count returned by weapi.
    :return: a Playlist object.
    """
    payload = self.search(playlist_name, search_type=1000, limit=limit)
    if payload['result']['playlistCount'] <= 0:
        LOG.warning('Playlist %s not existed!', playlist_name)
        raise SearchNotFound('playlist {} not existed'.format(playlist_name))
    playlists = payload['result']['playlists']
    if not quiet:
        return self.display.select_one_playlist(playlists)
    best = playlists[0]
    return Playlist(best['id'], best['name'])
def search_user(self, user_name, quiet=False, limit=9):
    """Look up a user profile on the weapi by name.

    :params user_name: user name.
    :params quiet: pick the first match automatically.
    :params limit: user count returned by weapi.
    :return: a User object.
    """
    payload = self.search(user_name, search_type=1002, limit=limit)
    if payload['result']['userprofileCount'] <= 0:
        LOG.warning('User %s not existed!', user_name)
        raise SearchNotFound('user {} not existed'.format(user_name))
    users = payload['result']['userprofiles']
    if not quiet:
        return self.display.select_one_user(users)
    best = users[0]
    return User(best['userId'], best['nickname'])
def get_user_playlists(self, user_id, limit=1000):
    """Fetch all playlists of a user and let the display pick one.

    warning: login is required for private playlist.

    :params user_id: user id.
    :params limit: playlist count returned by weapi.
    :return: a Playlist Object.
    """
    payload = {'offset': 0, 'uid': user_id, 'limit': limit,
               'csrf_token': ''}
    response = self.post_request(
        'http://music.163.com/weapi/user/playlist?csrf_token=', payload)
    return self.display.select_one_playlist(response['playlist'])
def get_playlist_songs(self, playlist_id, limit=1000):
    """Fetch every track of a playlist.

    :params playlist_id: playlist id.
    :params limit: length of result returned by weapi.
    :return: a list of Song object.
    """
    payload = {'id': playlist_id, 'offset': 0, 'total': True,
               'limit': limit, 'n': 1000, 'csrf_token': ''}
    response = self.post_request(
        'http://music.163.com/weapi/v3/playlist/detail?csrf_token=', payload)
    tracks = response['playlist']['tracks']
    return [Song(track['id'], track['name']) for track in tracks]
def get_album_songs(self, album_id):
    """Fetch every song of an album.

    warning: use old api.

    :params album_id: album id.
    :return: a list of Song object.
    """
    response = self.get_request(
        'http://music.163.com/api/album/{}/'.format(album_id))
    return [Song(item['id'], item['name'])
            for item in response['album']['songs']]
def get_artists_hot_songs(self, artist_id):
    """Fetch an artist's top-50 songs.

    warning: use old api.

    :params artist_id: artist id.
    :return: a list of Song object.
    """
    response = self.get_request(
        'http://music.163.com/api/artist/{}'.format(artist_id))
    return [Song(item['id'], item['name'])
            for item in response['hotSongs']]
def get_song_url(self, song_id, bit_rate=320000):
    """Resolve a song's download address.

    :params song_id: song id<int>.
    :params bit_rate: {'MD 128k': 128000, 'HD 320k': 320000}
    :return: a song's download address.
    """
    payload = {'ids': [song_id], 'br': bit_rate, 'csrf_token': ''}
    response = self.post_request(
        'http://music.163.com/weapi/song/enhance/player/url?csrf_token=',
        payload)
    song_url = response['data'][0]['url']
    if song_url is not None:
        return song_url
    # A null url means the track is region/copyright blocked.
    LOG.warning(
        'Song %s is not available due to copyright issue. => %s',
        song_id, response)
    raise SongNotAvailable(
        'Song {} is not available due to copyright issue.'.format(song_id))
def get_song_lyric(self, song_id):
    """Fetch a song's lyric text.

    warning: use old api.

    :params song_id: song id.
    :return: a song's lyric.
    """
    url = 'http://music.163.com/api/song/lyric?os=osx&id={}&lv=-1&kv=-1&tv=-1'.format(  # NOQA
        song_id)
    response = self.get_request(url)
    if 'lrc' in response and response['lrc']['lyric'] is not None:
        return response['lrc']['lyric']
    return 'Lyric not found.'
@exception_handle
def get_song_by_url(self, song_url, song_name, folder, lyric_info):
"""Download a song and save it to disk.
:params song_url: download address.
:params song_name: song name.
:params folder: storage path.
:params lyric: lyric info.
"""
if not os |
from __future__ import print_function
# bench related imports
import numpy as np
import shutil
import bquery
import pandas as pd
import itertools as itt
import cytoolz
import cytoolz.dicttoolz
from toolz import valmap, compose
from cytoolz.curried import pluck
import blaze as blz
# other imports
import contextlib
import tempfile
import os
import time
# Wall-clock seconds spent inside the most recent ctime() block.
t_elapsed = 0.0

@contextlib.contextmanager
def ctime(message=None):
    """Context manager that measures wall-clock time spent in its block.

    The result is stored in the module-global ``t_elapsed`` and printed,
    prefixed with *message* when one is given.
    """
    global t_elapsed
    t_elapsed = 0.0
    print('\n')
    started = time.time()
    yield
    if message:
        print(message + ": ", end='')
    # Note: the elapsed time deliberately includes the message print above.
    t_elapsed = time.time() - started
    print(round(t_elapsed, 4), "sec")
# Cyclic generators that synthesize the benchmark columns.
ga = itt.cycle(['ES', 'NL'])
gb = itt.cycle(['b1', 'b2', 'b3', 'b4', 'b5'])
gx = itt.cycle([1, 2])
gy = itt.cycle([-1, -2])

rootdir = 'bench-data.bcolz'
if os.path.exists(rootdir):
    shutil.rmtree(rootdir)

n_rows = 1000000
print('Rows: ', n_rows)

# -- data
# NOTE(review): itt.izip is Python 2 only; this script predates Python 3.
z = np.fromiter(((a, b, x, y) for a, b, x, y in itt.izip(ga, gb, gx, gy)),
                dtype='S2,S2,i8,i8', count=n_rows)
ct = bquery.ctable(z, rootdir=rootdir, )
print(ct)

# -- pandas --
df = pd.DataFrame(z)
with ctime(message='pandas'):
    result = df.groupby(['f0'])['f2'].sum()
print(result)
# pandas timing is the baseline the other engines are compared against.
t_pandas = t_elapsed

# -- cytoolz --
with ctime(message='cytoolz over bcolz'):
    # In Memory Split-Apply-Combine
    # http://toolz.readthedocs.org/en/latest/streaming-analytics.html?highlight=reduce#split-apply-combine-with-groupby-and-reduceby
    r = cytoolz.groupby(lambda row: row.f0, ct)
    result = valmap(compose(sum, pluck(2)), r)
print('x{0} slower than pandas'.format(round(t_elapsed/t_pandas, 2)))
print(result)

# -- blaze + bcolz --
blaze_data = blz.Data(ct.rootdir)
expr = blz.by(blaze_data.f0, sum_f2=blaze_data.f2.sum())
with ctime(message='blaze over bcolz'):
    result = blz.compute(expr)
print('x{0} slower than pandas'.format(round(t_elapsed/t_pandas, 2)))
print(result)

# -- bquery --
with ctime(message='bquery over bcolz'):
    result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed/t_pandas, 2)))
print(result)

# Second bquery run with the factorization of the groupby column cached.
ct.cache_factor(['f0'], refresh=True)
with ctime(message='bquery over bcolz (factorization cached)'):
    result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed/t_pandas, 2)))
print(result)

shutil.rmtree(rootdir)
|
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings |
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'app.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^add/$', 'cart.views.add'),
url(r'^clear/$', 'cart.views.clear'),
url(r'^show/$', 'cart.views.show'),
url(r'^remove/(?P<pk>\d+)/$', 'cart.views.remove'),
url(r'^checkout/$', 'cart.views.checkout | '),
)
|
"""autogenerated by genpy from hoverboard/ServoRaw.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class ServoRaw(genpy.Message):
  # Autogenerated by genpy from hoverboard/ServoRaw.msg -- the field list,
  # struct formats and byte offsets below must stay in sync; regenerate
  # from the .msg file rather than editing by hand.
  _md5sum = "cf1c9d17f7bbedbe8dd2c29cdb7700f8"
  _type = "hoverboard/ServoRaw"
  _has_header = True #flag to mark the presence of a Header object
  _full_text = """Header header
# The servo port 0 through 5 inclusive
int8 port
# The value to send 0 to 100 inclusive
int8 value
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
  __slots__ = ['header','port','value']
  _slot_types = ['std_msgs/Header','int8','int8']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
    header,port,value
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(ServoRaw, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.port is None:
        self.port = 0
      if self.value is None:
        self.value = 0
    else:
      self.header = std_msgs.msg.Header()
      self.port = 0
      self.value = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # Header: seq, stamp.secs, stamp.nsecs as three little-endian uint32.
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      # frame_id: length-prefixed UTF-8 string.
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # port and value as two signed bytes.
      _x = self
      buff.write(_struct_2b.pack(_x.port, _x.value))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE: the parameter name shadows the built-in str (genpy convention).
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 2
      (_x.port, _x.value,) = _struct_2b.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Identical to serialize() for this message: it has no array fields.
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2b.pack(_x.port, _x.value))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Identical to deserialize() for this message: it has no array fields.
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 2
      (_x.port, _x.value,) = _struct_2b.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Precompiled struct formats reused by the (de)serialization methods above.
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_2b = struct.Struct("<2b")
|
import sys
import struct
ULL_BYTES = struct.calcsize('!Q')
SHORT_BYTES = struct.calcsize('!h')
INDEX_HEADER = 'IDX'
def search(dbfile, prefix):
    """Returns all words having a given prefix using a dbfile."""
    # Descend the on-disk trie one character at a time, loading each
    # child node lazily from the file via its stored offset.
    idx = Index.from_file(dbfile)
    for letter in prefix:
        if letter not in idx.nodes:
            print 'No completions'
            return
        else:
            idx = Index.from_file(dbfile, idx.nodes[letter])
    # Children of the final node are the possible completions ...
    if idx.nodes:
        print 'Completions:'
        for completion in idx.nodes:
            print ' - ' + completion
    # ... and its leafs are the files containing the exact prefix.
    if idx.leafs:
        print 'Pattern found in files:'
        for path in idx.leafs:
            print ' - ' + path
class Index(object):
    """One node of the on-disk prefix-tree index.

    ``nodes`` maps a letter to the file offset of its child index;
    ``leafs`` lists the file paths stored at this node.
    """

    def __init__(self, data):
        self.data = data
        self.nodes = {}
        self.leafs = []
        self.parse()

    @classmethod
    def from_file(cls, f, offset=0):
        """Read and parse one index node from file *f* at *offset*.

        :raises ValueError: if the node header is not INDEX_HEADER.
        """
        f.seek(offset)
        size_data = f.read(len(INDEX_HEADER) + ULL_BYTES)
        header, index_size = struct.unpack('!%dsQ' % len(INDEX_HEADER), size_data)
        if header != INDEX_HEADER:
            # BUG FIX: this used to drop into the ipdb debugger
            # (`import ipdb; ipdb.set_trace()`) and then parse garbage;
            # fail loudly instead.
            raise ValueError(
                'corrupt index: bad header %r at offset %d' % (header, offset))
        data = f.read(index_size - ULL_BYTES - len(INDEX_HEADER))
        return cls(data)

    def parse(self):
        """Decode the node payload: child table first, then leaf paths."""
        node_count, = struct.unpack_from('!Q', self.data)
        offset = ULL_BYTES
        # Read nodes: each entry is <letter length><letter><child offset>.
        for n in range(node_count):
            letter_bytes, = struct.unpack_from('!h', self.data, offset)
            offset += SHORT_BYTES
            letter, index_offset = struct.unpack_from('!%dsQ' % letter_bytes, self.data, offset)
            offset += letter_bytes + ULL_BYTES
            self.nodes[letter] = index_offset
        # Read leafs: length-prefixed paths until the payload is exhausted.
        while offset < len(self.data):
            path_bytes, = struct.unpack_from('!h', self.data, offset)
            offset += SHORT_BYTES
            path, = struct.unpack_from('!%ds' % path_bytes, self.data, offset)
            offset += path_bytes
            self.leafs.append(path)
if __name__ == '__main__':
    # Usage: script.py <dbfile> <prefix>
    # NOTE(review): file() is the Python 2 built-in open; py2 script.
    f = file(sys.argv[1], 'rb')
    search(f, sys.argv[2])
|
import numpy as np
import struct
import wave
from winsou | nd import PlaySound, SND_FILENAME, SND_ASYNC
import matplotlib.pyplot as plt
CHUNK = 1 << 8
def play(filename):
    # Fire-and-forget playback through the Windows-only winsound API.
    PlaySound(filename, SND_FILENAME | SND_ASYNC)
fn = r"D:\b.wav"
f = wave.open(fn)
print(f.getparams())
ch = f.getnchannels()
sw = f.getsampwidth()
n = f.getnframes()

# Read the whole file CHUNK frames at a time.
data = bytearray()
while len(data) < n * ch * sw:
    data.extend(f.readframes(CHUNK))

# Interpret the raw bytes as signed 16-bit samples
# (assumes sampwidth == 2 — TODO confirm for the input file).
data = np.array(struct.unpack('{n}h'.format(n=n * ch), data))

# The FFT bin with the largest magnitude gives the dominant frequency;
# scale the normalized bin frequency by the sample rate.
w = np.fft.fft(data)
freqs = np.fft.fftfreq(len(w))
module = np.abs(w)
idmax = module.argmax()
print(abs(freqs[idmax]) * f.getframerate())
plt.specgram(data)
plt.show()
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.commits as bsc
#------------------------------------ | -----------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Names bokeh.sampledata.commits is expected to export.
ALL = (
    'data',
)

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

# Verifies the module's public surface matches ALL exactly.
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.commits", ALL))

@pytest.mark.sampledata
def test_data(pd):
    # `pd` is the pandas module provided as a pytest fixture.
    import bokeh.sampledata.commits as bsc
    assert isinstance(bsc.data, pd.DataFrame)

    # check detail for package data
    assert len(bsc.data) == 4916
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
|
# -*- coding: UTF-8 -*-
_ | _copyright__ = """\
Copyright (c) 2012-2013 Luc Saffre.
This software comes with ABSOLUTELY NO WARRANTY and is
distributed under the terms of the GNU Lesser General Public License.
See file COPYING.txt for | more information."""
|
# coding=utf8
#
# Copyright 2013 Dreamlab Onet.pl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 3.0.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, visit
#
# http://www.gnu.org/licenses/lgpl.txt
#
import logging
from rmock.runners.http.handler import MockHttpHandler
from rmock.runners.http.handler import with_exception_handling
from rmock.core.call import Call
from rmock.runners.http.handler import HttpCode
logger = logging.getLogger("rmock.http-proxy")
class ProxyMockHttpHandler(MockHttpHandler):
    """HTTP handler that routes each mocked call to one of several child mocks."""

    @with_exception_handling
    def initialize(self,
                   rmock_data,
                   protocol_class,
                   protocol_args,
                   slug,
                   childs,
                   child_chooser):
        """Set up the base handler and remember the proxy routing state."""
        super(ProxyMockHttpHandler, self).initialize(
            rmock_data,
            protocol_class,
            protocol_args,
            slug
        )
        self.childs = childs
        self.child_chooser = child_chooser

    def _process_function_call_impl(self, funcname, args, kwargs, headers):
        """Pick a child mock for this call and forward it, or answer 404."""
        call = Call._make(funcname=funcname,
                          args=args,
                          kwargs=kwargs,
                          headers=headers)
        target = self.child_chooser(call, self.childs)
        if target is None:
            logger.info("404: matching mock not found")
            return HttpCode(404)
        logger.info("proxying request to: %s", target.name)
        return target._rmock_data.register_call_and_get_result(
            funcname, args, kwargs,
            headers=headers
        )
|
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import os
import tempfile
# internal modules:
from yotta.lib.fsutils import mkDirP, rmRf
from yotta.lib.detect import systemDefaultTarget
from yotta.lib import component
from .cli import cli
# Fixture tree exercising .yotta_ignore matching: every file whose content
# is "#error should be ignored" must be excluded by the patterns in the
# '.yotta_ignore' entry below.
Test_Files = {
    '.yotta_ignore': '''
#comment
/moo
b/c/d
b/c/*.txt
/a/b/test.txt
b/*.c
/source/a/b/test.txt
/test/foo
sometest/a
someothertest
ignoredbyfname.c
''',
    'module.json': '''
{
"name": "test-testdep-f",
"version": "0.0.6",
"description": "Module to test test-dependencies and ignoring things",
"author": "autopulated",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {},
"testDependencies": {}
}
''',
    'a/b/c/d/e/f/test.txt': '',
    'a/b/c/d/e/test.c': '#error should be ignored',
    'a/b/c/d/e/test.txt': '',
    'a/b/c/d/test.c': '#error should be ignored',
    'a/b/c/d/test.txt': '',
    'a/b/c/d/z/test.c':'#error should be ignored',
    'a/b/c/test.txt': '',
    'a/b/test.txt':'',
    'a/test.txt':'',
    'comment':'# should not be ignored',
    'f/f.h':'''
#ifndef __F_H__
#define __F_H__
int f();
#endif
''',
    'source/moo/test.txt':'',
    'source/a/b/c/d/e/f/test.txt': '',
    'source/a/b/c/d/e/test.c': '#error should be ignored',
    'source/a/b/c/d/e/test.txt': '',
    'source/a/b/c/d/test.c': '#error should be ignored',
    'source/a/b/c/d/test.txt': '',
    'source/a/b/c/d/z/test.c':'#error should be ignored',
    'source/a/b/c/test.txt': '',
    'source/a/b/test.txt':'',
    'source/a/test.txt':'',
    'source/f.c':'''
int f(){
return 6;
}
''',
    'test/anothertest/ignoredbyfname.c':'#error should be ignored',
    # NOTE(review): duplicate dict key — the entry above is silently
    # overwritten by the one below, so the "#error" variant never exists;
    # one of the two likely needs a different file name. Left as-is to
    # preserve current behavior.
    'test/anothertest/ignoredbyfname.c':'''
#include <stdio.h>
#include "f/f.h"
int main(){
int result = f();
printf("%d\n", result);
return !(result == 6);
}
''',
    'test/foo/ignored.c':'''
#error should be ignored
''',
    'test/someothertest/alsoignored.c':'''
#error should be ignored
''',
    'test/sometest/a/ignored.c':'''
#error should be ignored
'''
}
Default_Test_Files = {
'module.json': '''
{
"name": "test-testdep-f",
"version": "0.0.6",
"license": "Apache-2.0"
}'''
}
def isWindows():
    """True iff running under native Windows (tests can't build there yet).

    Note: also relevant because tests hitting github need an authn token.
    """
    windows_os_name = 'nt'
    return os.name == windows_os_name
def writeTestFiles(files):
    """Materialise a {relative path: contents} mapping into a fresh temp
    directory and return that directory's path."""
    root = tempfile.mkdtemp()
    for relpath, contents in files.items():
        subdir, leaf = os.path.split(relpath)
        subdir = os.path.join(root, subdir)
        mkDirP(subdir)
        target = os.path.join(subdir, leaf)
        with open(target, 'w') as handle:
            handle.write(contents)
    return root
class TestPackIgnores(unittest.TestCase):
    """Checks that .yotta_ignore patterns and the built-in default ignores
    are honoured by component.Component and by build/test runs."""

    @classmethod
    def setUpClass(cls):
        # One shared fixture tree for every test in this class.
        cls.test_dir = writeTestFiles(Test_Files)

    @classmethod
    def tearDownClass(cls):
        rmRf(cls.test_dir)

    def test_absolute_ignores(self):
        """Patterns anchored with a leading '/' match from the module root."""
        c = component.Component(self.test_dir)
        self.assertTrue(c.ignores('moo'))
        self.assertTrue(c.ignores('test/foo/ignored.c'))

    def test_glob_ignores(self):
        """Wildcard patterns (b/c/*.txt, b/*.c) match at any depth."""
        c = component.Component(self.test_dir)
        self.assertTrue(c.ignores('a/b/c/test.txt'))
        self.assertTrue(c.ignores('a/b/test.txt'))
        self.assertTrue(c.ignores('a/b/test.c'))
        self.assertTrue(c.ignores('source/a/b/c/test.txt'))
        self.assertTrue(c.ignores('source/a/b/test.txt'))
        self.assertTrue(c.ignores('source/a/b/test.c'))

    def test_relative_ignores(self):
        """Unanchored patterns match relative to any directory."""
        c = component.Component(self.test_dir)
        self.assertTrue(c.ignores('a/b/c/d/e/f/test.txt'))
        self.assertTrue(c.ignores('a/b/test.txt'))
        self.assertTrue(c.ignores('source/a/b/c/d/e/f/test.txt'))
        self.assertTrue(c.ignores('source/a/b/test.txt'))
        self.assertTrue(c.ignores('test/anothertest/ignoredbyfname.c'))
        self.assertTrue(c.ignores('test/someothertest/alsoignored.c'))

    def test_default_ignores(self):
        """With no .yotta_ignore, the default patterns still apply."""
        default_test_dir = writeTestFiles(Default_Test_Files)
        c = component.Component(default_test_dir)
        self.assertTrue(c.ignores('.something.c.swp'))
        self.assertTrue(c.ignores('.something.c~'))
        self.assertTrue(c.ignores('path/to/.something.c.swm'))
        self.assertTrue(c.ignores('path/to/.something.c~'))
        self.assertTrue(c.ignores('.DS_Store'))
        self.assertTrue(c.ignores('.git'))
        self.assertTrue(c.ignores('.hg'))
        self.assertTrue(c.ignores('.svn'))
        self.assertTrue(c.ignores('yotta_modules'))
        self.assertTrue(c.ignores('yotta_targets'))
        self.assertTrue(c.ignores('build'))
        self.assertTrue(c.ignores('.yotta.json'))
        rmRf(default_test_dir)

    def test_comments(self):
        """Lines starting with '#' in .yotta_ignore are comments, not patterns."""
        c = component.Component(self.test_dir)
        self.assertFalse(c.ignores('comment'))

    @unittest.skipIf(isWindows(), "can't build natively on windows yet")
    def test_build(self):
        stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'clean'], self.test_dir)
        stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], self.test_dir)
        self.assertNotIn('ignoredbyfname', stdout)
        self.assertNotIn('someothertest', stdout)
        self.assertNotIn('sometest', stdout)

    @unittest.skipIf(isWindows(), "can't build natively on windows yet")
    def test_test(self):
        stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'clean'], self.test_dir)
        stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], self.test_dir)
        self.assertNotIn('ignoredbyfname', stdout)
        self.assertNotIn('someothertest', stdout)
        self.assertNotIn('sometest', stdout)

    def runCheckCommand(self, args, test_dir):
        """Run a yotta CLI command in *test_dir*, asserting exit status 0.

        Returns captured stdout (or stderr when stdout is empty).
        """
        # BUG FIX: previously ran with cwd=self.test_dir, silently ignoring
        # the test_dir parameter. All current callers pass self.test_dir, so
        # behaviour is unchanged for them, but the argument now works.
        stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
        if statuscode != 0:
            print('command failed with status %s' % statuscode)
            print(stdout)
            print(stderr)
        self.assertEqual(statuscode, 0)
        return stdout or stderr
# Allow running this test module directly; pytest/unittest discovery also works.
if __name__ == '__main__':
    unittest.main()
|
kpc
# Column tags used throughout for metallicity and the averaged alpha abundance.
_FEHTAG= 'FE_H'
_AFETAG= 'AVG_ALPHAFE'
# Plot-axis label for the averaged alpha abundance.
_AFELABEL= r'$[\left([\mathrm{O+Mg+Si+S+Ca}]/5\right)/\mathrm{Fe}]$'
catpath = '../catalogues/'                  # relative path to the data catalogues
selectFile= '../savs/selfunc-nospdata.sav'  # pickled APOGEE selection function
# NOTE(review): `apo` is only bound when the pickle exists, but get_rgbsample()
# below uses it unconditionally -- a missing save file fails later with NameError.
if os.path.exists(selectFile):
    with open(selectFile,'rb') as savefile:
        apo= pickle.load(savefile)
def get_rgbsample(loggcut = [1.8, 3.0],
                  teffcut = [0, 10000],
                  add_ages = False,
                  agetype='Martig',
                  apply_corrections=False,
                  distance_correction=False,
                  verbose = False):
    """
    Get a clean sample of dr12 APOGEE data with Michael Haydens distances
    ---
    INPUT:
        loggcut - [min, max] log(g) range to keep (defaults select giants)
        teffcut - [min, max] effective temperature range to keep
        add_ages - also join an age catalogue (see agetype)
        agetype - 'Martig' or 'Cannon'; which age catalogue to join
        apply_corrections - apply the Martig age-bias correction (lowess fit)
        distance_correction - scale distances up by 5%
        verbose - print sample sizes at each stage
    OUTPUT:
        Clean rgb sample with added distances
    HISTORY:
        Started - Mackereth 02/06/16
    NOTE(review): the list defaults are only read, never mutated, so the
    shared-mutable-default pitfall does not bite here.
    """
    #get the allStar catalogue using apogee python (exlude all bad flags etc)
    allStar = apread.allStar(rmcommissioning=True,
                             exclude_star_bad=True,
                             exclude_star_warn=True,
                             main=True,
                             ak=True,
                             adddist=False)
    #cut to a 'sensible' logg range (giants which are not too high on the RGB)
    allStar = allStar[(allStar['LOGG'] > loggcut[0])&(allStar['LOGG'] < loggcut[1])&
                      (allStar['TEFF'] > teffcut[0])&(allStar['TEFF'] < teffcut[1])]
    if verbose == True:
        print str(len(allStar))+' Stars before Distance catalogue join (after Log(g) cut)'
    #load the distance VAC
    dists = fits.open(catpath+'DR12_DIST_R-GC.fits')[1].data
    #convert to astropy Table
    allStar_tab = Table(data=allStar)
    dists_tab = Table(data=dists)
    #join table (inner join on APOGEE_ID; duplicate columns get a '2' suffix)
    tab = join(allStar_tab, dists_tab, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
    data = tab.as_array()
    # add empty columns to be filled below (absolute mags, galactocentric coords)
    data= esutil.numpy_util.add_fields(data,[('M_J', float),
                                             ('M_H', float),
                                             ('M_K', float),
                                             ('MH50_DIST', float),
                                             ('MH50_GALR', float),
                                             ('MH50_GALZ', float),
                                             ('MH50_GALPHI', float),
                                             ('AVG_ALPHAFE', float)])
    # distance modulus -> distance in kpc
    data['MH50_DIST'] = (10**((data['HAYDEN_DISTMOD_50']+5)/5))/1e3
    if distance_correction == True:
        # flat 5% distance rescaling
        data['MH50_DIST'] *= 1.05
    # galactic (l,b,d) -> heliocentric cartesian -> galactocentric cylindrical
    XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
                                data['GLAT'],
                                data['MH50_DIST'],
                                degree=True)
    RphiZ= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
                                        XYZ[:,1],
                                        XYZ[:,2],
                                        Xsun=8.,Zsun=0.025)
    data['MH50_GALR']= RphiZ[:,0]
    data['MH50_GALPHI']= RphiZ[:,1]
    data['MH50_GALZ']= RphiZ[:,2]
    # absolute magnitudes from extinction-corrected apparent mags
    data['M_J'] = data['J0']-data['HAYDEN_DISTMOD_50']
    data['M_H'] = data['H0']-data['HAYDEN_DISTMOD_50']
    data['M_K'] = data['K0']-data['HAYDEN_DISTMOD_50']
    data['AVG_ALPHAFE'] = avg_alphafe_dr12(data)
    # global [Fe/H] offset of -0.1 dex applied to the whole sample
    data[_FEHTAG] += -0.1
    #remove locations not in the apogee selection function (FIND OUT WHATS UP HERE)
    data = data[np.in1d(data['LOCATION_ID'], apo.list_fields())]
    # Remove locations outside of the Pan-STARRS dust map
    # In the Southern hemisphere
    data= data[data['LOCATION_ID'] != 4266] #240,-18
    data= data[data['LOCATION_ID'] != 4331] #5.5,-14.2
    data= data[data['LOCATION_ID'] != 4381] #5.2,-12.2
    data= data[data['LOCATION_ID'] != 4332] #1,-4
    data= data[data['LOCATION_ID'] != 4329] #0,-5
    data= data[data['LOCATION_ID'] != 4351] #0,-2
    data= data[data['LOCATION_ID'] != 4353] #358,0
    data= data[data['LOCATION_ID'] != 4385] #358.6,1.4
    # Close to the ecliptic pole where there's no data (is it the ecliptic pole?
    data= data[data['LOCATION_ID'] != 4528] #120,30
    data= data[data['LOCATION_ID'] != 4217] #123,22.4
    #remove any non-finite magnitudes
    data = data[np.isfinite(data['M_H'])]
    if verbose == True:
        print str(len(data))+' Stars with distance measures (and in good fields...)'
    if add_ages == True:
        if agetype == 'Martig':
            ages = fits.open(catpath+'DR12_martigages_vizier.fits')[1].data
            idtag = '2MASS_ID'
        if agetype == 'Cannon':
            ages = fits.open(catpath+'RGB_Cannon_Ages.fits')[1].data
            ages = esutil.numpy_util.add_fields(ages,[('Age', float)])
            # Cannon catalogue stores ln(age); convert to linear age
            ages['Age'] = np.exp(ages['ln_age'])
            idtag = 'ID'
        ages_tab = Table(data=ages)
        ages_tab.rename_column(idtag, 'APOGEE_ID')
        # inner join drops stars without an age estimate
        tab = join( ages_tab,data, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
        allStar_full = tab.as_array()
        data = allStar_full
        if verbose == True:
            print str(len(data))+' Stars with ages'
    if apply_corrections == True:
        # correct Martig ages for the input/output bias via a lowess fit
        #martig1 = np.genfromtxt(catpath+'martig2016_table1.txt', dtype=None, names=True, skip_header=2)
        martig1 = fits.open(catpath+'martig_table1.fits')
        fit = lowess(np.log10(martig1['Age_out']),np.log10(martig1['Age_in']))
        xs = np.linspace(-0.3,1.2,100)
        xsinterpolate = interp1d(xs,xs)
        fys = fit[:,0]-xsinterpolate(fit[:,1])
        interp = UnivariateSpline(fit[:,1], fys)
        corr_age = np.log10(data['Age'])+(interp(np.log10(data['Age'])))
        corr_age = 10**corr_age
        data['Age'] = corr_age
    return data
def avg_alphafe_dr12(data):
    """Weighted mean alpha abundance relative to iron for DR12 rows.

    Averages O, S, Si, Ca and Mg abundances ([X/H]), excluding entries
    flagged missing (-9999.0) from both numerator and denominator, then
    subtracts [Fe/H] and a constant 0.05 dex offset.
    """
    element_tags = ['O_H', 'S_H', 'SI_H', 'CA_H', 'MG_H']
    weights = {}
    for tag in element_tags:
        w = np.ones(len(data))
        w[data[tag] == -9999.0] = 0.
        weights[tag] = w
    weighted_sum = sum(weights[tag] * data[tag] for tag in element_tags)
    total_weight = sum(weights[tag] for tag in element_tags)
    return weighted_sum / total_weight - data['FE_H'] - 0.05
# Define the low-alpha, low-iron sample
def _lowlow_lowfeh(afe):
# The low metallicity edge
return -0.6
def _lowlow_highfeh(afe):
# The high metallicity edge
return -0.25
def _lowlow_lowafe(feh):
# The low alpha edge (-0.15,-0.075) to (-0.5,0)
return (0--0.075)/(-0.5--0.15)*(feh+0.1--0.15)-0.075
def _lowlow_highafe(feh):
# The high alpha edge (-0.15,0.075) to (-0.5,0.15)
return (0.15-0.075)/(-0.5--0.15)*(feh+0.1--0.15)+0.075
def get_lowlowsample():
    """
    NAME:
       get_lowlowsample
    PURPOSE:
       get the RGB sample at low alpha, low iron
    INPUT:
       None so far
    OUTPUT:
       sample
    HISTORY:
       2015-03-18 - Started - Bovy (IAS)
       2016-07-02 - modification - Mackereth (LJMU)
    """
    # Start from the full cleaned RGB sample, then keep only stars inside
    # the low-[a/Fe], low-[Fe/H] selection box.
    sample = get_rgbsample()
    feh = sample[_FEHTAG]
    afe = sample[_AFETAG]
    in_feh_range = (feh > _lowlow_lowfeh(0.)) & (feh <= _lowlow_highfeh(0.))
    in_afe_range = (afe > _lowlow_lowafe(feh)) & (afe <= _lowlow_highafe(feh))
    return sample[in_feh_range & in_afe_range]
# Define the high-alpha sample
def _highalpha_lowfeh(afe):
# The low metallicity edge
return -0.8
def _highalpha_highfeh(afe):
# The high metallicity edge
return -0.2
def _highalpha_lowafe(feh):
# The low alpha edge (-0.125,0.115) to (-0.6,0.215)
return (0.2-0.1)/(-0.6--0.125)*(feh+0.1--0.125)+0.115
def _highalpha_highafe(feh):
# The high alpha edge (-0.125,0.19) to (-0.6,0.29)
return (0.275-0.175)/(-0.6--0.125)*(feh+0.1--0.125)+0.19
def get_highalphasample():
    """
    NAME:
       get_highalphasample
    PURPOSE:
       get the RC sample at high alpha
    INPUT:
       None so far
    OUTPUT:
       sample
    HISTORY:
       2015-03-24 - Started - Bovy (IAS)
    """
    # NOTE(review): get_rcsample is not defined anywhere in this module's
    # visible code; the analogous get_lowlowsample() calls get_rgbsample().
    # Confirm whether get_rcsample is imported elsewhere or this should call
    # get_rgbsample().
    # Get the full sample first
    data= get_rcsample()
    # Now cut it to the high-alpha selection box
    lowfeh= _highalpha_lowfeh(0.)
    highfeh= _highalpha_highfeh(0.)
    indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
        *(data[_AFETAG] > _highalpha_lowafe(data[_FEHTAG]))\
        *(data[_AFETAG] <= _highalpha_highafe(data[_FEHTAG]))
    return data[indx]
# Define the solar sample
def _solar_lowfeh(afe):
# The low metallicity edg |
from ems.typehint import accepts
from ems.resource.repository import Repository
class DictAttributeRepository(Repository):
    """Repository adapter that exposes one dict-valued attribute of the
    wrapped repository's model objects (default attribute name: 'data')."""

    @accepts(Repository)
    def __init__(self, sourceRepo, sourceAttribute='data'):
        self._sourceRepo = sourceRepo
        # Name of the model attribute holding the dict we expose.
        self.sourceAttribute = sourceAttribute

    def get(self, id_):
        """
        Return an object by its id
        :returns: dict
        """
        model = self._sourceRepo.get(id_)
        data = getattr(model, self.sourceAttribute)
        # Mirror the id into the dict so callers can round-trip it.
        data['ID'] = id_
        return data

    def new(self, attributes=None):
        """
        Instantiate an object, optionally pre-filled with *attributes*
        :returns: object
        """
        model = self._sourceRepo.new()
        data = getattr(model, self.sourceAttribute)
        # BUG FIX: iterating the default None raised TypeError when new()
        # was called without attributes; guard against empty/None input.
        if attributes:
            for key in attributes:
                data[key] = attributes[key]
        return data

    def store(self, attributes, obj=None):
        """
        Store a new object. Create on if non passed, if one passed store the
        passed one
        :returns: object
        """
        if obj:
            raise TypeError("Obj has to be None")
        # BUG FIX: the 'ID' check previously ran against
        # {self.sourceAttribute: ...}, which never contains 'ID', so store()
        # always raised KeyError. Validate the caller-supplied attributes
        # instead, and complete the (previously truncated) message.
        if not attributes or 'ID' not in attributes:
            raise KeyError("attributes have to contain an 'ID' key")
        sourceAttributes = {self.sourceAttribute: self.new(attributes)}
        model = self._sourceRepo.store(sourceAttributes)
        return getattr(model, self.sourceAttribute)

    def update(self, model, changedAttributes):
        """
        Update model by changedAttributes and save it
        :returns: object
        """
        # TODO: intentionally a no-op for now; implement before relying on it.
        pass
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import set_cookie
class ShareonlineBiz(Account):
    __name__ = "ShareonlineBiz"
    __type__ = "account"
    __version__ = "0.41"
    __status__ = "testing"

    __description__ = """Share-online.biz account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    def api_response(self, user, password):
        """Query the share-online.biz user-details API.

        Returns the parsed key=value response as a dict; calls fail_login()
        when the response lacks the session key 'a' or reports no info.
        """
        res = self.load("https://api.share-online.biz/cgi-bin",
                        get={'q'       : "userdetails",
                             'aux'     : "traffic",
                             'username': user,
                             'password': password},
                        decode=False)
        self.log_debug(res)
        # Response body is newline-separated "key=value" pairs.
        api = dict(line.split("=") for line in res.splitlines() if "=" in line)
        # Idiom fix: `'a' not in api` instead of `not 'a' in api`.
        if 'a' not in api:
            self.fail_login(res.strip('*'))
        if api['a'].lower() == "not_available":
            self.fail_login(_("No info available"))
        return api

    def grab_info(self, user, password, data):
        """Return premium status, expiry timestamp and traffic accounting."""
        maxtraffic = 100 * 1024 * 1024 * 1024  #: 100 GB daily allowance
        api = self.api_response(user, password)
        premium = api['group'] in ("PrePaid", "Premium", "Penalty-Premium")
        validuntil = float(api['expire_date'])
        # traffic_1d is "used;..." -- first field is today's used bytes.
        traffic = float(api['traffic_1d'].split(";")[0])
        if maxtraffic > traffic:
            trafficleft = maxtraffic - traffic
        else:
            trafficleft = -1  #: unlimited / exhausted sentinel
        maxtraffic /= 1024  #@TODO: Remove `/ 1024` in 0.4.10
        trafficleft /= 1024  #@TODO: Remove `/ 1024` in 0.4.10
        return {'premium'    : premium,
                'validuntil' : validuntil,
                'trafficleft': trafficleft,
                'maxtraffic' : maxtraffic}

    def signin(self, user, password, data):
        """Validate credentials and install the session cookie 'a'."""
        api = self.api_response(user, password)
        set_cookie(self.req.cj, "share-online.biz", 'a', api['a'])
|
# -*- coding: utf-8 -*-
"""
CSS Testing
:copyright: (C) 2014 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
from os.path import join
from cssutils imp | ort CSSParser
import unittest
import trytond.tests.test_tryton
# NOTE(review): this module-level name shadows the dir() builtin; renaming it
# (e.g. CSS_DIR) would require touching its use in CSSTest.test_css as well.
dir = 'static/css/'
class CSSTest(unittest.TestCase):
    """Validates the project's stylesheets with cssutils (W3C rules)."""

    def validate(self, filename):
        """Parse *filename* with validation enabled; raises on invalid CSS."""
        parser = CSSParser(raiseExceptions=True)
        parser.parseFile(filename, validate=True)

    def test_css(self):
        """style.css must pass W3C validation."""
        stylesheet = join(dir, 'style.css')
        self.validate(stylesheet)
def suite():
    """
    Build the trytond test suite with the CSS tests appended.
    """
    combined = trytond.tests.test_tryton.suite()
    loader = unittest.TestLoader()
    combined.addTests(loader.loadTestsFromTestCase(CSSTest))
    return combined
# Run the combined trytond + CSS suite when executed directly.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
|
token = strategy.session_get('partial_pipeline_token')
partial_object = strategy.partial_load(token)
pipeline_data = None
if partial_object:
pipeline_data = {'kwargs': partial_object.kwargs, 'backend': partial_object.backend}
return pipeline_data
def get_real_social_auth_object(request):
    """
    At times, the pipeline will have a "social" kwarg that contains a dictionary
    rather than an actual DB-backed UserSocialAuth object. We need the real thing,
    so this method allows us to get that by passing in the relevant request.
    """
    # get() returns the running pipeline state for this request (or None).
    running_pipeline = get(request)
    if running_pipeline and 'social' in running_pipeline['kwargs']:
        social = running_pipeline['kwargs']['social']
        # A plain dict is a serialized reference; resolve it to the DB row.
        if isinstance(social, dict):
            social = social_django.models.UserSocialAuth.objects.get(**social)
        return social
    # Implicitly returns None when no pipeline is running or no 'social' kwarg.
def quarantine_session(request, locations):
    """
    Set a session variable indicating that the session is restricted
    to being used in views contained in the modules listed by string
    in the `locations` argument.
    Example: ``quarantine_session(request, ('enterprise.views',))``
    """
    session = request.session
    session['third_party_auth_quarantined_modules'] = locations
def lift_quarantine(request):
    """
    Remove the session quarantine variable (no-op when not set).
    """
    quarantine_key = 'third_party_auth_quarantined_modules'
    request.session.pop(quarantine_key, None)
def get_authenticated_user(auth_provider, username, uid):
    """Gets a saved user authenticated by a particular backend.
    Between pipeline steps User objects are not saved. We need to reconstitute
    the user and set its .backend, which is ordinarily monkey-patched on by
    Django during authenticate(), so it will function like a user returned by
    authenticate().
    Args:
        auth_provider: the third_party_auth provider in use for the current pipeline.
        username: string. Username of user to get.
        uid: string. The user ID according to the third party.
    Returns:
        User if user is found and has a social auth from the passed
        provider.
    Raises:
        User.DoesNotExist: if no user matching user is found, or the matching
        user has no social auth associated with the given backend.
        AssertionError: if the user is not authenticated.
    """
    match = social_django.models.DjangoStorage.user.get_social_auth(provider=auth_provider.backend_name, uid=uid)
    # Require both a social-auth row for this backend/uid AND a username match.
    if not match or match.user.username != username:
        raise User.DoesNotExist
    user = match.user
    # Re-attach the backend that authenticate() would normally monkey-patch on.
    user.backend = auth_provider.get_authentication_backend()
    return user
def _get_enabled_provider(provider_id):
    """Return the enabled provider with this id, or raise ValueError."""
    found = provider.Registry.get(provider_id)
    if not found:
        raise ValueError('Provider %s not enabled' % provider_id)
    return found
def _get_url(view_name, backend_name, auth_entry=None, redirect_url=None,
             extra_params=None, url_params=None):
    """Creates a URL to hook into social auth endpoints.

    view_name/url_params are resolved through Django's reverse(); the
    remaining keyword arguments become query-string parameters.
    """
    url_params = url_params or {}
    url_params['backend'] = backend_name
    url = reverse(view_name, kwargs=url_params)
    # OrderedDict keeps the query parameters in a deterministic order.
    query_params = OrderedDict()
    if auth_entry:
        query_params[AUTH_ENTRY_KEY] = auth_entry
    if redirect_url:
        query_params[AUTH_REDIRECT_KEY] = redirect_url
    if extra_params:
        query_params.update(extra_params)
    # NOTE(review): urllib.urlencode is the Python 2 location; under Python 3
    # this would be urllib.parse.urlencode -- confirm against this module's
    # imports (outside the visible chunk).
    return u"{url}?{params}".format(
        url=url,
        params=urllib.urlencode(query_params)
    )
def get_complete_url(backend_name):
    """Gets URL for the endpoint that returns control to the auth pipeline.

    Args:
        backend_name: string. Name of the python-social-auth backend from the
            currently-running pipeline.
    Returns:
        String. URL that finishes the auth pipeline for a provider.
    Raises:
        ValueError: if no provider is enabled with the given backend_name.
    """
    enabled = provider.Registry.get_enabled_by_backend_name(backend_name)
    if not any(enabled):
        raise ValueError('Provider with backend %s not enabled' % backend_name)
    return _get_url('social:complete', backend_name)
def get_disconnect_url(provider_id, association_id):
    """Gets URL for the endpoint that starts the disconnect pipeline.

    Args:
        provider_id: string identifier of the provider config to disconnect from.
        association_id: int. Optional ID of a specific UserSocialAuth row to
            disconnect (useful if multiple providers use a common backend).
    Returns:
        String. URL that starts the disconnection pipeline.
    Raises:
        ValueError: if no provider is enabled with the given ID.
    """
    backend_name = _get_enabled_provider(provider_id).backend_name
    if not association_id:
        return _get_url('social:disconnect', backend_name)
    return _get_url('social:disconnect_individual', backend_name,
                    url_params={'association_id': association_id})
def get_login_url(provider_id, auth_entry, redirect_url=None):
    """Gets the login URL for the endpoint that kicks off auth with a provider.

    Args:
        provider_id: string identifier of the provider config to log in with.
        auth_entry: string. Query argument specifying the desired entry point
            for the auth pipeline; must be one of _AUTH_ENTRY_CHOICES.
    Keyword Args:
        redirect_url (string): If provided, redirect to this URL at the end
            of the authentication process.
    Returns:
        String. URL that starts the auth pipeline for a provider.
    Raises:
        ValueError: if no provider is enabled with the given provider_id.
    """
    assert auth_entry in _AUTH_ENTRY_CHOICES
    chosen = _get_enabled_provider(provider_id)
    return _get_url(
        'social:begin',
        chosen.backend_name,
        auth_entry=auth_entry,
        redirect_url=redirect_url,
        extra_params=chosen.get_url_params(),
    )
def get_duplicate_provider(messages):
    """Gets provider from message about social account already in use.

    python-social-auth's exception middleware records exactly one message
    when a social account S is being associated with an edX account E while
    S already belongs to a different edX account. The check is stringly
    typed; the magic suffix is unfortunately not a reusable constant.

    Returns:
        string name of the python-social-auth backend holding the duplicate
        account, or None if there is no duplicate (and hence no error).
    """
    duplicates = [m for m in messages if m.message.endswith('is already in use.')]
    if not duplicates:
        return
    assert len(duplicates) == 1
    # The backend name is the second whitespace-separated token of extra_tags.
    return duplicates[0].extra_tags.split()[1]
def get_provider_user_states(user):
    """Gets list of states of provider-user combinations.
    Args:
        django.contrib.auth.User. The user to get states for.
    Returns:
        List of ProviderUserState. The list of states of a user's account with
        each enabled provider.
    """
    states = []
    # All social-auth rows this user has, across all backends.
    found_user_auths = list(social_django.models.DjangoStorage.user.get_social_auth_for_user(user))
    for enabled_provider in provider.Registry.enabled():
        # Find this provider's association for the user, if any.
        association = None
        for auth in found_user_auths:
            if enabled_provider.match_social_auth(auth):
                association = auth
                break
        # Include providers that accept new logins, plus any the user is
        # already associated with (even if logins are currently disabled).
        if enabled_provider.accepts_logins or association:
            states.append(
                ProviderUserState(enabled_provider, user, association)
            )
    return states
def running(request):
    """Returns True iff request is running a third-party auth pipeline."""
    pipeline_state = get(request)
    # Explicit None check: an empty dict still counts as a running pipeline.
    return pipeline_state is not None
# Pipe |
import discord
import random
import aiohttp
from lxml import html as l
async def cyanideandhappiness(cmd, message, args):
    """Fetch a random Cyanide & Happiness comic and post it as a Discord embed."""
    # 4562 was presumably the highest comic id when this was written -- TODO
    # confirm; ids above the current maximum would fetch a non-comic page.
    comic_number = random.randint(1, 4562)
    comic_url = f'http://explosm.net/comics/{comic_number}/'
    async with aiohttp.ClientSession() as session:
        async with session.get(comic_url) as data:
            page = await data.text()
    # Pull the comic image URL out of the page's #main-comic element.
    root = l.fromstring(page)
    comic_element = root.cssselect('#main-comic')
    comic_img_url = comic_element[0].attrib['src']
    # Scheme-relative URLs ("//...") need an explicit scheme for the embed.
    if comic_img_url.startswith('//'):
        comic_img_url = 'https:' + comic_img_url
    embed = discord.Embed(color=0x1ABC9C)
    embed.set_image(url=comic_img_url)
    await message.channel.send(None, embed=embed)
|
import multiprocessing
import socket
import re
import time
def handle(connection, address):
    """Per-connection worker: answers GETTEMP/ACCENDI/SPEGNI text commands."""
    import logging
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("process-%r" % (address,))
    try:
        logger.debug("Connected %r at %r", connection, address)
        while True:
            data = connection.recv(1024)
            # Empty payload means the peer closed its end. NOTE(review): the
            # "" comparison assumes Python 2 str sockets; under Python 3
            # recv() returns bytes and this would need b"" -- confirm.
            if data == "":
                logger.debug("Socket closed remotely")
                connection.shutdown(1)
                break
            logger.debug("Received data %r", data)
            if re.search("^GETTEMP",data):
                logger.debug("Send temperatura")
                # NOTE(review): terminator is '\n\r'; most line protocols use
                # '\r\n' -- confirm what the client expects.
                connection.sendall(str(18)+'\n\r')
            if re.search("^ACCENDI",data):
                logger.debug("Accendo termosifoni")
                connection.sendall('ACCESO\n\r')
            if re.search("^SPEGNI",data):
                logger.debug("Spegno termosifoni")
                connection.sendall('SPEGNI\n\r')
    except:
        # Bare except: any failure is logged with a traceback, then cleanup runs.
        logger.exception("Problem handling request")
    finally:
        logger.debug("Closing socket")
        connection.close()
class Server(object):
    """Forking-style TCP server: one multiprocessing.Process per connection."""

    def __init__(self, hostname, port):
        import logging
        self.logger = logging.getLogger("server")
        self.hostname = hostname  # interface to bind
        self.port = port          # TCP port to listen on

    def start(self):
        """Bind, listen, and dispatch each accepted connection to handle()."""
        self.logger.debug("listening")
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((self.hostname, self.port))
        self.socket.listen(1)
        while True:
            conn, address = self.socket.accept()
            self.logger.debug("Got connection")
            # Daemon child: killed automatically when the parent exits.
            process = multiprocessing.Process(target=handle, args=(conn, address))
            process.daemon = True
            process.start()
            self.logger.debug("Started process %r", process)
def getTemp():
    """Stub temperature reading; always reports 18 degrees."""
    current_temperature = 18
    return current_temperature
def checkTemp():
    """Poll config.ini thresholds forever and toggle the (simulated) heating.

    State machine: stato 0 = heating on, 1 = heating off, 2 = startup/unknown.
    NOTE(review): relies on `logging` being imported at module scope by the
    __main__ block below; calling this from another importer would NameError.
    """
    logging.info("Start checktemp")
    stato=2
    while True:
        # config.ini first line format: "<mintemp>|<maxtemp>"
        f= open("config.ini","r")
        (mintemp,maxtemp)=f.readline().split("|")
        mintemp=mintemp.rstrip()
        maxtemp=maxtemp.rstrip()
        logging.debug("Min: %s Max: %s" % (mintemp,maxtemp))
        f.close()
        if getTemp()<int(mintemp) and stato != 0:
            logging.debug("Temperatura bassa accendo i termosifoni")
            stato=0
        time.sleep(3)
        if getTemp()>=int(maxtemp) and stato != 1:
            logging.debug("Temperatura alta spegno i termosifoni")
            stato=1
        time.sleep(7)
# Entry point: spawn the temperature-watchdog process, then serve forever.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.DEBUG,format="%(asctime)s - %(levelname)s - %(message)s")
    # Background daemon polling the thermostat thresholds from config.ini.
    process = multiprocessing.Process(target=checkTemp)
    process.daemon = True
    process.start()
    server = Server("0.0.0.0", 9000)
    # NOTE(review): this only sets an attribute -- the Server class above
    # never reads allow_reuse_address, so SO_REUSEADDR is not actually applied.
    server.allow_reuse_address=True
    try:
        logging.info("Listening")
        server.start()
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
        logging.exception("Unexpected exception")
    finally:
        logging.info("Shutting down")
        for process in multiprocessing.active_children():
            logging.info("Shutting down process %r", process)
            process.terminate()
            process.join()
        logging.info("All done")
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (https | ://www.gnu.org/licenses/agpl.html)
from | . import measuring_device
|
import time
import sys
def createDots(length, delay):
    """Print *length* dots on one line, flushing and pausing *delay* s each."""
    for _ in range(length):
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(delay)
def createHash(length, delay):
    """Print *length* hash marks on one line, flushing and pausing *delay* s each."""
    for _ in range(length):
        sys.stdout.write('#')
        sys.stdout.flush()
        time.sleep(delay)
def createVrD | ots(length, delay):
for i in range(length):
print('.')
time.sleep(delay)
def deGa():
    """Brief dramatic pause (0.3 seconds)."""
    time.sleep(0.3)
|
'''
Created on 30 Jun 2015
@author: @willu47
'''
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from SALib.sample.ff import sample, find_smallest, extend_bounds
from SALib.analyze.ff import analyze, interactions
def test_find_smallest():
    """find_smallest(n) should be the exponent of the smallest power of two >= n
    (0 for n=1), checked against a table of known values."""
    cases = {1: 0, 2: 1, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 3,
             9: 4, 10: 4, 15: 4, 16: 4, 17: 5, 31: 5, 32: 5, 33: 6}
    for n, expected in cases.items():
        assert_equal(find_smallest(n), expected)
def test_extend_bounds():
    """extend_bounds pads a 12-variable problem with 4 dummies to reach 16 = 2**4."""
    problem = {'bounds': np.repeat([-1, 1], 12).reshape(2, 12).T,
               'num_vars': 12,
               'names': ["x" + str(x + 1) for x in range(12)]
               }
    actual = extend_bounds(problem)
    # Dummy variables get [0, 1] bounds; real ones keep [-1, 1].
    expected = {'names': ['x1', 'x2', 'x3', 'x4',
                          'x5', 'x6', 'x7', 'x8',
                          'x9', 'x10', 'x11', 'x12',
                          'dummy_0', 'dummy_1', 'dummy_2', 'dummy_3'],
                'bounds': [np.array([-1, 1]), np.array([-1, 1]),
                           np.array([-1, 1]), np.array([-1, 1]),
                           np.array([-1, 1]), np.array([-1, 1]),
                           np.array([-1, 1]), np.array([-1, 1]),
                           np.array([-1, 1]), np.array([-1, 1]),
                           np.array([-1, 1]), np.array([-1, 1]),
                           np.array([0, 1]), np.array([0, 1]),
                           np.array([0, 1]), np.array([0, 1])],
                'num_vars': 16}
    assert_equal(actual, expected)
def test_ff_sample():
    """Sampling a 4-variable unit-bounds problem yields the full 8-run design."""
    problem = {'bounds': [[0., 1.], [0., 1.], [0., 1.], [0., 1.]],
               'num_vars': 4,
               'names': ['x1', 'x2', 'x3', 'x4']}
    actual = sample(problem)
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    expected = np.array([[1, 1, 1, 1],
                         [1, 0, 1, 0],
                         [1, 1, 0, 0],
                         [1, 0, 0, 1],
                         [0, 0, 0, 0],
                         [0, 1, 0, 1],
                         [0, 0, 1, 1],
                         [0, 1, 1, 0]], dtype=float)
    assert_equal(actual, expected)
def test_ff_sample_scaled():
    """Sampling scales the design to each variable's bounds (x1 spans [0, 2.5])."""
    problem = {'bounds': [[0., 2.5], [0., 1.], [0., 1.], [0., 1.]],
               'num_vars': 4,
               'names': ['x1', 'x2', 'x3', 'x4']}
    actual = sample(problem)
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    expected = np.array([[2.5, 1, 1, 1],
                         [2.5, 0, 1, 0],
                         [2.5, 1, 0, 0],
                         [2.5, 0, 0, 1],
                         [0, 0, 0, 0],
                         [0, 1, 0, 1],
                         [0, 0, 1, 1],
                         [0, 1, 1, 0]], dtype=float)
    assert_equal(actual, expected)
def test_ff_analyze():
    """analyze() recovers the main effects of a known 8-run design/response."""
    problem = {'bounds': [[0., 2.5], [0., 1.], [0., 1.], [0., 1.]],
               'num_vars': 4,
               'names': ['x1', 'x2', 'x3', 'x4']}
    # BUG FIX (both arrays below): np.float was deprecated in NumPy 1.20 and
    # removed in 1.24; the builtin float is the documented replacement.
    X = np.array([[1, 1, 1, 1],
                  [1, 0, 1, 0],
                  [1, 1, 0, 0],
                  [1, 0, 0, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 1],
                  [0, 0, 1, 1],
                  [0, 1, 1, 0]], dtype=float)
    Y = np.array([1.5, 1, 1.5, 1, 2, 2.5, 2, 2.5], dtype=float)
    actual = analyze(problem, X, Y)
    expected = {'ME': np.array([-0.5, 0.25, 0., 0.]), 'names': ['x1', 'x2', 'x3', 'x4']}
    assert_equal(actual, expected)
def test_ff_example():
    """End-to-end: sample a 12-variable problem, evaluate a known model, and
    check that analyze() recovers the linear main effects (x1..x3)."""
    problem = {'bounds': np.repeat([-1, 1], 12).reshape(2, 12).T,
               'num_vars': 12,
               'names': ["x" + str(x + 1) for x in range(12)]
               }
    X = sample(problem)
    # Linear model plus an x7*x12 interaction (interactions don't show in ME).
    Y = X[:, 0] + 2 * X[:, 1] + 3 * X[:, 2] + 4 * X[:, 6] * X[:, 11]
    expected = np.array([10, -2, 4, -8, 2, 6, -4,
                         0, 2, 6, -4, 0, 10, -2, 4, -8,
                         - 2, -6, 4, 0, -10, 2, -4, 8,
                         - 10, 2, -4, 8, -2, -6, 4, 0])
    assert_equal(Y, expected)
    Si = analyze(problem, X, Y)
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    expected = np.array([1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=float)
    assert_equal(expected, Si['ME'])
def test_interactions_from_saltelli():
    """Second-order analysis of the example design: only the x7*x12
    interaction pairs carry a nonzero (4.0) interaction effect."""
    problem = {'bounds': np.repeat([-1, 1], 12).reshape(2, 12).T,
               'num_vars': 12,
               'names': ["x" + str(x + 1) for x in range(12)]
               }
    X = sample(problem)
    # Pre-computed response of x1 + 2*x2 + 3*x3 + 4*x7*x12 on this design.
    Y = np.array([10, -2, 4, -8, 2, 6, -4, 0,
                  2, 6, -4, 0, 10, -2, 4, -8,
                  - 2, -6, 4, 0, -10, 2, -4, 8,
                  - 10, 2, -4, 8, -2, -6, 4, 0])
    Si = analyze(problem, X, Y, second_order=True)
    actual = Si['IE']
    expected = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    assert_equal(actual, expected)
def test_interactions():
    """interactions() picks up the x3*x4 cross-term of a mixed linear model."""
    problem = {'bounds': [[0., 2.5], [0., 1.], [0., 1.], [0., 1.]],
               'num_vars': 4,
               'names': ['x1', 'x2', 'x3', 'x4']}
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    X = np.array([[2.5, 1.0, 1.0, 1.0],
                  [2.5, 0, 1.0, 0],
                  [2.5, 1.0, 0, 0],
                  [2.5, 0, 0, 1.0],
                  [0, 0, 0, 0],
                  [0, 1.0, 0, 1.0],
                  [0, 0, 1.0, 1.0],
                  [0, 1.0, 1.0, 0]], dtype=float)
    Y = X[:, 0] + (0.1 * X[:, 1]) + ((1.2 * X[:, 2]) * (1.3 + X[:, 3]))
    # Y = np.array([1.5, 1, 1.5, 1, 2, 2.5, 2, 2.5], dtype=float)
    ie_names, ie = interactions(problem, Y, print_to_console=True)
    actual = ie
    assert_allclose(actual, [0.3, 0, 0, 0, 0, 0.3], rtol=1e-4, atol=1e-4)
|
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES | OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
SCons compatibility package for old Python versions
This subpackage holds modules that provide backwards-compatible
implementations of various things that we'd like to use in SCons but which
only show up in later versions of Python than the early, old version(s)
we still support.
Other code will not generall | y reference things in this package through
the SCons.compat namespace. The modules included here add things to
the builtins namespace or the global module list so that the rest
of our code can use the objects and names imported here regardless of
Python version.
Simply enough, things that go in the builtins name space come from
our _scons_builtins module.
The rest of the things here will be in individual compatibility modules
that are either: 1) suitably modified copies of the future modules that
we want to use; or 2) backwards compatible re-implementations of the
specific portions of a future module's API that we want to use.
GENERAL WARNINGS: Implementations of functions in the SCons.compat
modules are *NOT* guaranteed to be fully compliant with these functions in
later versions of Python. We are only concerned with adding functionality
that we actually use in SCons, so be wary if you lift this code for
other uses. (That said, making these more nearly the same as later,
official versions is still a desirable goal, we just don't need to be
obsessive about it.)
We name the compatibility modules with an initial '_scons_' (for example,
_scons_subprocess.py is our compatibility module for subprocess) so
that we can still try to import the real module name and fall back to
our compatibility module if we get an ImportError. The import_as()
function defined below loads the module as the "real" name (without the
'_scons'), after which all of the "import {module}" statements in the
rest of our code will find our pre-loaded compatibility module.
"""
__revision__ = "src/engine/SCons/compat/__init__.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os
import sys
import imp # Use the "imp" module to protect imports from fixers.
def import_as(module, name):
    """Load *module* from this package's own directory and register it
    under *name*, returning the loaded module object.

    Used to install a '_scons_*' compatibility module under the real
    module's name so later "import <name>" statements pick it up.
    """
    here = os.path.split(__file__)[0]
    found = imp.find_module(module, [here])
    return imp.load_module(name, *found)
def rename_module(new, old):
    """Import module *old* and additionally expose it in sys.modules
    under *new*.  Returns True on success, False when *old* is missing.

    Handles purely cosmetic Python 2 -> 3 renames (e.g. '__builtin__'
    becoming 'builtins').
    """
    try:
        loaded = imp.load_module(old, *imp.find_module(old))
    except ImportError:
        return False
    sys.modules[new] = loaded
    return True
# Patch the running interpreter in place: alias renamed modules and
# back-fill stdlib names that old Pythons lack, so the rest of SCons can
# code against a single, modern API.
rename_module('builtins', '__builtin__')
import _scons_builtins
try:
    import hashlib
except ImportError:
    # Pre-2.5 Python has no hashlib module.
    try:
        import_as('_scons_hashlib', 'hashlib')
    except ImportError:
        # If we failed importing our compatibility module, it probably
        # means this version of Python has no md5 module.  Don't do
        # anything and let the higher layer discover this fact, so it
        # can fall back to using timestamp.
        pass
try:
    set
except NameError:
    # Pre-2.4 Python has no native set type; alias sets.Set as the
    # builtin set so the rest of the code can use it unconditionally.
    import_as('_scons_sets', 'sets')
    import builtins, sets
    builtins.set = sets.Set
try:
    import collections
except ImportError:
    # Pre-2.4 Python has no collections module.
    import_as('_scons_collections', 'collections')
else:
    # collections exists, but the User* classes may still live in their
    # old top-level modules; graft them onto collections when missing.
    # (exec keeps 2to3 fixers from rewriting these imports.)
    try:
        collections.UserDict
    except AttributeError:
        exec('from UserDict import UserDict as _UserDict')
        collections.UserDict = _UserDict
        del _UserDict
    try:
        collections.UserList
    except AttributeError:
        exec('from UserList import UserList as _UserList')
        collections.UserList = _UserList
        del _UserList
    try:
        collections.UserString
    except AttributeError:
        exec('from UserString import UserString as _UserString')
        collections.UserString = _UserString
        del _UserString
try:
    import io
except ImportError:
    # Pre-2.6 Python has no io module.
    import_as('_scons_io', 'io')
try:
    os.devnull
except AttributeError:
    # Pre-2.4 Python has no os.devnull attribute.
    _names = sys.builtin_module_names
    if 'posix' in _names:
        os.devnull = '/dev/null'
    elif 'nt' in _names:
        os.devnull = 'nul'
    os.path.devnull = os.devnull
try:
    os.path.lexists
except AttributeError:
    # Pre-2.4 Python has no os.path.lexists function.
    def lexists(path):
        return os.path.exists(path) or os.path.islink(path)
    os.path.lexists = lexists
# When we're using the '-3' option during regression tests, importing
# cPickle gives a warning no matter how it's done, so always use the
# real profile module, whether it's fast or not.
if os.environ.get('SCONS_HORRIBLE_REGRESSION_TEST_HACK') is None:
    # Not a regression test with '-3', so try to use faster version.
    # In 3.x, 'pickle' automatically loads the fast version if available.
    rename_module('pickle', 'cPickle')
# In 3.x, 'profile' automatically loads the fast version if available.
rename_module('profile', 'cProfile')
# Before Python 3.0, the 'queue' module was named 'Queue'.
rename_module('queue', 'Queue')
# Before Python 3.0, the 'winreg' module was named '_winreg'.
rename_module('winreg', '_winreg')
try:
    import subprocess
except ImportError:
    # Pre-2.4 Python has no subprocess module.
    import_as('_scons_subprocess', 'subprocess')
try:
    sys.intern
except AttributeError:
    # Pre-2.6 Python has no sys.intern() function.
    import builtins
    try:
        sys.intern = builtins.intern
    except AttributeError:
        # Pre-2.x Python has no builtin intern() function; identity is a
        # safe no-op fallback.
        def intern(x):
            return x
        sys.intern = intern
        del intern
try:
    sys.maxsize
except AttributeError:
    # Pre-2.6 Python has no sys.maxsize attribute.
    # Wrapping sys in () is silly, but protects it from 2to3 renames fixer.
    sys.maxsize = (sys).maxint
if os.environ.get('SCONS_HORRIBLE_REGRESSION_TEST_HACK') is not None:
    # We can't apply the 'callable' fixer until the floor is 2.6, but the
    # '-3' option to Python 2.6 and 2.7 generates almost ten thousand
    # warnings.  This hack allows us to run regression tests with the '-3'
    # option by replacing the callable() built-in function with a hack
    # that performs the same function but doesn't generate the warning.
    # Note that this hack is ONLY intended to be used for regression
    # testing, and should NEVER be used for real runs.
    from types import ClassType
    def callable(obj):
        if hasattr(obj, '__call__'): return True
        if isinstance(obj, (ClassType, type)): return True
        return False
    import builtins
    builtins.callable = callable
    del callable
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# Gallery URL routes: an optional numeric page argument, else the index.
# NOTE(review): this is the pre-Django-1.4 API -- 'django.conf.urls.defaults',
# patterns(), and dotted-string view references were removed in later Django
# releases; fine only while the project pins an old Django.
from django.conf.urls.defaults import *
urlpatterns = patterns('',
    (r'^(\d+)/$', 'onpsx.gallery.views.index'),
    (r'^$', 'onpsx.gallery.views.index'),
)
|
from logging import StreamHandler
from django.conf import settings
from celery import current_task
from celery.signals import task_prerun, task_postrun
from kombu import Connection, Exchange, Queue, Producer
from kombu.mixins import ConsumerMixin
from .utils import get_celery_task_log_path
# Shared routing setup: every task log message goes through one direct
# exchange onto the single 'celery_log' queue.
routing_key = 'celery_log'
celery_log_exchange = Exchange('celery_log_exchange', type='direct')
celery_log_queue = [Queue('celery_log', celery_log_exchange, routing_key=routing_key)]
class CeleryLoggerConsumer(ConsumerMixin):
    """Consume task log/lifecycle messages from the celery log queue and
    dispatch them to the handle_task_* hooks (no-ops here; override in
    subclasses to actually store or display the logs)."""
    def __init__(self):
        self.connection = Connection(settings.CELERY_LOG_BROKER_URL)
    def get_consumers(self, Consumer, channel):
        consumer = Consumer(
            queues=celery_log_queue,
            accept=['pickle', 'json'],
            callbacks=[self.process_task],
        )
        return [consumer]
    def handle_task_start(self, task_id, message):
        """Hook for ACTION_TASK_START messages."""
        pass
    def handle_task_end(self, task_id, message):
        """Hook for ACTION_TASK_END messages."""
        pass
    def handle_task_log(self, task_id, msg, message):
        """Hook for ACTION_TASK_LOG messages."""
        pass
    def process_task(self, body, message):
        """Route one queue message to the matching hook by its action code."""
        action = body.get('action')
        task_id = body.get('task_id')
        msg = body.get('msg')
        if action == CeleryLoggerProducer.ACTION_TASK_LOG:
            self.handle_task_log(task_id, msg, message)
        elif action == CeleryLoggerProducer.ACTION_TASK_START:
            self.handle_task_start(task_id, message)
        elif action == CeleryLoggerProducer.ACTION_TASK_END:
            self.handle_task_end(task_id, message)
class CeleryLoggerProducer:
    """Publish task log and lifecycle messages onto the celery log exchange."""
    # Message 'action' codes understood by CeleryLoggerConsumer.
    ACTION_TASK_START, ACTION_TASK_LOG, ACTION_TASK_END = range(3)
    def __init__(self):
        self.connection = Connection(settings.CELERY_LOG_BROKER_URL)
    @property
    def producer(self):
        # A fresh Producer per publish keeps this object channel-agnostic.
        return Producer(self.connection)
    def publish(self, payload):
        """Send *payload* (JSON-serialized) to the log exchange."""
        self.producer.publish(
            payload, serializer='json', exchange=celery_log_exchange,
            declare=[celery_log_exchange], routing_key=routing_key
        )
    def log(self, task_id, msg):
        """Publish one log line for *task_id*."""
        return self.publish({
            'task_id': task_id,
            'msg': msg,
            'action': self.ACTION_TASK_LOG,
        })
    def task_start(self, task_id):
        """Publish a task-started marker for *task_id*."""
        return self.publish({'task_id': task_id,
                             'action': self.ACTION_TASK_START})
    def task_end(self, task_id):
        """Publish a task-finished marker for *task_id*."""
        return self.publish({'task_id': task_id,
                             'action': self.ACTION_TASK_END})
    def read(self):
        # File-like API stub; producers are write-only.
        pass
    def flush(self):
        # File-like API stub; publish() already pushes each message.
        pass
class CeleryTaskLoggerHandler(StreamHandler):
    """logging handler that tags every record with the celery task that is
    currently running and forwards it to write_task_log().

    Subclasses implement write_task_log / handle_task_start /
    handle_task_end; records emitted outside a task are dropped.
    """
    # Task log lines end with CRLF.
    terminator = '\r\n'
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Follow the task lifecycle so subclasses can set up / tear down
        # per-task resources (files, producers, ...).
        task_prerun.connect(self.on_task_start)
        task_postrun.connect(self.on_start_end)
    @staticmethod
    def get_current_task_id():
        """Return the root id of the task being executed, or None."""
        if current_task:
            return current_task.request.root_id
        return None
    def on_task_start(self, sender, task_id, **kwargs):
        return self.handle_task_start(task_id)
    def on_start_end(self, sender, task_id, **kwargs):
        return self.handle_task_end(task_id)
    def after_task_publish(self, sender, body, **kwargs):
        pass
    def emit(self, record):
        """Write *record* for the current task; no-op outside a task."""
        task_id = self.get_current_task_id()
        if not task_id:
            return
        try:
            self.write_task_log(task_id, record)
            self.flush()
        except Exception:
            self.handleError(record)
    def write_task_log(self, task_id, msg):
        pass
    def handle_task_start(self, task_id):
        pass
    def handle_task_end(self, task_id):
        pass
class CeleryTaskMQLoggerHandler(CeleryTaskLoggerHandler):
    """Handler that ships formatted task log records to the message queue
    through a CeleryLoggerProducer."""
    def __init__(self):
        self.producer = CeleryLoggerProducer()
        super().__init__(stream=None)
    def write_task_log(self, task_id, record):
        self.producer.log(task_id, self.format(record))
    def flush(self):
        self.producer.flush()
class CeleryTaskFileHandler(CeleryTaskLoggerHandler):
    """Handler that appends task log records to a per-task log file."""
    def __init__(self):
        # File object is opened lazily by handle_task_start.
        self.f = None
        super().__init__(stream=None)
    def emit(self, record):
        # Bypass the base-class emit: the target file already identifies
        # the task.  Records arriving with no open file are dropped.
        text = self.format(record)
        if not self.f or self.f.closed:
            return
        self.f.write(text)
        self.f.write(self.terminator)
        self.flush()
    def flush(self):
        if self.f:
            self.f.flush()
    def handle_task_start(self, task_id):
        # One append-mode log file per (root) task.
        self.f = open(get_celery_task_log_path(task_id), 'a')
    def handle_task_end(self, task_id):
        if self.f:
            self.f.close()
|
#!/usr/bin/env python
# coding:utf-8
# Python 2 scraper: downloads Liao Xuefeng's Python tutorial and saves
# every chapter as a standalone local HTML file.
# NOTE(review): shadows the builtins 'input' and 'list' below.
import urllib
domain = 'http://www.liaoxuefeng.com' # site domain
path = r'C:\Users\cyhhao2013\Desktop\temp\\' # directory to save the html into
# an HTML head template prepended to every saved page
input = open(r'C:\Users\cyhhao2013\Desktop\0.html', 'r')
head = input.read()
# fetch the tutorial's index page
f = urllib.urlopen("http://www.liaoxuefeng.com/wiki/001374738125095c955c1e6d8bb493182103fac9270762a000")
home = f.read()
f.close()
# strip all newlines and spaces (makes the URLs easier to extract)
geturl = home.replace("\n", "")
geturl = geturl.replace(" ", "")
# split into fragments that each start with a chapter URL
list = geturl.split(r'em;"><ahref="')[1:]
# prepend the index page itself so it gets saved as well
list.insert(0, '/wiki/001374738125095c955c1e6d8bb493182103fac9270762a000">')
# iterate over the URL list
for li in list:
    url = li.split(r'">')[0]
    url = domain + url # build the absolute url
    print url
    f = urllib.urlopen(url)
    html = f.read()
    # grab the page title to use as the file name
    title = html.split("<title>")[1]
    title = title.split(" - 廖雪峰的官方网站</title>")[0]
    # decode and sanitize the title before using it in a filesystem path
    title = title.decode('utf-8').replace("/", " ")
    # cut out just the article body
    html = html.split(r'<!-- block main -->')[1]
    html = html.split(r'<h4>您的支持是作者写作最大的动力!</h4>')[0]
    html = html.replace(r'src="', 'src="' + domain)
    # join head + body + closing tags into a complete html document
    html = head + html+"</body></html>"
    # write the output file
    output = open(path + "%d" % list.index(li) + title + '.html', 'w')
    output.write(html)
    output.close()
|
n hash")
tx['hash'] = util.double_sha256(tx['__data__'])
else:
tx['hash'] = chain.transaction_hash(tx['__data__'])
tx_hash_array.append(tx['hash'])
tx['tx_id'] = store.tx_find_id_and_value(tx, pos == 0)
if tx['tx_id']:
all_txins_linked = False
else:
if store.commit_bytes == 0:
tx['tx_id'] = store.import_and_commit_tx(tx, pos == 0, chain)
else:
tx['tx_id'] = store.import_tx(tx, pos == 0, chain)
if tx.get('unlinked_count', 1) > 0:
all_txins_linked = False
if tx['value_in'] is None:
b['value_in'] = None
elif b['value_in'] is not None:
b['value_in'] += tx['value_in']
b['value_out'] += tx['value_out']
b['value_destroyed'] += tx['value_destroyed']
# Get a new block ID.
block_id = int(store.new_id("block"))
b['block_id'] = block_id
if chain is not None:
# Verify Merkle root.
if b['hashMerkleRoot'] != chain.merkle_root(tx_hash_array):
raise MerkleRootMismatch(b['hash'], tx_hash_array)
# Look for the parent block.
hashPrev = b['hashPrev']
if chain is None:
# XXX No longer used.
is_genesis = hashPrev == util.GENESIS_HASH_PREV
else:
is_genesis = hashPrev == chain.genesis_hash_prev
(prev_block_id, prev_height, prev_work, prev_satoshis,
prev_seconds, prev_ss, prev_total_ss, prev_nTime) = (
(None, -1, 0, 0, 0, 0, 0, b['nTime'])
if is_genesis else
store.find_prev(hashPrev))
b['prev_block_id'] = prev_block_id
b['height'] = None if prev_height is None else prev_height + 1
b['chain_work'] = util.calculate_work(prev_work, b['nBits'])
if prev_seconds is None:
b['seconds'] = None
else:
b['seconds'] = prev_seconds + b['nTime'] - prev_nTime
if prev_satoshis is None or prev_satoshis < 0 or b['value_in'] is None:
# XXX Abuse this field to save work in adopt_orphans.
b['satoshis'] = -1 - b['value_destroyed']
else:
b['satoshis'] = prev_satoshis + b['value_out'] - b['value_in'] \
- b['value_destroyed']
if prev_satoshis is None or prev_satoshis < 0:
ss_created = None
b['total_ss'] = None
else:
ss_created = prev_ | satoshis * (b['nTime'] - prev_nTime)
b['total_ss'] = prev_total_ss + ss_created
if b['height'] is None or b['height'] < 2:
b['search_block_id'] = None
else:
b['search_block_id'] = store.get_block_id_at_height(
util.get_search_height(int(b['height'])),
None if prev_block_id is None else int(prev_block_id))
# Insert the block table row.
try:
store.sql(
"""INSERT INTO bloc | k (
block_id, block_hash, block_version, block_hashMerkleRoot,
block_nTime, block_nBits, block_nNonce, block_height,
prev_block_id, block_chain_work, block_value_in,
block_value_out, block_total_satoshis,
block_total_seconds, block_total_ss, block_num_tx,
search_block_id
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)""",
(block_id, store.hashin(b['hash']), store.intin(b['version']),
store.hashin(b['hashMerkleRoot']), store.intin(b['nTime']),
store.intin(b['nBits']), store.intin(b['nNonce']),
b['height'], prev_block_id,
store.binin_int(b['chain_work'], WORK_BITS),
store.intin(b['value_in']), store.intin(b['value_out']),
store.intin(b['satoshis']), store.intin(b['seconds']),
store.intin(b['total_ss']),
len(b['transactions']), b['search_block_id']))
except store.dbmodule.DatabaseError:
if store.commit_bytes == 0:
# Rollback won't undo any previous changes, since we
# always commit.
store.rollback()
# If the exception is due to another process having
# inserted the same block, it is okay.
row = store.selectrow("""
SELECT block_id, block_satoshi_seconds
FROM block
WHERE block_hash = ?""",
(store.hashin(b['hash']),))
if row:
store.log.info("Block already inserted; block_id %d unsued",
block_id)
b['block_id'] = int(row[0])
b['ss'] = None if row[1] is None else int(row[1])
store.offer_block_to_chains(b, chain_ids)
return
# This is not an expected error, or our caller may have to
# rewind a block file. Let them deal with it.
raise
# List the block's transactions in block_tx.
for tx_pos in xrange(len(b['transactions'])):
tx = b['transactions'][tx_pos]
store.sql("""
INSERT INTO block_tx
(block_id, tx_id, tx_pos)
VALUES (?, ?, ?)""",
(block_id, tx['tx_id'], tx_pos))
store.log.info("block_tx %d %d", block_id, tx['tx_id'])
if b['height'] is not None:
store._populate_block_txin(block_id)
if all_txins_linked or not store._has_unlinked_txins(block_id):
b['ss_destroyed'] = store._get_block_ss_destroyed(
block_id, b['nTime'],
map(lambda tx: tx['tx_id'], b['transactions']))
if ss_created is None or prev_ss is None:
b['ss'] = None
else:
b['ss'] = prev_ss + ss_created - b['ss_destroyed']
store.sql("""
UPDATE block
SET block_satoshi_seconds = ?,
block_ss_destroyed = ?
WHERE block_id = ?""",
(store.intin(b['ss']),
store.intin(b['ss_destroyed']),
block_id))
else:
b['ss_destroyed'] = None
b['ss'] = None
# Store the inverse hashPrev relationship or mark the block as
# an orphan.
if prev_block_id:
store.sql("""
INSERT INTO block_next (block_id, next_block_id)
VALUES (?, ?)""", (prev_block_id, block_id))
elif not is_genesis:
store.sql("INSERT INTO orphan_block (block_id, block_hashPrev)" +
" VALUES (?, ?)", (block_id, store.hashin(b['hashPrev'])))
for row in store.selectall("""
SELECT block_id FROM orphan_block WHERE block_hashPrev = ?""",
(store.hashin(b['hash']),)):
(orphan_id,) = row
store.sql("UPDATE block SET prev_block_id = ? WHERE block_id = ?",
(block_id, orphan_id))
store.sql("""
INSERT INTO block_next (block_id, next_block_id)
VALUES (?, ?)""", (block_id, orphan_id))
store.sql("DELETE FROM orphan_block WHERE block_id = ?",
(orphan_id,))
# offer_block_to_chains calls adopt_orphans, which propagates
# block_height and other cumulative data to the blocks
# attached above.
store.offer_block_to_chains(b, chain_ids)
return block_id
def _populate_block_txin(store, block_id):
# Create rows in block_txin. In case of duplicate transactions,
# choose the one with the lowest block ID. XXX For consistency,
# it should be |
from django.forms import ModelForm, inlineformset_factory, HiddenInput, ModelChoiceField
from .models import BirthForm, SanityPillars, InnateAbility, StabilitySources
class CharBirthForm(ModelForm):
    """Single combined form for a character's birth details (name,
    pronoun, age, birthplace, drive and occupation)."""
    class Meta:
        model = BirthForm
        fields = ['name', 'pronoun', 'age', 'birthplace', 'drive', 'occupation']
'''
#(1) no from previous mistake where each thing was it' own form instead of a single super big form
class CharBirthForm(ModelForm):
class Meta:
model = BirthForm
fields = ['name', 'pronoun', 'age', 'birthplace']
# widgets = {'birthcode' : HiddenInput()}
# widgets = {'confirm_start' : HiddenInput()}
class DriveForm(ModelForm):
class Meta:
model = BirthForm
fields = ['drive']
# widgets = {'confirm_drive' : HiddenInput()}
class OccupationForm(ModelForm):
class Meta:
model = BirthForm
fields = ['occupation']
# widgets = {'confirm_occupation' : HiddenInput()}
#(2) no longer need to define the modelform explicitly, it's made from the inlineformset_factory
class SanityForm(ModelForm):
class Meta:
model = SanityPillars
fields = ['pillar', 'description']
#commenting out the confirm fields!
# def __init__(self, *args, **kwargs):
# super(DocumentForm, self).__init__(*args, **kwargs)
# self.fields['confirm_pillars'] = BooleanField(queryset=BirthForm.objects['confirm_pillars'])
# self.fields['FORCE_confirm_pillars'] = True
# widgets = {'confirm_pillars' : HiddenInput()}
class AbilitiesForm(ModelForm):
class Meta:
model = InnateAbility
fields = ['ability', 'value']
class SourceBirthForm(ModelForm):
class Meta:
model = StabilitySources
fields = ['name', 'relation', 'personality', 'residence']
'''
|
# Inline formset for pillars of sanity: every character defines 1-3 pillars.
PillarsOfSanity = inlineformset_factory(
    BirthForm,
    SanityPillars,
    fields=['pillar', 'description'],
    extra=0,
    min_num=1,
    max_num=3,
    can_delete=True,
    validate_min=True,
    validate_max=True,
)
# Inline formset for innate abilities (unbounded count, rows removable).
Abilities = inlineformset_factory(
    BirthForm,
    InnateAbility,
    fields=['ability', 'value'],
    can_delete=True,
)
# Inline formset for sources of stability: every character defines 1-4.
SourcesOfStability = inlineformset_factory(
    BirthForm,
    StabilitySources,
    fields=['name', 'relation', 'personality', 'residence'],
    extra=0,
    min_num=1,
    max_num=4,
    can_delete=True,
    validate_min=True,
    validate_max=True,
)
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2018-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
import xbmc
# Script entry point: ask Kodi to re-render the active directory listing.
if __name__ == '__main__':
    xbmc.executebuiltin("Container.Refresh")
|
import os
import sys
# Make the shared helper modules in ../tools importable.
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import tree
def main(argv):
    # argv[0]: path to a file whose first line holds the DNA string.
    dna = files.read_line(argv[0])
    st = tree.SuffixTree(dna)
    # Python 2 print statement: emits the tree's edge labels, one per line.
    print '\n'.join(st.traverse())
if __name__ == "__main__":
    main(sys.argv[1:])
|
#
# Copyright 2012 IBM, Inc.
# Copyright 2012-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from vdsm import netinfo
from testlib import VdsmTestCase as TestCaseBase
from monkeypatch import MonkeyPatch
from network import api, configurators
from network import errors
from network.models import Bond, Bridge, Nic, Vlan
def _fakeNetworks():
return {'fakebridgenet': {'iface': 'fakebridge', 'bridged': True},
'fakenet': {'iface': 'fakeint', 'bridged': False}}
def _raiseInvalidOpException(*args, **kwargs):
return RuntimeError('Attempted to apply network configuration during unit '
'testing.')
class TestConfigNetwork(TestCaseBase):
    """Validation tests for network setup requests; every configurator is
    monkey patched so no test can touch the host's real configuration."""

    def _addNetworkWithExc(self, netName, opts, errCode):
        """Assert that api._addNetwork(netName, **opts) fails with errCode."""
        with self.assertRaises(errors.ConfigNetworkError) as cneContext:
            api._addNetwork(netName, **opts)
        self.assertEqual(cneContext.exception.errCode, errCode)

    # Monkey patch the real network detection from the netinfo module.
    @MonkeyPatch(netinfo, 'networks', _fakeNetworks)
    @MonkeyPatch(netinfo, 'getMaxMtu', lambda *x: 1500)
    @MonkeyPatch(netinfo, 'getMtu', lambda *x: 1500)
    @MonkeyPatch(configurators.ifcfg, 'ifdown', lambda *x:
                 _raiseInvalidOpException())
    @MonkeyPatch(configurators.ifcfg, 'ifup',
                 lambda *x: _raiseInvalidOpException())
    @MonkeyPatch(Bond, 'configure', lambda *x: _raiseInvalidOpException())
    @MonkeyPatch(Bridge, 'configure', lambda *x: _raiseInvalidOpException())
    @MonkeyPatch(Nic, 'configure', lambda *x: _raiseInvalidOpException())
    @MonkeyPatch(Vlan, 'configure', lambda *x: _raiseInvalidOpException())
    def testAddNetworkValidation(self):
        """Exercise every rejection path of api._addNetwork."""
        _netinfo = {
            'networks': {
                'fakent': {'iface': 'fakeint', 'bridged': False},
                'fakebrnet': {'iface': 'fakebr', 'bridged': True,
                              'ports': ['eth0', 'eth1']},
                'fakebrnet1': {'iface': 'fakebr1', 'bridged': True,
                               'ports': ['bond00']},
                'fakebrnet2': {'iface': 'fakebr2', 'bridged': True,
                               'ports': ['eth7.1']},
                'fakebrnet3': {'iface': 'eth8', 'bridged': False}
            },
            'vlans': {
                'eth3.2': {'iface': 'eth3',
                           'addr': '10.10.10.10',
                           'netmask': '255.255.0.0',
                           'mtu': 1500
                           },
                'eth7.1': {'iface': 'eth7',
                           'addr': '192.168.100.1',
                           'netmask': '255.255.255.0',
                           'mtu': 1500
                           }
            },
            'nics': ['eth0', 'eth1', 'eth2', 'eth3', 'eth4', 'eth5', 'eth6',
                     'eth7', 'eth8', 'eth9', 'eth10'],
            'bridges': {
                'fakebr': {'ports': ['eth0', 'eth1']},
                'fakebr1': {'ports': ['bond00']},
                'fakebr2': {'ports': ['eth7.1']}
            },
            'bondings': {'bond00': {'slaves': ['eth5', 'eth6']}}
        }
        fakeInfo = netinfo.NetInfo(_netinfo)
        nics = ['eth2']
        # Test for already existing bridge.
        self._addNetworkWithExc('fakebrnet', dict(nics=nics,
                                _netinfo=fakeInfo), errors.ERR_USED_BRIDGE)
        # Test for already existing network.
        self._addNetworkWithExc('fakent', dict(nics=nics, _netinfo=fakeInfo),
                                errors.ERR_USED_BRIDGE)
        # Test for bonding opts passed without bonding specified.
        self._addNetworkWithExc('test', dict(nics=nics,
                                bondingOptions='mode=802.3ad',
                                _netinfo=fakeInfo), errors.ERR_BAD_BONDING)
        # Test IP without netmask.
        self._addNetworkWithExc('test', dict(nics=nics, ipaddr='10.10.10.10',
                                _netinfo=fakeInfo), errors.ERR_BAD_ADDR)
        # Test netmask without IP.
        self._addNetworkWithExc('test', dict(nics=nics,
                                netmask='255.255.255.0', _netinfo=fakeInfo),
                                errors.ERR_BAD_ADDR)
        # Test gateway without IP.
        self._addNetworkWithExc('test', dict(nics=nics, gateway='10.10.0.1',
                                _netinfo=fakeInfo), errors.ERR_BAD_ADDR)
        # Test for non existing nic.
        self._addNetworkWithExc('test', dict(nics=['eth11'],
                                _netinfo=fakeInfo), errors.ERR_BAD_NIC)
        # Test for nic already bound to a different network.
        self._addNetworkWithExc('test', dict(bonding='bond0', nics=['eth0',
                                'eth1'], _netinfo=fakeInfo),
                                errors.ERR_USED_NIC)
        # Test for bond already member of a network.
        self._addNetworkWithExc('test', dict(bonding='bond00', nics=['eth5',
                                'eth6'], _netinfo=fakeInfo),
                                errors.ERR_BAD_PARAMS)
        # Test for multiple nics without bonding device.
        self._addNetworkWithExc('test', dict(nics=['eth3', 'eth4'],
                                _netinfo=fakeInfo), errors.ERR_BAD_BONDING)
        # Test for nic already in a bond.
        self._addNetworkWithExc('test', dict(nics=['eth6'], _netinfo=fakeInfo),
                                errors.ERR_USED_NIC)
        # Test for adding a new non-VLANed bridgeless network when a non-VLANed
        # bridgeless network exists
        self._addNetworkWithExc('test', dict(nics=['eth8'], bridged=False,
                                _netinfo=fakeInfo), errors.ERR_BAD_PARAMS)

    def testBuildBondOptionsBadParams(self):
        """A bonding name that already exists must be rejected."""
        class FakeNetInfo(object):
            def __init__(self):
                self.bondings = ['god', 'bless', 'potatoes']
        with self.assertRaises(errors.ConfigNetworkError) as cne:
            api._buildBondOptions('jamesbond', {}, _netinfo=FakeNetInfo())
        # assertEquals is a deprecated unittest alias; use assertEqual for
        # consistency with the rest of this class.
        self.assertEqual(cne.exception.errCode, errors.ERR_BAD_PARAMS)

    @MonkeyPatch(netinfo, 'NetInfo', lambda: None)
    def testValidateNetSetupRemoveParamValidation(self):
        """'remove' may not be combined with other network attributes."""
        attrs = dict(nic='dummy', remove=True,
                     bridged=True)
        networks = {'test-netowrk': attrs}
        with self.assertRaises(errors.ConfigNetworkError) as cneContext:
            api._validateNetworkSetup(networks, {})
        self.assertEqual(cneContext.exception.errCode,
                         errors.ERR_BAD_PARAMS)
|
__author__ = 'Richard Lincoln, r.w.lincoln@gmail.com'
""" This example demonstrates how optimise power flow with Pyreto. """
import sys
import logging
import numpy
import scipy.io
import pylab
import pylon
import pyreto
from pyreto.util import plotGenCost
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners import ENAC, Reinforce
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import HillClimber, CMAES, ExactNES, PGPE, FEM
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.plotting import MultilinePlotter
# Route all log output (including pybrain's) straight to stdout at DEBUG.
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler) # rm pybrain
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
#logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Load the 6-bus test case and give each generator a quadratic cost curve
# (c0, c1, c2 coefficients), plus 120 MW of demand on buses 4-6.
case = pylon.Case.load("../data/case6ww.pkl")
case.generators[0].p_cost = (0.0, 16.0, 200.0)
case.generators[1].p_cost = (0.0, 2.0, 200.0)
case.generators[2].p_cost = (0.0, 32.0, 200.0)
case.buses[3].p_demand = 120.0
case.buses[4].p_demand = 120.0
case.buses[5].p_demand = 120.0
#plotGenCost(case.generators)
# Assume initial demand is peak demand (for sensor limits) and save it.
# NOTE(review): Pd0 is never read later in this script -- confirm intent.
Pd0 = [b.p_demand for b in case.buses if b.type == pylon.PQ]
# Define a 24-hour load profile with hourly values (fraction of peak).
# Bug fix: the original called the module object itself -- numpy([...])
# raises TypeError; numpy.array() is what builds the profile vector.
p1h = numpy.array([0.52, 0.54, 0.52, 0.50, 0.52, 0.57, 0.60, 0.71, 0.89, 0.85,
                   0.88, 0.94, 0.90, 0.88, 0.88, 0.82, 0.80, 0.78, 0.76, 0.68,
                   0.68, 0.68, 0.65, 0.58])
#p1h = p1h[6:-6]
# Keep only the first half-day to shorten each training episode.
p1h = p1h[:12]
nf = len(p1h)
# Create a case environment specifying the load profile.
env = pyreto.CaseEnvironment(case, p1h)
# Create an episodic cost minimisation task.
task = pyreto.MinimiseCostTask(env)
# Create a network for approximating the agent's policy function that maps
# system demand (PQ buses) to non-reference generator set-points.
nb = len([bus for bus in case.buses if bus.type == pylon.PQ])
ng = len([g for g in case.online_generators if g.bus.type != pylon.REFERENCE])
net = buildNetwork(nb, ng, bias=False)
# Create an agent and select an episodic learner.
#learner = Reinforce()
learner = ENAC()
#learner.gd.rprop = True
## only relevant for RP
#learner.gd.deltamin = 0.0001
##agent.learner.gd.deltanull = 0.05
## only relevant for BP
#learner.gd.alpha = 0.01
#learner.gd.momentum = 0.9
agent = LearningAgent(net, learner)
# Adjust some parameters of the NormalExplorer.
sigma = [50.0] * ng
learner.explorer.sigma = sigma
#learner.explorer.epsilon = 0.01 # default: 0.3
#learner.learningRate = 0.01 # (0.1-0.001, down to 1e-7 for RNNs)
# Alternatively, use blackbox optimisation.
#learner = HillClimber(storeAllEvaluations=True)
##learner = CMAES(storeAllEvaluations=True)
##learner = FEM(storeAllEvaluations=True)
##learner = ExactNES(storeAllEvaluations=True)
##learner = PGPE(storeAllEvaluations=True)
#agent = OptimizationAgent(net, learner)
# Prepare for plotting.
pylab.figure()#figsize=(16,8))
pylab.ion()
plot = MultilinePlotter(autoscale=1.1, xlim=[0, nf], ylim=[0, 1])
# Read ideal system cost and set-point values determined using OPF.
f_dc = scipy.io.mmread("../data/fDC.mtx").flatten()
f_ac = scipy.io.mmread("../data/fAC.mtx").flatten()
Pg_dc = scipy.io.mmread("../data/PgDC.mtx")
Pg_ac = scipy.io.mmread("../data/PgAC.mtx")
Qg_ac = scipy.io.mmread("../data/QgAC.mtx")
rday = range(nf)
# Plot lines 0-2: generator set-points; 3-4: ideal DC/AC cost; 5: reward.
for i in range(len(case.online_generators)):
    plot.setData(i, rday, numpy.zeros(nf))
plot.setData(3, rday, f_dc[:nf])
plot.setData(4, rday, f_ac[:nf])
plot.setData(5, rday, numpy.zeros(nf)) # reward
#plot.setData(6, rday, Pg_ac[:nf] * 10)
plot.setLineStyle(0, color="red")
plot.setLineStyle(1, color="green")
plot.setLineStyle(2, color="blue")
plot.setLineStyle(3, color="black")
plot.setLineStyle(4, color="gray")
plot.setLineStyle(5, color="orange")
#plot.setLineStyle(6, color="black")
plot.setLineStyle(linewidth=2)
plot.update()
# Give the agent its task in an experiment.
#experiment = EpisodicExperiment(task, agent)
experiment = pyreto.rlopf.OPFExperiment(task, agent)
weeks = 52 * 2
days = 5 # number of samples per gradient estimate
for week in range(weeks):
    all_rewards = experiment.doEpisodes(number=days)
    tot_reward = numpy.mean(agent.history.getSumOverSequences('reward'))
#    print learner._allEvaluations#[-:-1]
    # Plot the reward at each period averaged over the week.
    r = -1.0 * numpy.array(all_rewards).reshape(days, nf)
    avg_r = numpy.mean(r, 0)
    plot.setData(5, rday, avg_r)
    # Plot the set-point of each generator on the last day of the week.
    # FIXME: Plot the set-points averaged over the week.
    for i in range(len(case.online_generators)):
        scale_factor = 10
#        plot.setData(i, rday, env._Pg[i, :] * scale_factor)
        plot.setData(i, rday, experiment.Pg[i, :] * scale_factor)
    agent.learn()
    agent.reset()
    # Scale sigma manually (anneal the exploration noise each week).
    sigma = [(sig * 0.95) - 0.05 for sig in sigma]
    learner.explorer.sigma = sigma
    plot.update()
pylab.savefig("/tmp/rlopf.png")
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_communication | , an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_communication is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_communication is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; | without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_communication.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class MailMail(orm.Model):
    _inherit = 'mail.mail'
    def _get_unsubscribe_url(
            self, cr, uid, mail, email_to, msg=None, context=None):
        """Build the unsubscribe URL for an outgoing mailing.

        Override of the native method: only mailings attached to a
        newsletter distribution list keep the standard unsubscribe URL;
        every other mailing gets an empty one.
        """
        dist_list = mail.mailing_id.distribution_list_id
        if dist_list and dist_list.newsletter:
            return super(MailMail, self)._get_unsubscribe_url(
                cr, uid, mail, email_to, msg=msg, context=context)
        return ''
|
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.test import TestCase, tag
import factories
from formlibrary.models import CustomForm
@tag('pkg')
class CustomFormTest(TestCase):
    """CustomForm.save() must reject incomplete 'public' visibility info."""
    def setUp(self):
        self.organization = factories.Organization()
        self.user = factories.User()
    def _make_form(self, public):
        # Build (without saving) a CustomForm with the given public dict.
        return CustomForm(
            organization=self.organization,
            name="Humanitec's Survey",
            fields="{}",
            public=public
        )
    def test_save_without_public_info(self):
        with self.assertRaises(ValidationError):
            self._make_form({}).save()
    def test_save_without_public_org_info(self):
        with self.assertRaises(ValidationError):
            self._make_form({'url': True}).save()
    def test_save_without_public_url_info(self):
        with self.assertRaises(ValidationError):
            self._make_form({'org': True}).save()
    def test_save_with_public_info(self):
        # With both 'org' and 'url' present the form saves cleanly.
        custom_form = CustomForm.objects.create(
            organization=self.organization,
            name="Humanitec's Survey",
            fields="{}",
            public={'org': True, 'url': True}
        )
        self.assertEqual(custom_form.name, "Humanitec's Survey")
        self.assertEqual(custom_form.public, {'org': True, 'url': True})
|
import argparse
import numpy as np
from PIL import Image
import ocppaths
import ocpcarest
import zindex
import anydbm
import multiprocessing
import pdb
#
# ingest the PNG files into the database
#
"""This file is super-customized for Mitya's FlyEM data."""
# Stuff we make take from a config or the command line in the future
#ximagesz = 12000
#yimagesz = 12000
parser = argparse.ArgumentParser(description='Ingest the FlyEM image data.')
parser.add_argument('baseurl', action="store", help='Base URL to of ocp service no http://, e. g. neurodata.io')
parser.add_argument('token', action="store", help='Token for the annotation project.')
parser.add_argument('path', action="store", help='Directory with annotation PNG files.')
parser.add_argument('process', action="store", help='Number of processes.')
result = parser.parse_args()
# convert to an argument
resolution = 0
# load a database
[ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )
# get the dataset configuration
(xcubedim,ycubedim,zcubedim)=proj.datasetcfg.cubedim[resolution]
(startslice,endslice)=proj.datasetcfg.slicerange
batchsz=zcubedim
# This doesn't work because the image size does not match exactly the cube size
#(ximagesz,yimagesz)=proj.datasetcfg.imagesz[resolution]
ximagesz = 12000
yimagesz = 12000
batchsz=16
totalslices = range(startslice,endslice,16)
totalprocs = int(result.process)
#global anydb
#pdb.set_trace()
#anydb = anydbm.open('bodydict','r')
#anydb = dict(anydb)
def parallelwrite(slicenumber):
  """Ingest the 16-slice batch starting at slice `slicenumber`.

  Maps every (slice, superpixel) pair to a body id through the read-only
  'bodydict2' dbm, builds a z-aligned slab, and writes it cube by cube into
  the annotation database.  Relies on module globals (result, batchsz,
  ximagesz, yimagesz, cube dims, resolution) set at import time.
  Python 2 script (print statements, integer division).
  """
  # Accessing the dict in dbm
  #anydb = anydbm.open('bodydict','r')
  # Each worker process opens its own database connection.
  [ db, proj, projdb ] = ocpcarest.loadDBProj ( result.token )
  #print slicenumber
  startslice = slicenumber
  endslice = startslice+16
  # Get a list of the files in the directories
  for sl in range (startslice, endslice+1, batchsz):
    slab = np.zeros ( [ batchsz, yimagesz, ximagesz ], dtype=np.uint32 )
    for b in range ( batchsz ):
      if ( sl + b <= endslice and sl + b<=1460 ):
        # raw data
        filenm = result.path + '/superpixel.' + '{:0>5}'.format(sl+b) + '.png'
        #print "Opening filenm " + filenm
        img = Image.open ( filenm, 'r' )
        imgdata = np.asarray ( img )
        #Adding new lines
        # Body dictionary is (re)opened read-only for every slice and
        # closed again at the bottom of this if-block.
        anydb = anydbm.open('bodydict2','r')
        # Superpixel ids are packed into the R and G channels (R + G<<8).
        superpixelarray = imgdata[:,:,0] + (np.uint32(imgdata[:,:,1])<<8)
        newdata = np.zeros([superpixelarray.shape[0],superpixelarray.shape[1]], dtype=np.uint32)
        #print "slice",sl+b,"batch",sl
        print sl+b,multiprocessing.current_process()
        for i in range(superpixelarray.shape[0]):
          for j in range(superpixelarray.shape[1]):
            key = str(sl)+','+str(superpixelarray[i,j])
            if( key not in anydb):
              # Unknown superpixel: log the key and fall back to body id 0.
              f = open('missing_keys', 'a')
              f.write(key+'\n')
              f.close()
              print "Error Detected Writing to File"
              dictvalue = '0'
            else:
              dictvalue = anydb.get( key )
            newdata[i,j] = int(dictvalue)
        slab[b,:,:] = newdata
        print "end of slice:",sl+b
        anydb.close()
    print "Entering commit phase"
    # Now we have a 1024x1024x16 z-aligned cube.
    # Send it to the database.
    for y in range ( 0, yimagesz, ycubedim ):
      for x in range ( 0, ximagesz, xcubedim ):
        mortonidx = zindex.XYZMorton ( [ x/xcubedim, y/ycubedim, (sl-startslice)/zcubedim] )
        cubedata = np.zeros ( [zcubedim, ycubedim, xcubedim], dtype=np.uint32 )
        xmin = x
        ymin = y
        xmax = min ( ximagesz, x+xcubedim )
        ymax = min ( yimagesz, y+ycubedim )
        zmin = 0
        zmax = min(sl+zcubedim,endslice+1)
        cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax,ymin:ymax,xmin:xmax]
        # insert the blob into the database
        db.annotateDense ((x,y,sl-startslice), resolution, cubedata, 'O')
        print "Commiting at x=%s, y=%s, z=%s" % (x,y,sl)
    db.conn.commit()
  return None
def run():
  """Distribute the slice batches across a pool of worker processes."""
  workers = multiprocessing.Pool(totalprocs)
  # Chunk size of 16 keeps each worker on a contiguous run of batches.
  workers.map(parallelwrite, totalslices, 16)

if __name__ == "__main__":
  run()
|
"""
To plot this, you need to provide the experiment directory plus an output stem.
I use this for InvertedPendulum:
python plot.py outputs/InvertedPendulum-v1 --envname InvertedPendulum-v1 \
--out figures/InvertedPendulum-v1
(c) May 2017 by Daniel Seita
"""
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import seaborn as sns
import sys
from os.path import join
from pylab import subplots
plt.style.use('seaborn-darkgrid')
sns.set_context(rc={'lines.markeredgewidth': 1.0})
np.set_printoptions(edgeitems=100,linewidth=100,suppress=True)
# Some matplotlib settings.
LOGDIR = 'outputs/'
FIGDIR = 'figures/'
# Shared font sizes / marker settings for both figures.
title_size = 22
tick_size = 18
legend_size = 17
ysize = 18
xsize = 18
lw = 1
ms = 8
error_region_alpha = 0.3
# Attributes to include in a plot.
ATTRIBUTES = ["FinalAvgReturns",
              "FinalStdReturns",
              "FinalMaxReturns",
              "FinalMinReturns",
              "ScoresAvg",
              "ScoresStd",
              "ScoresMax",
              "ScoresMin"]
# Axes labels for environments.
# NOTE: plot_one_dir indexes this dict directly, so an --envname not listed
# here raises KeyError.
ENV_TO_YLABELS = {"HalfCheetah-v1": [-800,1000],
                  "InvertedPendulum-v1": [0,1000]}
# Colors. In general we won't use all of these.
COLORS = ['blue', 'red', 'gold', 'black']
def plot_one_dir(args, directory):
    """ The actual plotting code.
    Assumes that we'll be plotting from one directory, which usually means
    considering one random seed only, however it's better to have multiple
    random seeds so this code generalizes. For ES, we should store the output at
    *every* timestep, so A['TotalIterations'] should be like np.arange(...), but
    this generalizes in case Ray can help me run for many more iterations.

    args: parsed CLI namespace (uses args.expdir, args.out, args.envname).
    directory: iterable of run-directory names inside args.expdir, each
        containing a tab-separated log.txt with named columns.
    Saves two PNGs: <out>_log.png and <out>_rewards_std.png.
    """
    print("Now plotting based on directory {} ...".format(directory))
    ### Figure 1: The log.txt file.
    num = len(ATTRIBUTES)
    fig, axes = subplots(num, figsize=(12,3*num))
    # One curve per run directory, one subplot per attribute.
    for (dd, cc) in zip(directory, COLORS):
        A = np.genfromtxt(join(args.expdir, dd, 'log.txt'),
                          delimiter='\t', dtype=None, names=True)
        x = A['TotalIterations']
        for (i,attr) in enumerate(ATTRIBUTES):
            axes[i].plot(x, A[attr], '-', lw=lw, color=cc, label=dd)
            axes[i].set_ylabel(attr, fontsize=ysize)
            axes[i].tick_params(axis='x', labelsize=tick_size)
            axes[i].tick_params(axis='y', labelsize=tick_size)
            axes[i].legend(loc='best', ncol=1, prop={'size':legend_size})
    plt.tight_layout()
    plt.savefig(args.out+'_log.png')
    ### Figure 2: Error regions.
    num = len(directory)
    # subplots(1, 1) would return a bare Axes, so pad to keep axes[i] indexable.
    if num == 1:
        num+= 1
    fig, axes = subplots(1,num, figsize=(12*num,10))
    for (i, (dd, cc)) in enumerate(zip(directory, COLORS)):
        A = np.genfromtxt(join(args.expdir, dd, 'log.txt'),
                          delimiter='\t', dtype=None, names=True)
        axes[i].plot(A['TotalIterations'], A["FinalAvgReturns"],
                     color=cc, marker='x', ms=ms, lw=lw)
        # Shade mean +/- one standard deviation of the final returns.
        axes[i].fill_between(A['TotalIterations'],
                             A["FinalAvgReturns"] - A["FinalStdReturns"],
                             A["FinalAvgReturns"] + A["FinalStdReturns"],
                             alpha = error_region_alpha,
                             facecolor='y')
        # KeyError for environments missing from ENV_TO_YLABELS.
        axes[i].set_ylim(ENV_TO_YLABELS[args.envname])
        axes[i].tick_params(axis='x', labelsize=tick_size)
        axes[i].tick_params(axis='y', labelsize=tick_size)
        axes[i].set_title("Mean Episode Rewards ({})".format(dd), fontsize=title_size)
        axes[i].set_xlabel("ES Iterations", fontsize=xsize)
        axes[i].set_ylabel("Rewards", fontsize=ysize)
    plt.tight_layout()
    plt.savefig(args.out+'_rewards_std.png')
if __name__ == "__main__" | :
"""
Handle logic with argument parsing.
"""
parser = argparse.ArgumentParser()
parser.add_argument("expdir", help="experiment dir, e.g., /tmp/experiments")
parser.add_argument("--out", type=str, help="full directory where to save")
parser.add_argument("--envname", type=str)
args = parser.parse_args()
plot_one_dir(args, directory=os.listdir(args.expdir))
|
import pandas as pd
import numpy as np
from os.path import join as opj
import sys
from fg_shared import *
sys.path.append(opj(_git, 'utils'))
import quickr
corncob = """
require(dplyr)
require(tidyr)
require(purr)
require(corncob)
###########################################################################################
# APPLIED TO MANY FEATURES:
###########################################################################################
# Function to Fit A Beta-Binomial Model to A Single Feature
# Note: YOU HAVE A CHOICE OF W,WR,or W0 which repressent different counts
# W are counts of Meta-Clonotype (RADIUS ONLY)
# WR are counts of Meta-Clonotype (RADIUS + REGEX ONLY)
# W0 are counts o | f Clonotype (TCRDIST0 basically EXACT CLONOTYPE)
# M total counts
# AGE age in years
# SEX "Male" or "Female"
# DAYS 1 if > 2 days post diagnosis, 0 otherwise
# HLA "MATCH" or "NON-MATCH" (in this case A*01)
########################################################################################### |
#' do_corncob
#'
#' Define the beta-binomial we are attempting to fit
#'
#' @param mydata data.frame
do_corncob <- function(mydata, frm = as.formula('cbind(W, M - W) ~ AGE+SEX+DAYS+HLA')){
cb1 = bbdml(formula = frm,
phi.formula = ~ 1,
data = mydata)
return(cb1)
}
# This wrapper is useful for avoiding crashes do to errors:
possibly_do_corncob = purrr::possibly(do_corncob, otherwise = NA)
###########################################################################################
# Split Data by Feature
###########################################################################################
list_of_df_by_feature = example_df %>% split(f = example_df$feature)
###########################################################################################
# Fit Models
###########################################################################################
list_of_fit_models = purrr::map(list_of_df_by_feature, ~possibly_do_corncob(mydata = .x, frm = as.formula('cbind(W, M - W) ~ AGE+SEX+DAYS+HLA')))
list_of_fit_models = list_of_fit_models[!is.na(list_of_fit_models)]
###########################################################################################
# Parse Models
###########################################################################################
#' get bbdml coefficients into a table
#'
#'
#' @param cb is object result of corncob::bbdml
#' @param i is a label for the feature name
#'
#' @example
#' purrr::map2(list_of_fit_models, names(list_of_fit_models), ~parse_corncob(cb = .x, i = .y))
parse_corncob <- function(cb,i =1){
y = summary(cb)$coefficients
rdf = as.data.frame(y)
rdf$param = rownames(rdf)
rdf = rdf %>% mutate(estimate = Estimate, se = `Std. Error`, tvalue = `t value`, pvalue = `Pr(>|t|)`, param) %>%
mutate(type = ifelse(grepl(param, pattern = "phi"), "phi", "mu")) %>%
mutate(type2 = ifelse(grepl(param, pattern = "Intercept"), "intercept", "covariate"))
rdf$feature = i
return(rdf)
}
tabular_results = purrr::map2(list_of_fit_models, names(list_of_fit_models), ~parse_corncob(cb = .x, i = .y))
tabular_results = do.call(rbind, tabular_results) %>% tibble::remove_rownames()
clean_tabular_results = tabular_results %>% select(feature, Estimate, pvalue, param, type, type2) %>%
arrange(type2, type, pvalue)
""" |
# functions that implement analysis and synthesis of sounds using the Sinusoidal plus Stochastic Model
# (for example usage check the models_interface directory)
import numpy as np
from scipy.signal import resample, blackmanharris, triang, hanning
from scipy.fftpack import fft, ifft, fftshift
import math
import utilFunctions as UF
import dftModel as DFT
import sineModel as SM
import stochasticModel as STM
def spsModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope, stocf):
	"""
	Analysis of a sound using the sinusoidal plus stochastic model
	x: input sound, fs: sampling rate, w: analysis window; N: FFT size, t: threshold in negative dB,
	minSineDur: minimum duration of sinusoidal tracks
	maxnSines: maximum number of parallel sinusoids
	freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
	freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
	stocf: decimation factor used for the stochastic approximation
	returns hfreq, hmag, hphase: harmonic frequencies, magnitude and phases; stocEnv: stochastic residual
	"""
	# track the sinusoidal component of the input
	tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
	synthFFTSize = 512
	# remove the sinusoids from the original and model the leftover as noise
	residual = UF.sineSubtraction(x, synthFFTSize, H, tfreq, tmag, tphase, fs)
	stocEnv = STM.stochasticModelAnal(residual, H, H*2, stocf)
	return tfreq, tmag, tphase, stocEnv
def spsModelSynth(tfreq, tmag, tphase, stocEnv, N, H, fs):
	"""
	Synthesis of a sound using the sinusoidal plus stochastic model
	tfreq, tmag, tphase: sinusoidal frequencies, amplitudes and phases; stocEnv: stochastic envelope
	N: synthesis FFT size; H: hop size, fs: sampling rate
	returns y: output sound, ys: sinusoidal component, yst: stochastic component
	"""
	ys = SM.sineModelSynth(tfreq, tmag, tphase, N, H, fs)    # sinusoidal part
	yst = STM.stochasticModelSynth(stocEnv, H, H*2)          # stochastic part
	overlap = min(ys.size, yst.size)                         # common length of both parts
	y = ys[:overlap] + yst[:overlap]                         # mix the two components
	return y, ys, yst
def spsModel(x, fs, w, N, t, stocf):
	"""
	Analysis/synthesis of a sound using the sinusoidal plus stochastic model
	x: input sound, fs: sampling rate, w: analysis window,
	N: FFT size (minimum 512), t: threshold in negative dB,
	stocf: decimation factor of mag spectrum for stochastic analysis
	returns y: output sound, ys: sinusoidal component, yst: stochastic component

	NOTE(review): written for Python 2 -- N/2, Ns/4, hanning(Ns)/2 etc. rely
	on integer division; under Python 3 these become floats and would break
	the array indexing.  Confirm the target interpreter before porting.
	"""
	hN = N/2                                                # size of positive spectrum
	hM1 = int(math.floor((w.size+1)/2))                     # half analysis window size by rounding
	hM2 = int(math.floor(w.size/2))                         # half analysis window size by floor
	Ns = 512                                                # FFT size for synthesis (even)
	H = Ns/4                                                # Hop size used for analysis and synthesis
	hNs = Ns/2
	pin = max(hNs, hM1)                                     # initialize sound pointer in middle of analysis window
	pend = x.size - max(hNs, hM1)                           # last sample to start a frame
	fftbuffer = np.zeros(N)                                 # initialize buffer for FFT
	ysw = np.zeros(Ns)                                      # initialize output sound frame
	ystw = np.zeros(Ns)                                     # initialize output sound frame
	ys = np.zeros(x.size)                                   # initialize output array
	yst = np.zeros(x.size)                                  # initialize output array
	w = w / sum(w)                                          # normalize analysis window
	sw = np.zeros(Ns)
	ow = triang(2*H)                                        # overlapping window
	sw[hNs-H:hNs+H] = ow
	bh = blackmanharris(Ns)                                 # synthesis window
	bh = bh / sum(bh)                                       # normalize synthesis window
	wr = bh                                                 # window for residual
	sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
	sws = H*hanning(Ns)/2                                   # synthesis window for stochastic
	while pin<pend:
	#-----analysis-----
		x1 = x[pin-hM1:pin+hM2]                               # select frame
		mX, pX = DFT.dftAnal(x1, w, N)                        # compute dft
		ploc = UF.peakDetection(mX, t)                        # find peaks
		iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)   # refine peak values
		ipfreq = fs*iploc/float(N)                            # convert peak locations to Hertz
		ri = pin-hNs-1                                        # input sound pointer for residual analysis
		xw2 = x[ri:ri+Ns]*wr                                  # window the input sound
		fftbuffer = np.zeros(Ns)                              # reset buffer
		fftbuffer[:hNs] = xw2[hNs:]                           # zero-phase window in fftbuffer
		fftbuffer[hNs:] = xw2[:hNs]
		X2 = fft(fftbuffer)                                   # compute FFT for residual analysis
	#-----synthesis-----
		Ys = UF.genSpecSines(ipfreq, ipmag, ipphase, Ns, fs)  # generate spec of sinusoidal component
		Xr = X2-Ys;                                           # get the residual complex spectrum
		mXr = 20 * np.log10(abs(Xr[:hNs]))                    # magnitude spectrum of residual
		mXrenv = resample(np.maximum(-200, mXr), mXr.size*stocf)  # decimate the magnitude spectrum and avoid -Inf
		stocEnv = resample(mXrenv, hNs)                       # interpolate to original size
		pYst = 2*np.pi*np.random.rand(hNs)                    # generate phase random values
		Yst = np.zeros(Ns, dtype = complex)
		Yst[:hNs] = 10**(stocEnv/20) * np.exp(1j*pYst)        # generate positive freq.
		Yst[hNs+1:] = 10**(stocEnv[:0:-1]/20) * np.exp(-1j*pYst[:0:-1])  # generate negative freq.
		fftbuffer = np.zeros(Ns)
		fftbuffer = np.real(ifft(Ys))                         # inverse FFT of harmonic spectrum
		ysw[:hNs-1] = fftbuffer[hNs+1:]                       # undo zero-phase window
		ysw[hNs-1:] = fftbuffer[:hNs+1]
		fftbuffer = np.zeros(Ns)
		fftbuffer = np.real(ifft(Yst))                        # inverse FFT of stochastic spectrum
		ystw[:hNs-1] = fftbuffer[hNs+1:]                      # undo zero-phase window
		ystw[hNs-1:] = fftbuffer[:hNs+1]
		ys[ri:ri+Ns] += sw*ysw                                # overlap-add for sines
		yst[ri:ri+Ns] += sws*ystw                             # overlap-add for stochastic
		pin += H                                              # advance sound pointer
	y = ys+yst                                              # sum of sinusoidal and residual components
	return y, ys, yst
|
def get_all(tordb):
    """Return every document stored in *tordb*."""
    return tordb.find()
def delete(tordb, obj_id):
    """Remove the single document identified by *obj_id* from *tordb*."""
    doomed = [obj_id]
    tordb.remove(doomed)
def insert(tordb, obj):
    """Store *obj* and return the backend's insert result (e.g. the new id)."""
    return tordb.insert(obj)
def update_full(tordb, id, obj):
    """Overwrite the fields of document *id* with *obj* ($set semantics).

    NOTE: the parameter name `id` shadows the builtin but is part of the
    public signature, so it is kept.
    """
    selector = {'_id': id}
    tordb.update(selector, {'$set': obj})
|
def clip_matrix(image_as_matrix, width, height, top, left, expand_by=0):
    """Crop a (height x width) window at (top, left) from a 2-D image array,
    optionally grown by `expand_by` pixels on every side.

    image_as_matrix: array supporting 2-D slicing (e.g. a numpy array).
    Returns the cropped view.

    BUG FIX: start coordinates are now clamped at 0.  Previously a window
    expanded past the top/left border produced a negative start index,
    which Python slicing interprets as "from the end" and yielded an empty
    (or wrong) crop.  Near the border the crop now simply shrinks.
    """
    y_start = max(0, top - expand_by)
    y_stop = top + height + expand_by
    x_start = max(0, left - expand_by)
    x_stop = left + width + expand_by
    return image_as_matrix[y_start:y_stop, x_start:x_stop]
|
#!/usr/bin/python
import sys
if (len(sys.argv) < 2):
fn = '/usr/share/ldraw/LDConfig.ldr'
else:
fn = sys.argv[1]
f = open(fn)
for line in f:
if '!COLOUR' in line:
line = line.strip()
ns = line.split()
category = ''
if 'RUBBER' in line:
category = 'material_rubber'
elif 'METAL' in line:
category = 'material_metallic'
elif 'SPECKLE' in line:
category = 'material_speckle'
elif 'GLITTER' in line: |
category = 'material_glitter'
elif 'LUMINANCE' in line:
category = 'material_luminant'
elif 'PEARLESCENT' in line:
category = 'material_pearlescent'
elif 'CHROME' in line:
category = 'material_chrome'
elif 'ALPHA' in line:
category = 'material_trans | parent'
else:
category = 'material_normal'
name = '"' + ns[2].replace('_', ' ')
idx = int(ns[4])
color = ns[6][1:]
edge = ns[8][1:]
cr = int(color[0:2], 16)
cg = int(color[2:4], 16)
cb = int(color[4:6], 16)
ca = 255
lumi = 0
for i in range(len(ns)):
if ns[i] == 'ALPHA':
ca = int(ns[i+1])
elif ns[i] == 'LUMINANCE':
lumi = int(ns[i+1])
er = int(edge[0:2], 16)
eg = int(edge[2:4], 16)
eb = int(edge[4:6], 16)
print "{ %20s, {%3d, %3d, %3d, %3d}, {%3d, %3d, %3d, 255}, %2d, %3d, %32s\", 0L }," % (category, cr, cg, cb, ca, er, eg, eb, lumi, idx, name)
|
#!/usr/bin/env python3
"""
Perform frequency analysis on text. This is already provided by !f, this script
exists for other reasons.
"""
import sys
import argparse
import re
from collections import Counter
def parse_args():
    """Build and evaluate the command-line interface for this tool."""
    cli = argparse.ArgumentParser(description=__doc__)
    cli.add_argument("input", type=argparse.FileType("r"), help="input file")
    cli.add_argument(
        "-l", "--length", type=int, default=1,
        help="Vigenere-style key length")
    return cli.parse_args()
def IOC(cnt):
    """Index of coincidence of a letter histogram (a Counter).

    Returns -1 for an empty histogram instead of dividing by zero.
    """
    total = sum(cnt.values())
    if not total:
        return -1
    numerator = sum(freq * (freq - 1) for freq in cnt.values())
    return numerator / (total * (total - 1))
def printchart(hist, start, interval, width=80):
    """Format a frequency bar chart for one Vigenere key position.

    hist: Counter of letters; start/interval: the slice [start::interval]
    the letters came from; width: bar length of the most frequent letter.
    Returns the chart as a string.

    FIX: an empty histogram previously crashed on the most_common(1)
    tuple unpacking (ValueError); now only the header is returned, in
    line with IOC() which already handles the empty case.
    """
    header = "IOC {:.4f}\nInterval [{}::{}]\n".format(IOC(hist), start, interval)
    if not hist:
        return header
    (_, highest), = hist.most_common(1)
    highw = len(str(highest))    # column width for the count field
    return header + "\n".join(
        "{!r} ({:{highw}}) {}"
        .format(letter, frequency,
                "-" * int(width * frequency / highest),
                highw=highw)
        for letter, frequency in hist.most_common())
def histogram(text, start, interval):
    """Count the letters of *text* at positions start, start+interval, ...

    Non-letters are discarded before slicing, matching a Vigenere column.
    """
    letters = re.findall("[a-zA-Z]", text)
    return Counter(letters[start::interval])
if __name__ == "__main__":
args = parse_args()
plain = args.input.read()
for i in range(args.length):
print(printchart(histogram(plain, i, args.length), i, args.length))
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy | of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organiz | ation
# or submit itself to any jurisdiction.
"""RefExtract integration."""
from __future__ import absolute_import, division, print_function
|
from Confi | gParser import SafeConfigParser
def load_ini_file(file_name, defaults=None):
    """Flatten an INI file into a {'section.key': value} dict.

    file_name: path of the INI file to parse.
    defaults: optional mapping merged in AFTER parsing, so its entries
        override values read from the file (preserved original semantics).
    Raises IOError/OSError if the file cannot be opened.

    Fixes vs. the original: the mutable default argument ``defaults={}``
    (shared across calls) and the file handle leaked by
    ``config.readfp(open(file_name))``.
    """
    config = SafeConfigParser()
    with open(file_name) as config_file:
        # read_file() is the modern spelling; readfp() is the only one
        # available on Python 2's SafeConfigParser (removed in 3.12).
        reader = getattr(config, 'read_file', config.readfp)
        reader(config_file)
    results = {}
    for section in config.sections():
        for key, value in config.items(section):
            results[section + '.' + key] = value
    if defaults:
        results.update(defaults)
    return results
|
.ElementTree as ET
import xml.dom.minidom
# Editor modes, switched by the keyboard shortcuts handled in main().
STATE_NONE = 0
STATE_DRAW = 1
STATE_DELETE = 2
# Drawing parameters.  Colors are BGR triples as used by OpenCV:
# candidate (in-progress) geometry is orange, committed spaces are green.
POINT_RADIUS = 3
POINT_COLOR_CANDIDATE = (0,127,255)
LINE_COLOR_CANDIDATE = (0,127,255)
LINE_THICK_CANDIDATE = 2
LINE_COLOR = (0,192,0)
LINE_THICK = 2
# Window title and the id attribute written to the XML root element.
windowName = "parking"
parkingName = "deib"
def point_inside_polygon(point, poly):
    """Ray-casting point-in-polygon test.

    point: (x, y) pair; poly: list of (x, y) vertices in order.
    Returns True when the point lies inside the polygon.
    """
    px, py = point
    vertex_count = len(poly)
    inside = False
    ax, ay = poly[0]
    # Walk every edge, wrapping back around to the first vertex.
    for step in range(vertex_count + 1):
        bx, by = poly[step % vertex_count]
        # Toggle when a horizontal ray from the point crosses this edge.
        if min(ay, by) < py <= max(ay, by) and px <= max(ax, bx):
            if ay != by:
                crossing_x = (py - ay) * (bx - ax) / (by - ay) + ax
            if ax == bx or px <= crossing_x:
                inside = not inside
        ax, ay = bx, by
    return inside
def intersect(m1, a1, m2, a2):
    '''
    Intersection point of two lines, each given as (midpoint, angle).

    a1, a2 in radians.  Returns an (x, y) tuple of rounded integers.

    FIX: ``np.int`` was a deprecated alias of the builtin ``int``
    (deprecated in NumPy 1.20, removed in 1.24); use ``int`` directly.
    '''
    # Solve [[1, -tan(a1)], [1, -tan(a2)]] . [y, x]^T = b for the common point.
    A = np.matrix([[1, -np.tan(a1)],[1,-np.tan(a2)]])
    b = np.array([m1[1]-m1[0]*np.tan(a1),m2[1]-m2[0]*np.tan(a2)])
    x = np.linalg.solve(A,b)
    x = np.round(x,0).astype(int)
    return (x[1],x[0])
def meanAngle(point1, point2):
    '''
    Midpoint and orientation of the segment point1-point2.

    Returns (mid, angle) with mid a float array and angle in degrees
    in [-90;90).

    FIX: ``np.float`` was a deprecated alias of the builtin ``float``
    (deprecated in NumPy 1.20, removed in 1.24); use ``float`` directly.
    '''
    mid = (np.array(point1,float) + np.array(point2,float))/2
    if (point1[1]-point2[1]==0):
        angle = 0       # horizontal segment
    elif (point1[0]-point2[0]==0):
        angle = -90     # vertical segment
    else:
        angle = np.degrees(np.arctan(1.*(point1[1]-point2[1])/(point1[0]-point2[0])))
    return mid, angle
def rectify(quadr):
    """Fit an orthogonal rectangle to a user-drawn quadrilateral.

    quadr: four (x, y) corners in drawing order.
    Returns (rect, rotatedRect): rect is the list of four corrected
    corners; rotatedRect is (center, angle, w, h) with w <= h.

    The only functional change vs. the original is replacing the
    ``np.int`` alias (deprecated NumPy 1.20, removed 1.24) with the
    builtin ``int``.
    """
    # Determine midpoints and angular coefficients of the segments ---
    m1,a1 = meanAngle(quadr[0],quadr[1])
    m2,a2 = meanAngle(quadr[1],quadr[2])
    m3,a3 = meanAngle(quadr[2],quadr[3])
    m4,a4 = meanAngle(quadr[3],quadr[0])
    # Average the angles of the two opposite-side pairs, taking care of
    # the sign wrap at +/-90 degrees ---
    if (np.sign(a1) == np.sign(a3)):
        avgAngle1_3 = np.mean((a1,a3))
    else:
        if np.abs(a1) > 45:
            avgAngle1_3 = np.mean(np.abs((a1,a3)))
        else:
            avgAngle1_3 = np.mean((a1,a3))
    if (np.sign(a2) == np.sign(a4)):
        avgAngle2_4 = np.mean((a2,a4))
    else:
        if np.abs(a2) > 45:
            avgAngle2_4 = np.mean(np.abs((a2,a4)))
        else:
            avgAngle2_4 = np.mean((a2,a4))
    # Rotate the 2-4 average by 90 degrees so both pairs are comparable.
    if (avgAngle2_4 >= 0):
        avgAngle2_4 -= 90
    else:
        avgAngle2_4 += 90
    if (np.sign(avgAngle1_3) == np.sign(avgAngle2_4)):
        avgAngle = np.mean((avgAngle1_3,avgAngle2_4))
    else:
        if np.abs(avgAngle1_3) > 45:
            avgAngle = np.mean(np.abs((avgAngle1_3,avgAngle2_4)))
        else:
            avgAngle = np.mean((avgAngle1_3,avgAngle2_4))
    a1 = np.radians(avgAngle)
    a3 = a1
    a2 = avgAngle + 90
    if (a2 >= 90):
        a2 -= 180
    a2 = np.radians(a2)
    a4 = a2
    # Determine the intersection points between the 4 new lines ---
    p1 = intersect(m1, a1, m2, a2)
    p2 = intersect(m2, a2, m3, a3)
    p3 = intersect(m3, a3, m4, a4)
    p4 = intersect(m4, a4, m1, a1)
    rect = [p1,p2,p3,p4]
    center = np.mean(rect,axis=0).astype(int)
    angle = np.floor(avgAngle-90).astype(int)
    w = np.linalg.norm(np.array(p1)-np.array(p2)).astype(int)
    h = np.linalg.norm(np.array(p2)-np.array(p3)).astype(int)
    # Normalize so that w <= h, adjusting the angle accordingly.
    if (w>h):
        angle +=90
        (w,h)=(h,w)
    rotatedRect = (center,angle,w,h)
    return rect,rotatedRect
def redrawImg(data):
    """Repaint the window: the original image plus every committed space.

    data: the shared drawingStatus dict built in main().
    FIX: ``np.int`` alias (removed in NumPy 1.24) replaced by builtin int.
    """
    img = data['originalImg'].copy()
    for rect, rot_rect in zip(data['rectangles'], data['rotatedRectangles']):
        cv2.line(img, rect[0], rect[1], LINE_COLOR, LINE_THICK)
        cv2.line(img, rect[1], rect[2], LINE_COLOR, LINE_THICK)
        cv2.line(img, rect[2], rect[3], LINE_COLOR, LINE_THICK)
        cv2.line(img, rect[3], rect[0], LINE_COLOR, LINE_THICK)
        # Inscribed circle marks the space's center and shorter half-extent.
        cv2.circle(img, tuple(rot_rect[0]),
                   np.floor(0.5 * min(rot_rect[2:4])).astype(int),
                   LINE_COLOR, LINE_THICK)
    cv2.imshow(windowName, img)
    data['currentImg'] = img
def onMouse(event,x,y,flags,data):
    """Mouse callback: collect corners in DRAW mode, remove a space in
    DELETE mode.  `data` is the shared drawingStatus dict from main().
    """
    if (event == cv2.EVENT_LBUTTONUP):
        point = (x,y)
        if (data['status'] == STATE_DRAW):
            # Draw the point ---
            img = data['currentImg']
            cv2.circle(img,point,POINT_RADIUS,POINT_COLOR_CANDIDATE,-1)
            cv2.imshow(windowName,img)
            data['currentImg'] = img
            # Draw the line from the previous point, if any ---
            numPreviousPoints = len(data['candRect'])
            if numPreviousPoints > 0 and numPreviousPoints < 3:
                cv2.line(img,data['candRect'][numPreviousPoints-1],point,LINE_COLOR_CANDIDATE,LINE_THICK_CANDIDATE)
                cv2.imshow(windowName,img)
                data['currentImg'] = img
                # Add the point to the candidate rectangle ---
                data['candRect'] += [point]
            elif numPreviousPoints == 3:
                # Close the rectangle if this is the fourth point ---
                newRect = data['candRect'] + [point]
                # Snap the free-hand quadrilateral to a proper rectangle.
                _,newRotatedRect = rectify(newRect)
                data['rectangles'] += [newRect]
                data['rotatedRectangles'] += [newRotatedRect]
                redrawImg(data);
                data['candRect'] = []
                data['status'] = STATE_NONE
            else:
                # Add the point to the candidate rectangle ---
                data['candRect'] += [point]
        elif (data['status'] == STATE_DELETE):
            # Delete the first committed rectangle containing the click.
            found = False;
            for idx,rect in enumerate(data['rectangles']):
                if (point_inside_polygon(point,rect)):
                    found = True
                    break
            if (found):
                del data['rectangles'][idx]
                del data['rotatedRectangles'][idx]
                redrawImg(data);
            data['status'] = STATE_NONE
def main():
    """Interactive editor loop: draw/delete parking spaces on the camera
    image and export them to XML on demand.

    NOTE(review): the XML file handle is not closed in the visible code;
    presumably handled after this chunk (or on process exit) -- confirm.
    """
    print('+'+'-'*10+' Parking Lot Designer v1 '+'-'*10+'+')
    print('| Press "n" to define a new parking lot'+' '*7+'|')
    print('| Press "d" to delete an existing parking lot'+' '*1+'|')
    print('| Press "w" to save the actual configuration'+' '*2+'|')
    print('| Press "q" to quit'+' '*27+'|')
    print('+'+'-'*45+'+')
    imgPath = "camera11.jpg"
    xmlPath = "camera11.xml"
    img = cv2.imread(imgPath)
    cv2.namedWindow(windowName)
    cv2.imshow(windowName,img)
    # Mutable state shared with the onMouse callback.
    drawingStatus = {
        "status":STATE_NONE,
        "candRect":[],
        "originalImg":img,
        "currentImg":img.copy(),
        "rectangles":[],
        "rotatedRectangles":[],
    }
    cv2.setMouseCallback(windowName,onMouse,drawingStatus)
    pressedKey = -1
    while (pressedKey != ord('q')):
        pressedKey = cv2.waitKey(0)
        if (pressedKey==ord('n')):
            drawingStatus['status'] = STATE_DRAW
            drawingStatus['candRect'] = []
            redrawImg(drawingStatus);
        elif(pressedKey==ord('d')):
            drawingStatus['status'] = STATE_DELETE
            drawingStatus['candRect'] = []
            redrawImg(drawingStatus);
        elif(pressedKey==ord('w')):
            # Serialize every space: rotatedRect (center/size/angle) plus
            # the raw 4-point contour.
            print('Preparing XML')
            xmlParking = ET.Element("parking",id=parkingName)
            for idx,(rect,rotRect) in enumerate(zip(drawingStatus['rectangles'],drawingStatus['rotatedRectangles'])):
                xmlSpace = ET.SubElement(xmlParking, "space", id=str(idx+1))
                xmlRotRect = ET.SubElement(xmlSpace, "rotatedRect")
                ET.SubElement(xmlRotRect, "center", x=str(rotRect[0][0]),y=str(rotRect[0][1]))
                ET.SubElement(xmlRotRect, "size", w=str(rotRect[2]),h=str(rotRect[3]))
                ET.SubElement(xmlRotRect, "angle", d=str(rotRect[1]))
                xmlContour = ET.SubElement(xmlSpace, "contour")
                for point in rect:
                    ET.SubElement(xmlContour, "point", x=str(point[0]),y=str(point[1]))
            print('Saving to ' + xmlPath)
            xmlString = ET.tostring(xmlParking)
            xmlDom = xml.dom.minidom.parseString(xmlString)
            prettyXmlString = xmlDom.toprettyxml(indent="    ")
            fp = open(xmlPath,'w')
            fp.write(prettyXmlString)
|
# coding=utf-8
"""
Binary class deconstruct, reconstruct packet
"""
import copy
class Binary(object):
    """Deconstruct/reconstruct packets whose payload carries binary data.

    The only functional change vs. the original is replacing the
    Python-2-only ``xrange`` with ``range`` (equivalent iteration on
    both Python 2 and 3).
    """
    @staticmethod
    def deconstruct_packet(packet):
        """
        Replaces every bytearray in packet with a numbered placeholder.
        :param packet: dict whose optional 'data' entry may nest bytearrays
        :return: dict with 'packet' (placeholdered copy, plus an
            'attachments' count) and 'buffers' (the extracted bytearrays,
            ordered by placeholder number)
        """
        buffers = []
        packet_data = packet.get('data', None)
        def _deconstruct_packet(data):
            # Exact type checks (not isinstance) preserved from the
            # original wire-protocol behaviour.
            if type(data) is bytearray:
                place_holder = {
                    '_placeholder': True,
                    'num': len(buffers)
                }
                buffers.append(data)
                return place_holder
            if type(data) is list:
                new_data = []
                for d in data:
                    new_data.append(_deconstruct_packet(d))
                return new_data
            if type(data) is dict:
                new_data = {}
                for k, v in data.items():
                    new_data[k] = _deconstruct_packet(v)
                return new_data
            return data
        # Shallow copy: only the 'data' slot is replaced.
        pack = copy.copy(packet)
        pack['data'] = _deconstruct_packet(packet_data)
        pack['attachments'] = len(buffers)
        return {
            'packet': pack,
            'buffers': buffers
        }
    @staticmethod
    def reconstruct_packet(packet, buffers):
        """Inverse of deconstruct_packet: swap placeholders back for their
        buffers (in place) and drop the 'attachments' count."""
        def _reconstruct_packet(data):
            if type(data) is dict:
                if '_placeholder' in data:
                    buf = buffers[data['num']]
                    return buf
                else:
                    for k, v in data.items():
                        data[k] = _reconstruct_packet(v)
                return data
            if type(data) is list:
                # FIX: xrange is Python 2 only; range behaves the same here.
                for i in range(len(data)):
                    data[i] = _reconstruct_packet(data[i])
                return data
            return data
        packet['data'] = _reconstruct_packet(packet['data'])
        del packet['attachments']
        return packet
    @staticmethod
    def remove_blobs(data):
        """Replace every file-like object nested in *data* with a bytearray
        of its contents (lists/dicts are updated in place)."""
        def _remove_blobs(obj, cur_key=None, containing_obj=None):
            if not obj:
                return obj
            try:
                # Try to read it as a file
                buf = bytearray(obj.read())
                if containing_obj is not None and cur_key is not None:
                    containing_obj[cur_key] = buf
                else:
                    return buf
            except AttributeError:
                pass
            if type(obj) is list:
                for index, item in enumerate(obj):
                    _remove_blobs(item, index, obj)
            if type(obj) is dict:
                for k, v in obj.items():
                    _remove_blobs(v, k, obj)
            return obj
        blobless_data = _remove_blobs(data)
        return blobless_data
|
e(self, buddy, text):
self.emit('msg_buddy', buddy, text)
def update_turn(self):
self.set_sensitive(self.current_player == self.myself)
self.emit('change-turn', self.current_player)
    def change_turn(self):
        """Advance current_player to the next player (wrapping around) and
        refresh the board sensitivity."""
        # Solo game: the turn always stays with the only player.
        if len(self.players) <= 1:
            self.current_player = self.players[0]
        if self.current_player is None:
            # First turn of the game.
            self.current_player = self.players[0]
        elif self.current_player == self.players[-1]:
            # Wrap around after the last player.
            self.current_player = self.players[0]
        else:
            next_player = self.players.index(self.current_player) + 1
            self.current_player = self.players[next_player]
        self.update_turn()
    def card_flipped(self, widget, identifier, signal=False):
        """Handle a card flip, locally or from a remote 'signal'.

        First flip is remembered in last_flipped; the second flip either
        cements a matched pair (borders colored for the scoring player) or
        schedules both cards to flop back and passes the turn.
        """
        self.model.count = self.model.count + 1
        # Check if is my turn
        if (not self.sentitive and not signal) or \
                self.last_flipped == identifier:
            return
        # Handle groups if needed
        # ('divided' boards: first pick from one half, second from the other)
        if self.model.data.get('divided') == '1':
            if self.last_flipped == -1 and identifier \
                    >= (len(self.model.grid) // 2):
                return
            if self.last_flipped != -1 and identifier \
                    < (len(self.model.grid) // 2):
                return
        # do not process flips when flipping back
        if self.flip_block:
            return
        else:
            self.flip_block = True
        self.model.data['running'] = 'True'
        def flip_card(full_animation):
            # Local animation plus (for locally originated flips) the
            # network notification.
            self.emit('flip-card', identifier, full_animation)
            if not signal:
                self.emit('flip-card-signal', identifier)
        # Play the card's sound, if it defines one.
        snd = self.model.grid[identifier].get('snd', None)
        if snd is not None:
            sound_file = join(self.model.data.get('pathsnd'), snd)
            if self._audio_play_finished_id != 0:
                self.audio.disconnect(self._audio_play_finished_id)
            self.audio.play(sound_file)
        # First card case
        if self.last_flipped == -1:
            flip_card(full_animation=True)
            self.last_flipped = identifier
            self.model.grid[identifier]['state'] = '1'
            self.flip_block = False
        # Second card case
        else:
            # Pair matched
            pair_key_1 = self.model.grid[self.last_flipped]['pairkey']
            pair_key_2 = self.model.grid[identifier]['pairkey']
            if pair_key_1 == pair_key_2:
                if not signal:
                    self.emit('flip-card-signal', identifier)
                # Mark both cards with the scoring player's colors.
                stroke_color, fill_color = \
                    self.current_player.props.color.split(',')
                self.emit('set-border', identifier, stroke_color, fill_color)
                self.emit('set-border', self.last_flipped,
                          stroke_color, fill_color)
                self.increase_point(self.current_player)
                self.model.grid[identifier]['state'] = \
                    self.current_player.props.color
                self.model.grid[self.last_flipped]['state'] = \
                    self.current_player.props.color
                self.flip_block = False
                self.emit('cement-card', identifier)
                self.emit('cement-card', self.last_flipped)
            # Pair didn't match
            else:
                flip_card(full_animation=True)
                self.model.grid[identifier]['state'] = '1'
                self.set_sensitive(False)
                # Flop both cards back after a short delay.
                self._flop_cards = (identifier, self.last_flipped)
                self._flop_card_timeout = GLib.timeout_add(
                    FLOP_BACK_TIMEOUT,
                    self.flop_card, identifier, self.last_flipped)
            self.last_flipped = -1
    def flop_card(self, identifier, identifier2):
        """Turn an unmatched pair face down again, re-enable the board and
        pass the turn (invoked from the GLib timeout set in card_flipped)."""
        self._flop_card_timeout = -1
        self._flop_cards = None
        self.emit('flop-card', identifier)
        self.model.grid[identifier]['state'] = '0'
        self.emit('flop-card', identifier2)
        self.model.grid[identifier2]['state'] = '0'
        # if self.model.data['divided'] == '1':
        #     self.card_highlighted(widget, -1, False)
        self.set_sensitive(True)
        self.flip_block = False
        self.change_turn()
    def card_highlighted(self, widget, identifier, mouse):
        """Move the highlight to `identifier` (-1 clears it); honors turn
        sensitivity and the divided-board halves rule."""
        self.emit('highlight-card', self.last_highlight, False)
        self.last_highlight = identifier
        if identifier == -1 or not self.sentitive:
            return
        if self.model.data['divided'] == '1':
            if self.last_flipped == -1 and identifier \
                    >= (len(self.model.grid) // 2):
                return
            if self.last_flipped != -1 and identifier \
                    < (len(self.model.grid) // 2):
                return
        # Parses as (mouse and state == '0') or (not mouse): the mouse may
        # only highlight face-down cards; keyboard may highlight any card.
        if mouse and self.model.grid[identifier]['state'] == '0' or not mouse:
            self.emit('highlight-card', identifier, True)
def increase_point(self, buddy, inc=1):
    """Add *inc* points to *buddy*'s score, emitting one signal per point."""
    self.players_score[buddy] = self.players_score[buddy] + inc
    # One 'increase-score' per point so listeners can animate each step.
    for _ in range(inc):
        self.emit('increase-score', buddy)
def get_grid(self):
    """Return the model's card grid."""
    return self.model.grid
def collect_data(self):
    """Copy every player's score into the model data and return it.

    Scores are stored as strings keyed by the player's index in
    ``self.players`` (also stringified).
    """
    for player, score in list(self.players_score.items()):
        index = self.players.index(player)
        # Fix: the original re-read self.players_score[player] here,
        # shadowing the identical unpacked ``score`` -- dead store removed.
        self.model.data[str(index)] = str(score)
    return self.model.data
def change_game(self, widget, game_name, size, mode,
                title=None, color=None):
    """Load a new game definition and propagate it to the board.

    mode 'file'/'demo': read the setup file *game_name* from disk.
    mode 'art4apps': *game_name* and *title* are reinterpreted as the
    Art4Apps category and language (see NOTE below).
    *size*, *title* and *color* override the loaded data when given.
    """
    if mode in ['file', 'demo']:
        logging.debug('change_game set is_demo mode %s', mode)
        self.model.is_demo = (mode == 'demo')
        # model.read returns non-zero on failure; abort the game change.
        if self.model.read(game_name) != 0:
            logging.error(' Reading setup file %s', game_name)
            return
    if mode == 'art4apps':
        # NOTE: reusing the signal's existing parameters (game_name as
        # category, title as language) to avoid adding more signal args.
        category = game_name
        language = title
        color = None
        title = None
        self.model.is_demo = True
        self.model.read_art4apps(category, language)
    # Fall back to the size stored in the loaded game data.
    if size is None:
        size = int(self.model.data['size'])
    self.model.def_grid(size)
    # Apply explicit overrides only after the model has been (re)loaded.
    if title is not None:
        self.model.data['title'] = title
    if color is not None:
        self.model.data['color'] = color
    self.load_remote(self.model.grid, self.model.data, mode, False)
def reset_game(self, size=None):
    """Rebuild the grid (optionally at a new *size*) and restart play."""
    grid_size = int(self.model.data['size']) if size is None else size
    self.model.count = 0
    self.model.def_grid(grid_size)
    self.load_remote(self.model.grid, self.model.data,
                     self.model.data['mode'], False)
def set_load_mode(self, msg):
    """Broadcast *msg* as the current loading status."""
    self.emit('load_mode', msg)
def set_sensitive(self, status):
    """Toggle input handling; disabling also clears any highlight."""
    # NOTE: 'sentitive' is the (misspelled) attribute used class-wide.
    self.sentitive = status
    if status:
        return
    self.emit('highlight-card', self.last_highlight, False)
def get_sensitive(self):
    """Return whether the board currently reacts to input."""
    return self.sentitive
def get_current_player(self):
    """Return the buddy whose turn it currently is."""
    return self.current_player
def get_players_data(self):
    """Return one ``[key, nick, color, score]`` row per player."""
    return [[player.props.key, player.props.nick,
             player.props.color, score]
            for player, score in list(self.players_score.items())]
def set_wait_list(self, wait_list):
    """Adopt *wait_list* and restore scores for entries matching a
    current player, removing those entries from the waiting list.

    Each waiting entry is a sequence shaped [key, nick, color, score].
    """
    self.waiting_players = wait_list
    # Iterate over a snapshot: matched entries are removed in place.
    for entry in list(wait_list):
        for player in self.players:
            if entry[0] == player.props.key:
                # BUG FIX: the original called ``list.remove(w)`` on the
                # builtin type (TypeError at runtime); the entry must be
                # removed from wait_list (== self.waiting_players).
                wait_list.remove(entry)
                # Equivalent to calling increase_point() entry[3] times:
                # adds entry[3] points and emits one signal per point.
                self.increase_point(player, entry[3])
def set_myself(self, buddy):
    """Remember which buddy represents the local user."""
    self.myself = buddy
def add_to_waiting_list(self, buddy):
    """Move *buddy* from the active players onto the waiting list."""
    self.players.remove(buddy)
    self.waiting_players.append(buddy)
    # True -> entering wait mode.
    self.emit('wait_mode_buddy', buddy, True)
def rem_to_waiting_list(self, buddy):
    """Move *buddy* off the waiting list back into the active players."""
    self.waiting_players.remove(buddy)
    self.players.append(buddy)
    # False -> leaving wait mode.
    self.emit('wait_mode_buddy', buddy, False)
def load_waiting_list(self, wait_lis |
# Fix: plain distutils does not understand ``install_requires`` or
# ``entry_points`` (they were silently dropped), and distutils itself was
# removed from the standard library in Python 3.12.  setuptools.setup()
# is the drop-in replacement that honours both keywords.
from setuptools import setup

setup(
    name='jenkins_cli_tool',
    version='3.1',
    packages=['cli', 'cli.startjob', 'cli.startAndMonitor', 'tests'],
    url='https://github.com/hermco/jenkins_cli_tool',
    license='MIT',
    author='chermet',
    author_email='chermet@axway.com',
    description='CLI tool for Jenkins',
    install_requires=[
        'click',
        'python-jenkins'
    ],
    entry_points={
        'console_scripts': [
            'jenkins-cli-tool = cli.cli:entry_point'
        ]
    }
)
|
#!/usr/bin/env python3
'''
* Copyright 2015 by Benjamin J. Land (a.k.a. BenLand100)
*
* This file is part of chatlogs.
*
* chatlogs is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* chatlogs is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* | GNU General Public License for more details.
*
* You should have received a copy of the GNU Genera | l Public License
* along with chatlogs. If not, see <http://www.gnu.org/licenses/>.
'''
import collections
import nltk
import string
import tools
import json
import sys
import re
import multiprocessing
import enchant
# Usage: ./wordprofile.py database num splittype maxlen src+
if len(sys.argv) < 5:
    print('./wordprofile.py database num splittype maxlen src+')
    print('\tsplittype can be one of: nltk, regex, respell')
    sys.exit(1)

db = tools.database(sys.argv[1])
maxlen = int(sys.argv[4])
# One "src LIKE ?" clause per source pattern, OR'd together.
query = ' OR '.join(['src LIKE ?' for i in range(len(sys.argv)-5)])
args = tuple(sys.argv[5:])

words = collections.Counter()
if sys.argv[3] == 'nltk':
    for i in db.get_iter(query, args):
        thin = ' '.join([x.lower() for x in i.msg.split(' ') if len(x) <= maxlen])
        words.update(nltk.word_tokenize(thin))
elif sys.argv[3] == 'regex':
    # BUG FIX: the original literal '([\w'']+|[\:\=][^ ])' was two adjacent
    # string literals -- the doubled quote ended the string, silently
    # dropping the apostrophe so contractions like "don't" were split.
    # Use a raw string with the apostrophe, matching the respell branch.
    wordtokregex = re.compile(r"([\w']+|[:=][^ ])")
    for i in db.get_iter(query, args):
        thin = ' '.join([x.lower() for x in i.msg.split(' ') if len(x) <= maxlen])
        words.update([word for word in wordtokregex.findall(thin)])
elif sys.argv[3][0:7] == 'respell':
    # Optional edit distance suffix, e.g. "respell:2"; default 0.
    try:
        maxdist = int(sys.argv[3].split(':')[1])
    except (IndexError, ValueError):
        maxdist = 0
    wordtokregex = re.compile(r"([\w']+|[:=][^ ])")
    sgst = tools.suggester(maxdist)
    for i in db.get_iter(query, args):
        parts = ' '.join([x.upper() for x in i.msg.split(' ') if len(x) <= maxlen])
        parts = [word for word in wordtokregex.findall(parts)]
        parts = [sgst.suggest(word) for word in parts]
        # suggest() may return a falsy value for unknown words; drop those.
        words.update([word for word in parts if word])

# CSV-ish report: totals first, then the top-N words.
print('"---total---"', ',', sum(words.values()))
print('"---unique---"', ',', len(set(words)))
for word, count in words.most_common(int(sys.argv[2])):
    print('"' + word + '",', count)
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# Copyright (C) Gabriel Potter <gabriel@potter.fr>
# This program is published under a GPLv2 license
"""
Python 2 and 3 link classes.
"""
from __future__ import absolute_import
import base64
import binascii
import gzip
import struct
import sys
import scapy.modules.six as six
###########
# Python3 #
###########
def lambda_tuple_converter(func):
    """
    Converts a Python 2 function as
      lambda (x,y): x + y
    In the Python 3 format:
      lambda x,y : x + y
    """
    # Only single-argument callables (the Py2 tuple-parameter style)
    # need wrapping; anything else passes through untouched.
    if func is None or func.__code__.co_argcount != 1:
        return func
    return lambda *args: func(args[0] if len(args) == 1 else args)
if six.PY2:
    # On Python 2, str is the byte string type, so encoding/plain-str
    # conversion are both just str().
    bytes_encode = plain_str = str
    chb = lambda x: x if isinstance(x, str) else chr(x)
    orb = ord

    def raw(x):
        """Builds a packet and returns its bytes representation.
        This function is and always be cross-version compatible"""
        # Prefer an explicit __bytes__ when the object provides one.
        if hasattr(x, "__bytes__"):
            return x.__bytes__()
        return bytes(x)
else:
    def raw(x):
        """Builds a packet and returns its bytes representation.
        This function is and always be cross-version compatible"""
        return bytes(x)

    def bytes_encode(x):
        """Ensure that the given object is bytes.
        If the parameter is a packet, raw() should be preferred.
        """
        if isinstance(x, str):
            return x.encode()
        return bytes(x)

    if sys.version_info[0:2] <= (3, 4):
        def plain_str(x):
            """Convert basic byte objects to str"""
            # "backslashreplace" for decode() only exists on 3.5+,
            # so older interpreters simply drop undecodable bytes.
            if isinstance(x, bytes):
                return x.decode(errors="ignore")
            return str(x)
    else:
        # Python 3.5+
        def plain_str(x):
            """Convert basic byte objects to str"""
            if isinstance(x, bytes):
                return x.decode(errors="backslashreplace")
            return str(x)

    def chb(x):
        """Same than chr() but encode as bytes."""
        return struct.pack("!B", x)

    def orb(x):
        """Return ord(x) when not already an int."""
        if isinstance(x, int):
            return x
        return ord(x)
def bytes_hex(x):
    """Hexify a str or a bytes object"""
    # hexlify is the documented alias of b2a_hex.
    return binascii.hexlify(bytes_encode(x))
def hex_bytes(x):
    """De-hexify a str or a byte object"""
    # unhexlify is the documented alias of a2b_hex.
    return binascii.unhexlify(bytes_encode(x))
def base64_bytes(x):
    """Turn base64 into bytes"""
    if not six.PY2:
        return base64.decodebytes(bytes_encode(x))
    # Python 2: decodestring accepts str directly.
    return base64.decodestring(x)
def bytes_base64(x):
    """Turn bytes into base64"""
    if not six.PY2:
        return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'')
    # Python 2: encodestring works on (and returns) str.
    return base64.encodestring(x).replace('\n', '')
if six.PY2:
    from StringIO import StringIO

    def gzip_decompress(x):
        """Decompress using gzip"""
        # Python 2's gzip module has no one-shot decompress(); wrap the
        # payload in a file-like object instead.
        with gzip.GzipFile(fileobj=StringIO(x), mode='rb') as fdesc:
            return fdesc.read()

    def gzip_compress(x):
        """Compress using gzip"""
        buf = StringIO()
        with gzip.GzipFile(fileobj=buf, mode='wb') as fdesc:
            fdesc.write(x)
        return buf.getvalue()
else:
    # Python 3 ships one-shot helpers; expose them under the same names.
    gzip_decompress = gzip.decompress
    gzip_compress = gzip.compress
# Typing compatibility

try:
    # Only required if using mypy-lang for static typing
    from typing import Optional, List, Union, Callable, Any, AnyStr, Tuple, \
        Sized, Dict, Pattern, cast
except ImportError:
    # Let's make some fake ones.

    def cast(_type, obj):
        # Runtime no-op stand-in for typing.cast: return obj unchanged.
        return obj

    class _FakeType(object):
        # make the objects subscriptable indefinitely
        def __getitem__(self, item):
            return _FakeType()

    Optional = _FakeType()
    Union = _FakeType()
    Callable = _FakeType()
    List = _FakeType()
    Dict = _FakeType()
    Any = _FakeType()
    AnyStr = _FakeType()
    Tuple = _FakeType()
    Pattern = _FakeType()

    class Sized(object):
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.