prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
#################################################################
# This file is part of glyr
# + a command-line tool and library to download various sort of music related metadata.
# + Copyright (C) [2011-2012] [Christopher Pahl]
# + Hosted at: https://github.com/sahib/glyr
#
# glyr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# glyr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with glyr. If not, see <http://www.gnu.org/licenses/>.
#################################################################
#!/usr/bin/env python
# encoding: utf-8
from tests.__common__ import *
# Query options that are expected to yield no results from any provider;
# shared by every testcase below as the negative ("not found") probe.
# NOTE: the stray dataset separator after 'Accidentally given' was a
# syntax error and has been removed.
not_found_options = {
    'get_type': 'artistphoto',
    'artist': 'HorseTheBand',
    'album': 'Given, but not used.',
    'title': 'Accidentally given'
}
# Provider test matrix: each entry names a glyr provider plus a list of
# (options, expectation) pairs -- one query expected to return results
# (len_greater_0) and the shared not_found_options query expected to
# return none (len_equal_0).
# NOTE: the corrupted token "not_foun | d_options" in the bbcmusic entry
# has been repaired to "not_found_options".
TESTCASES = [{
    # {{{
    'name': 'bbcmusic',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'The Rolling Stones'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'discogs',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Nirvana'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'flickr',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Die Ärzte'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'google',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'DeVildRiVeR'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'lastfm',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Alestorm'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'singerpictures',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Equilibrium'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'rhapsody',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'In Flames'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}
]
|
a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
    """Serialize a binary-serializable object and return it as a hex
    string (e.g. for submission via RPC)."""
    serialized = obj.serialize()
    return bytes_to_hex_str(serialized)
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
    """Network address record: service bits, reserved IPv6-mapped prefix,
    IPv4 address and port, (de)serialized in wire order."""

    def __init__(self):
        self.nServices = 1
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f):
        # Field order mirrors serialize(): services, reserved, IP, port.
        (self.nServices,) = struct.unpack("<Q", f.read(8))
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        (self.port,) = struct.unpack(">H", f.read(2))

    def serialize(self):
        parts = [
            struct.pack("<Q", self.nServices),
            self.pchReserved,
            socket.inet_aton(self.ip),
            struct.pack(">H", self.port),  # port is big-endian on the wire
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (
            self.nServices, self.ip, self.port)
# Bit set in an inventory type code to request the witness form of the object.
MSG_WITNESS_FLAG = 1 << 30


class CInv():
    """Inventory vector entry: an object type code plus its uint256 hash."""

    # Readable names for the known inventory type codes.
    typemap = {
        0: "Error",
        1: "TX",
        2: "Block",
        1 | MSG_WITNESS_FLAG: "WitnessTx",
        2 | MSG_WITNESS_FLAG: "WitnessBlock",
        4: "CompactBlock",
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        (self.type,) = struct.unpack("<i", f.read(4))
        self.hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<i", self.type) + ser_uint256(self.hash)

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" % (self.typemap[self.type], self.hash)
class CBlockLocator():
    """Block locator: protocol version plus a list of block hashes (vHave)."""

    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []

    def deserialize(self, f):
        (self.nVersion,) = struct.unpack("<i", f.read(4))
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        return struct.pack("<i", self.nVersion) + ser_uint256_vector(self.vHave)

    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" % (
            self.nVersion, repr(self.vHave))
class COutPoint():
    """Reference to a transaction output: (txid hash, output index n)."""

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        (self.n,) = struct.unpack("<I", f.read(4))

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
    """Transaction input: previous outpoint, scriptSig and sequence number."""

    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        # Default to a fresh null outpoint when none is supplied.
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        prevout = COutPoint()
        prevout.deserialize(f)
        self.prevout = prevout
        self.scriptSig = deser_string(f)
        (self.nSequence,) = struct.unpack("<I", f.read(4))

    def serialize(self):
        parts = [
            self.prevout.serialize(),
            ser_string(self.scriptSig),
            struct.pack("<I", self.nSequence),
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
               self.nSequence)
class CTxOut():
    """Transaction output: value in satoshis and the locking script."""

    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        (self.nValue,) = struct.unpack("<q", f.read(8))
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)

    def __repr__(self):
        # Render the value as whole coins plus 8 decimal places.
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness():
    """Witness data for one input: a stack of byte strings."""

    def __init__(self):
        # stack is a vector of byte strings
        self.stack = []

    def __repr__(self):
        joined = ",".join(bytes_to_hex_str(item) for item in self.stack)
        return "CScriptWitness(%s)" % joined

    def is_null(self):
        # The witness is null exactly when its stack is empty.
        return not self.stack
class CTxInWitness():
    """Thin wrapper holding the script witness for a single tx input."""

    def __init__(self):
        self.scriptWitness = CScriptWitness()

    def deserialize(self, f):
        # The witness is serialized as a plain vector of strings.
        self.scriptWitness.stack = deser_string_vector(f)

    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)

    def __repr__(self):
        return repr(self.scriptWitness)

    def is_null(self):
        # Delegate nullness to the wrapped script witness.
        return self.scriptWitness.is_null()
class CTxWitness():
    """Witness for a whole transaction: one CTxInWitness per input."""

    def __init__(self):
        self.vtxinwit = []

    def deserialize(self, f):
        # Caller must have pre-sized vtxinwit to match the tx's vin.
        for witness in self.vtxinwit:
            witness.deserialize(f)

    def serialize(self):
        # This is different than the usual vector serialization -- the
        # length is omitted because it is required to equal the length of
        # the transaction's vin vector.
        return b"".join(witness.serialize() for witness in self.vtxinwit)

    def __repr__(self):
        return "CTxWitness(%s)" % ";".join(
            repr(witness) for witness in self.vtxinwit)

    def is_null(self):
        # Null iff every per-input witness is null.
        return all(witness.is_null() for witness in self.vtxinwit)
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersio | n = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize | (self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_ |
import os
import sys
def main():
    """Convert a PS3 debug log into an .expected file.

    Reads the log named by argv[1], drops everything before the
    "/app_home/..." header line, drops the header line itself, trims the
    trailing "END LOG" marker plus the 12 bytes preceding it, normalizes
    CRLF line endings to LF, and writes the result to argv[2].

    Returns False on usage or argument errors (fixed: the original leaked
    both file handles; it also contained corrupted tokens).
    """
    if len(sys.argv) <= 2:
        print("This script generates the .expected file from your PS3's debug logs.")
        print("")
        print("Usage: convert-ps3-output.py <input> <output>")
        print("Example: convert-ps3-output.py hello_world.log hello_world.expected")
        return False
    # Parse and check arguments
    inputFile = sys.argv[1]
    outputFile = sys.argv[2]
    if not os.path.isfile(inputFile):
        print("[!] Input file does not exist")
        return False
    # Context managers guarantee both handles are closed even on error.
    with open(inputFile, 'rb') as f:
        data = f.read()
    # Drop everything before the "/app_home/..." header line.
    data = data[data.find(b"/app_home/"):]
    # Skip past the header line itself (up to and including its CRLF).
    data = data[data.find(b"\x0D\x0A") + 2:]
    # Trim the "END LOG" marker and the 12 bytes that precede it.
    data = data[:data.rindex(b"END LOG") - 12]
    # Normalize CRLF to LF.
    data = data.replace(b"\x0D\x0A", b"\x0A")
    with open(outputFile, 'wb') as w:
        w.write(data)


if __name__ == "__main__":
    main()
|
merged_tally.add_score(score)
# Add triggers from second tally to merged tally
for trigger in tally.triggers:
merged_tally.add_trigger(trigger)
return merged_tally
def get_tally_xml(self):
    """Return XML representation of the tally

    Returns
    -------
    element : xml.etree.ElementTree.Element
        XML element containing tally data

    Raises
    ------
    ValueError
        If the tally contains no scores.
    """
    element = ET.Element("tally")

    # Tally ID (always present).
    element.set("id", str(self.id))

    # Optional Tally name.
    if self.name != '':
        element.set("name", self.name)

    # One <filter> child per filter, with its space-separated bins.
    for tally_filter in self.filters:
        filter_element = ET.SubElement(element, "filter")
        filter_element.set("type", str(tally_filter.type))
        if tally_filter.bins is not None:
            bin_words = ['{0}'.format(b) for b in tally_filter.bins]
            filter_element.set("bins", ' '.join(bin_words))

    # Optional <nuclides> child listing nuclide names (when the Summary
    # was linked) or raw ZAID values.
    if len(self.nuclides) > 0:
        words = []
        for nuclide in self.nuclides:
            if isinstance(nuclide, Nuclide):
                words.append('{0}'.format(nuclide.name))
            else:
                words.append('{0}'.format(nuclide))
        nuclides_element = ET.SubElement(element, "nuclides")
        nuclides_element.text = ' '.join(words)

    # <scores> is mandatory -- a tally without scores cannot be exported.
    if len(self.scores) == 0:
        msg = 'Unable to get XML for Tally ID="{0}" since it does not ' \
              'contain any scores'.format(self.id)
        raise ValueError(msg)
    scores_element = ET.SubElement(element, "scores")
    scores_element.text = ' '.join('{0}'.format(s) for s in self.scores)

    # Optional estimator type.
    if self.estimator is not None:
        estimator_element = ET.SubElement(element, "estimator")
        estimator_element.text = self.estimator

    # Optional triggers append their own subelements to the tally element.
    for trigger in self.triggers:
        trigger.get_trigger_xml(element)

    return element
def find_filter(self, filter_type):
    """Return a filter in the tally that matches a specified type

    Parameters
    ----------
    filter_type : str
        Type of the filter, e.g. 'mesh'

    Returns
    -------
    filter : openmc.filter.Filter
        Filter from this tally with matching type

    Raises
    ------
    ValueError
        If no matching Filter is found
    """
    # Return the first of this Tally's filters with the requested type.
    for candidate in self.filters:
        if candidate.type == filter_type:
            return candidate
    # No match -- report which tally was searched.
    msg = 'Unable to find filter type "{0}" in ' \
          'Tally ID="{1}"'.format(filter_type, self.id)
    raise ValueError(msg)
def get_filter_index(self, filter_type, filter_bin):
    """Returns the index in the Tally's results array for a Filter bin

    Parameters
    ----------
    filter_type : str
        The type of Filter (e.g., 'cell', 'energy', etc.)
    filter_bin : Integral or tuple
        The bin is an integer ID for 'material', 'surface', 'cell',
        'cellborn', and 'universe' Filters; an integer cell instance ID
        for 'distribcell' Filters; a 2-tuple of floats (energy bounds)
        for 'energy'/'energyout' Filters; and an (x, y, z) 3-tuple for
        'mesh' Filters.

    Returns
    -------
    The index in the Tally data array for this filter bin
    """
    # Locate the matching Filter, then delegate the bin lookup to it.
    matching_filter = self.find_filter(filter_type)
    return matching_filter.get_bin_index(filter_bin)
def get_nuclide_index(self, nuclide):
    """Returns the index in the Tally's results array for a Nuclide bin

    Parameters
    ----------
    nuclide : str
        The name of the Nuclide (e.g., 'H-1', 'U-238')

    Returns
    -------
    nuclide_index : int
        The index in the Tally data array for this nuclide.

    Raises
    ------
    KeyError
        When the argument passed to the 'nuclide' parameter cannot be found
        in the Tally.
    """
    # (Fixed: corrupted tokens "br | eak" / "| if" from the source dump;
    # also removed the redundant else-after-raise.)
    nuclide_index = -1
    # Look for the user-requested nuclide in all of the Tally's Nuclides.
    for i, test_nuclide in enumerate(self.nuclides):
        # If the Summary was linked, then values are Nuclide objects.
        if isinstance(test_nuclide, Nuclide):
            if test_nuclide._name == nuclide:
                nuclide_index = i
                break
        # If the Summary has not been linked, then values are ZAIDs.
        else:
            if test_nuclide == nuclide:
                nuclide_index = i
                break
    if nuclide_index == -1:
        msg = 'Unable to get the nuclide index for Tally since "{0}" ' \
              'is not one of the nuclides'.format(nuclide)
        raise KeyError(msg)
    return nuclide_index
def get_score_index(self, score):
    """Returns the index in the Tally's results array for a score bin

    Parameters
    ----------
    score : str
        The score string (e.g., 'absorption', 'nu-fission')

    Returns
    -------
    score_index : int
        The index in the Tally data array for this score.

    Raises
    ------
    ValueError
        When the argument passed to the 'score' parameter cannot be found
        in the Tally.
    """
    try:
        return self.scores.index(score)
    except ValueError:
        # Re-raise with a message that identifies the offending score.
        msg = 'Unable to get the score index for Tally since "{0}" ' \
              'is not one of the scores'.format(score)
        raise ValueError(msg)
def get_filter_indices(self, filters=[], filter_bins=[]):
"""Get indices into the filter axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the filter
axis of the tally's data array (axis=0) for particular combinations
of filters and their corresponding bins.
Parameters
----------
filters : list of str
A list of filter type strings
(e.g., ['mesh', 'energy']; default is [])
filter_bins : list of Iterables
A list of the filter bins corresponding to the filter_types
parameter (e.g., [(1,), (0., 0.625e-6)]; default is []). Each bin
in the list is the integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. Each bin is an integer for the
cell instance ID for 'distribcell' Filters. Each bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is a (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell of
interest. The order of the bins in the list must correspond to the
filter_types parameter.
Returns
-------
ndarray
A NumPy array of the filter indices
"""
cv.check_iterable_type('filters', filters, basestring)
cv.check_iterable_type('filter_bins', filter_bins, tuple)
|
import unittest
import os
import sys
import time
import sqlite3
import os.path as op
from shutil import rmtree
from functools import partial
from kivy.clock import Clock
main_path = op.dirname(op.dirname(op.abspath(__file__)))
sys.path.append(main_path)
from main import KrySA, ErrorPop
from tasks.basic import Basic
class Test(unittest.TestCase):
    """GUI-driven test of KrySA's 'basic small' task.

    A scripted interaction is scheduled on the Kivy Clock: it creates a
    project, builds a small three-column data table, runs the task over
    several cell-address ranges and checks the displayed results.
    (Fixed: corrupted tokens "cols | [0]" / "val | s[0]" from the source
    dump; duplicated mkdir/rmtree logic in the fixture.)
    """

    def pause(*args):
        # Minimal sleep so the Kivy event loop keeps processing frames.
        time.sleep(0.000001)

    def run_test(self, app, *args):
        """Body of the test, executed inside the running app."""
        Clock.schedule_interval(self.pause, 0.000001)

        # Open New -> Project popup and create the 'Test' project.
        app.root._new_project()
        app.root.savedlg.view.selection = [self.folder, ]
        app.root.savedlg.ids.name.text = 'Test.krysa'
        app.root.savedlg.run([self.folder, ], 'Test.krysa')
        # Paths created by the project wizard (not used further here).
        project_folder = op.join(self.path, 'test_folder', 'Test')
        data = op.join(project_folder, 'data')
        results = op.join(project_folder, 'results')

        # Open New -> Data popup and name the new table.
        app.root._new_data()
        new_data = app.root.wiz_newdata.ids.container.children[0]
        new_data.ids.table_name.text = 'NewData'
        cols = new_data.ids.columns.children

        # Two INTEGER columns holding the values 1..13.
        range_vals = range(13)
        for _ in range(2):
            new_data.ids.columnadd.dispatch('on_release')
            cols[0].ids.colname.text += str(len(cols))
            cols[0].ids.coltype.text = 'INTEGER'
            vals = cols[0].ids.vals.children
            for i in range_vals:
                cols[0].ids.valadd.dispatch('on_release')
                vals[0].ids.value.text = str(i + 1)

        # One REAL column holding 1.1, 2.2, ..., 13.13.
        new_data.ids.columnadd.dispatch('on_release')
        cols[0].ids.colname.text += str(len(cols))
        cols[0].ids.coltype.text = 'REAL'
        vals = cols[0].ids.vals.children
        for i in range_vals:
            cols[0].ids.valadd.dispatch('on_release')
            num = str(i + 1)
            vals[0].ids.value.text = num + '.' + num
        new_data = app.root.wiz_newdata.run()

        # Open the Task's popup once per address range and run it.
        k = '3'
        address = ['A1:D13', 'A1:B2', 'C1:D2',
                   'A12:B13', 'C12:D13', 'B3:C10']
        for addr in address:
            taskcls = Basic()
            taskcls.basic_small()
            children = app.root_window.children
            for c in reversed(children):
                if 'Task' in str(c):
                    index = children.index(c)
                    task = children[index]

            # Fill the task popup: table name, address range and k.
            body = task.children[0].children[0].children[0].children
            body[-1].text = 'NewData'
            body[-2].children[0].children[-1].children[0].text = addr
            body[-2].children[0].children[-2].children[0].text = k
            body[-3].children[0].dispatch('on_release')

        # Compare displayed results against expectations (newest first).
        expected = reversed([2, 2, 2, 13, 13, 4])
        results = app.root.ids.results
        skipone = False  # True if a top-padding widget is present
        for c in results.children:
            if 'Widget' in str(c):
                skipone = True
                break
        for i, exp in enumerate(expected):
            i = i + 1 if skipone else i
            # Result -> Page -> container -> result
            result = float(results.children[i].ids.page.children[1].text)
            self.assertEqual(result, exp)
        app.stop()

    def test_tasks_basic_small(self):
        """Prepare a clean scratch folder, run the app, clean up after."""
        self.path = op.dirname(op.abspath(__file__))
        test_folder = op.join(self.path, 'test_folder')
        # Always start from an empty folder.
        if op.exists(test_folder):
            rmtree(test_folder)
        os.mkdir(test_folder)
        self.folder = test_folder
        app = KrySA()
        p = partial(self.run_test, app)
        Clock.schedule_once(p, .000001)
        app.run()
        rmtree(self.folder)


if __name__ == '__main__':
    unittest.main()
|
None:
info.type = value
else:
edge = TransitionGenerator.Edge(incoming=False, role=role, value=value)
info.edges.append(edge)
if value == frame:
edge.inverse = edge
continue
if value.islocal():
nb_info = frame_info.get(value, None)
if nb_info is None:
nb_info = TransitionGenerator.FrameInfo(value)
frame_info[value] = nb_info
nb_edge = TransitionGenerator.Edge(
incoming=True, role=role, value=frame)
nb_info.edges.append(nb_edge)
nb_edge.inverse = edge
edge.inverse = nb_edge
pending.append(value)
# Initialize bookkeeping for all frames pointed to by this frame.
for p in pending:
self._init_info(p, frame_info, initialized)
# Translates 'simple' action to an Action using indices from 'attention'.
def _translate(self, attention, simple):
    """Convert a 'simple' action into an Action whose frame references
    are indices into the 'attention' buffer."""
    action = Action(t=simple.action.type)
    if simple.action.length is not None:
        action.length = simple.action.length
    if simple.action.role is not None:
        action.role = simple.action.role

    kind = action.type
    if kind == Action.EVOKE:
        action.label = simple.info.type
    elif kind == Action.REFER:
        action.target = attention.index(simple.info.handle)
    elif kind == Action.EMBED:
        action.label = simple.info.type
        action.target = attention.index(simple.other_info.handle)
    elif kind == Action.ELABORATE:
        action.label = simple.info.type
        action.source = attention.index(simple.other_info.handle)
    elif kind == Action.CONNECT:
        action.source = attention.index(simple.info.handle)
        action.target = attention.index(simple.other_info.handle)
    elif kind == Action.ASSIGN:
        action.source = attention.index(simple.info.handle)
        action.label = simple.action.label
    return action
# Updates frame indices in 'attention' as a result of the action 'simple'.
def _update(self, attention, simple):
    """Maintain 'attention' (most-recent-first frame handles) after the
    action 'simple' has been emitted."""
    kind = simple.action.type
    if kind in (Action.EVOKE, Action.EMBED, Action.ELABORATE):
        # A newly created frame goes to the center of attention.
        attention.insert(0, simple.info.handle)
    elif kind in (Action.REFER, Action.ASSIGN, Action.CONNECT):
        # An existing frame is promoted to the center of attention.
        attention.remove(simple.info.handle)
        attention.insert(0, simple.info.handle)
# Builds and returns a simple action of type 'type'.
def _simple_action(self, type=None):
    """Build and return a SimpleAction of the given 'type' ('type' may be
    None for a placeholder action filled in later)."""
    return TransitionGenerator.SimpleAction(type)
# Stores mentions starting or ending or both at a given token.
class TokenToMentions:
    """Mentions starting, ending, or exactly covering a single token."""

    def __init__(self):
        self.starting = []
        self.ending = []
        self.singletons = []

    def start(self, mention):
        """Record 'mention' as starting at this token (outermost first)."""
        if self.starting:
            # Nesting check: a later-recorded mention must not extend
            # past the previously recorded one.
            assert self.starting[-1].end >= mention.end
        self.starting.append(mention)

    def end(self, mention):
        """Record 'mention' as ending at this token (most-nested first)."""
        if self.ending:
            # Nesting check against the currently most-nested mention.
            assert self.ending[0].begin <= mention.begin
        self.ending.insert(0, mention)  # most-nested is at the front

    def singleton(self, mention):
        """Record 'mention' as both starting and ending at this token."""
        self.singletons.append(mention)

    def empty(self):
        """Return True if no mention starts or ends at this token."""
        return not (self.starting or self.ending or self.singletons)

    def __repr__(self):
        """Return a string representation of the object."""
        return ("Starting:" + str(self.starting) + ", Ending:" +
                str(self.ending) + ", Singletons:" + str(self.singletons))
# Generates transition sequence for 'document' which should be an instance of
# AnnotatedDocument.
def generate(self, document):
frame_info = {} # frame -> whether it is evoked from a span
initialized = {} # frame -> whether the frame's book-keeping is done
# Initialize book-keeping for all evoked frames.
for m in document.mentions:
for evoked in m.evokes():
self._init_info(evoked, frame_info, initialized)
frame_info[evoked].from_mention = True
# Initialize book-keeping for all thematic frames.
for theme in document.themes:
self._init_info(theme, frame_info, initialized)
# Record start/end boundaries of all mentions.
token_to_mentions = []
for _ in range(len(document.tokens)):
token_to_mentions.append(TransitionGenerator.TokenToMentions())
for m in document.mentions:
if m.length == 1:
token_to_mentions[m.begin].singleton(m)
else:
token_to_mentions[m.begin].start(m)
token_to_mentions[m.end - 1].end(m)
# Single token mentions are handled via EVOKE(length=1), and others
# are handled via MARK at the beginning token and EVOKE(length=None)
# at the end token.
simple_actions = []
marked = {} # frames for which we have output a MARK
evoked = {} # frames for which we have output an EVOKE
for index in range(len(document.tokens)):
t2m = token_to_mentions[index]
# First evoke/refer the singleton mentions.
for singleton in t2m.singletons:
for frame in singleton.evokes():
# If the frame is already evoked, refer to it.
if frame in marked:
assert frame in evoked, "Referring to marked but not evoked frame"
if frame in evoked:
refer = self._simple_action(Action.REFER)
refer.info = frame_info[frame]
refer.action.length = singleton.length # should be 1
simple_actions.append(refer)
continue
# Otherwise evoke a new frame.
evoke = self._simple_action(Action.EVOKE)
evoke.action.length = singleton.length # should be 1
evoke.info = frame_info[frame]
evoke.action.label = evoke.info.type
simple_actions.append(evoke)
marked[frame] = True
evoked[frame] = True
# Output EVOKE for any frames whose spans end here.
for mention in t2m.ending:
assert mention.length > 1, mention.length # singletons already handled
for frame in mention.evokes():
assert frame in marked # frame should be already MARKed
if frame in evoked:
# Already handled via REFER at mention.begin.
continue
evoke = self._simple_action(Action.EVOKE)
evoke.info = frame_info[frame]
evoke.action.label = evoke.info.type
simple_actions.append(evoke)
evoked[frame] = True
# Output MARK for any frames whose spans begin here.
for mention in t2m.starting:
assert mention.length > 1, mention.length # singletons already handled
for frame in mention.evokes():
# Check if this is a fresh frame or a refer.
if frame in marked:
assert frame in evoked, "Referring to marked but not evoked frame"
if frame in evoked:
refer = self._simple_action(Action.REFER)
refer.info = frame_info[frame]
refer.action.length = mention.length
simple_actions.append(refer)
continue
mark = self._simple_action(Action.MARK)
mark.info = frame_info[frame]
simple_actions.append(mark)
marked[frame] = True
# Move to the next token.
simple_actions.append(self._simple_action(Action.SHIFT))
simple_actions.append(self._simple_action(Action.STOP))
# Recursively output more actions (e.g. CONNECT, EMBED, ELABORATE, ASSIGN)
# from the current set of EVOKE/REFER actions. Then translate each
# action using final attention indices. This is done in reverse order
# for convenience.
simple_actions.reverse()
actions = []
attention = []
while len(simple_actions) > 0:
simple_action = simple_actions.pop()
actions.append(self._translate(attention, simple_action))
self._update(attention, simple |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You | should have received a copy of the GN | U Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class BppSuite(CMakePackage):
    """BppSuite is a suite of ready-to-use programs for phylogenetic and
    sequence analysis."""

    homepage = "http://biopp.univ-montp2.fr/wiki/index.php/BppSuite"
    url = "http://biopp.univ-montp2.fr/repos/sources/bppsuite/bppsuite-2.2.0.tar.gz"

    # Checksum is the MD5 of the release tarball.
    version('2.2.0', 'd8b29ad7ccf5bd3a7beb701350c9e2a4')

    # Build-time-only tools.
    depends_on('cmake@2.6:', type='build')
    depends_on('texinfo', type='build')
    # Bio++ component libraries.
    depends_on('bpp-core')
    depends_on('bpp-seq')
    depends_on('bpp-phyl')
|
main_id': DEFAULT_DOMAIN_ID}
new_user = self.identity_api.create_user(new_user)
self.assertRaises(exception.NotFound,
self.identity_api.check_user_in_group,
new_user['id'],
new_group['id'])
def test_check_user_in_group_404(self):
    """check_user_in_group raises the appropriate NotFound subclass for
    each combination of missing user/group ID."""
    new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
                'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID}
    new_user = self.identity_api.create_user(new_user)
    new_group = {
        'domain_id': DEFAULT_DOMAIN_ID,
        'name': uuid.uuid4().hex}
    new_group = self.identity_api.create_group(new_group)
    # Unknown user, valid group.
    self.assertRaises(exception.UserNotFound,
                      self.identity_api.check_user_in_group,
                      uuid.uuid4().hex,
                      new_group['id'])
    # Valid user, unknown group.
    self.assertRaises(exception.GroupNotFound,
                      self.identity_api.check_user_in_group,
                      new_user['id'],
                      uuid.uuid4().hex)
    # Both unknown -- any NotFound is acceptable.
    self.assertRaises(exception.NotFound,
                      self.identity_api.check_user_in_group,
                      uuid.uuid4().hex,
                      uuid.uuid4().hex)
def test_list_users_in_group(self):
    """list_users_in_group returns [] for a fresh group and reflects
    subsequent membership additions without leaking passwords."""
    domain = self._get_domain_fixture()
    new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
    new_group = self.identity_api.create_group(new_group)
    # Make sure we get an empty list back on a new group, not an error.
    user_refs = self.identity_api.list_users_in_group(new_group['id'])
    self.assertEqual([], user_refs)
    # Make sure we get the correct users back once they have been added
    # to the group.
    new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
                'enabled': True, 'domain_id': domain['id']}
    new_user = self.identity_api.create_user(new_user)
    self.identity_api.add_user_to_group(new_user['id'],
                                        new_group['id'])
    user_refs = self.identity_api.list_users_in_group(new_group['id'])
    found = False
    for x in user_refs:
        if (x['id'] == new_user['id']):
            found = True
        # Returned references must never expose the password attribute.
        self.assertNotIn('password', x)
    self.assertTrue(found)
def test_list_users_in_group_404(self):
    """Listing members of a nonexistent group raises GroupNotFound."""
    self.assertRaises(exception.GroupNotFound,
                      self.identity_api.list_users_in_group,
                      uuid.uuid4().hex)
def test_list_groups_for_user(self):
    """Per-user group listing tracks add/remove operations and does not
    affect unrelated users.

    (Fixed: corrupted tokens from the source dump; the removal loop's
    hard-coded range(0, 3) now uses GROUP_COUNT for consistency.)
    """
    domain = self._get_domain_fixture()
    test_groups = []
    test_users = []
    GROUP_COUNT = 3
    USER_COUNT = 2
    # Create the users.
    for x in range(0, USER_COUNT):
        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': domain['id']}
        new_user = self.identity_api.create_user(new_user)
        test_users.append(new_user)
    positive_user = test_users[0]
    negative_user = test_users[1]
    # Neither user belongs to any group yet.
    for x in range(0, USER_COUNT):
        group_refs = self.identity_api.list_groups_for_user(
            test_users[x]['id'])
        self.assertEqual(0, len(group_refs))
    for x in range(0, GROUP_COUNT):
        before_count = x
        after_count = x + 1
        new_group = {'domain_id': domain['id'],
                     'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        test_groups.append(new_group)

        # add the user to the group and ensure that the
        # group count increases by one for each
        group_refs = self.identity_api.list_groups_for_user(
            positive_user['id'])
        self.assertEqual(before_count, len(group_refs))
        self.identity_api.add_user_to_group(
            positive_user['id'],
            new_group['id'])
        group_refs = self.identity_api.list_groups_for_user(
            positive_user['id'])
        self.assertEqual(after_count, len(group_refs))

        # Make sure the group count for the unrelated user did not change
        group_refs = self.identity_api.list_groups_for_user(
            negative_user['id'])
        self.assertEqual(0, len(group_refs))

    # remove the user from each group and ensure that
    # the group count reduces by one for each
    for x in range(0, GROUP_COUNT):
        before_count = GROUP_COUNT - x
        after_count = GROUP_COUNT - x - 1
        group_refs = self.identity_api.list_groups_for_user(
            positive_user['id'])
        self.assertEqual(before_count, len(group_refs))
        self.identity_api.remove_user_from_group(
            positive_user['id'],
            test_groups[x]['id'])
        group_refs = self.identity_api.list_groups_for_user(
            positive_user['id'])
        self.assertEqual(after_count, len(group_refs))

        # Make sure the group count for the unrelated user
        # did not change
        group_refs = self.identity_api.list_groups_for_user(
            negative_user['id'])
        self.assertEqual(0, len(group_refs))
def test_remove_user_from_group(self):
    """After removal, the group no longer appears in the user's group
    list."""
    domain = self._get_domain_fixture()
    new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
    new_group = self.identity_api.create_group(new_group)
    new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
                'enabled': True, 'domain_id': domain['id']}
    new_user = self.identity_api.create_user(new_user)
    self.identity_api.add_user_to_group(new_user['id'],
                                        new_group['id'])
    # Membership is visible after the add...
    groups = self.identity_api.list_groups_for_user(new_user['id'])
    self.assertIn(new_group['id'], [x['id'] for x in groups])
    self.identity_api.remove_user_from_group(new_user['id'],
                                             new_group['id'])
    # ...and gone after the remove.
    groups = self.identity_api.list_groups_for_user(new_user['id'])
    self.assertNotIn(new_group['id'], [x['id'] for x in groups])
def test_remove_user_from_group_404(self):
domain = self._get_domain_fixture()
new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': domain['id']}
new_user = self.identity_api.create_user(new_user)
new_group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
self.assertRaises(exception.GroupNotFound,
self.identity_api.remove_user_from_group,
new_user['id'],
uuid.uuid4().hex)
self.assertRaises(exception.UserNotFound,
self.identity_api.remove_user_from_group,
uuid.uuid4().hex,
new_group['id'])
self.assertRaises(exception.NotFound,
self.identity_api.remove_user_from_group,
uuid.uuid4().hex,
uuid.uuid4().hex)
def test_group_crud(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain['id'], domain)
group = {'domain_id': domain['id'], 'name': uuid.uuid4().hex}
group = self.identity_api.create_group(group)
group_ref = self.identity_api.get_group(group['id'])
self.assertDictContainsSubset(group, group_ref)
group['name'] = uuid.uuid4().hex
self.identity_api.update_group(group['id'], group)
group_ref = self.identity_api.get_group(group['id'])
self.assertDictContainsSubset(group, group_ref)
self.identity_api.delete_group(group['id'])
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group,
group['id'])
def t |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, | or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even t | he implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Module for managing Linux firewall feature from Python, using sudo to run
ipfw program.
NOTE: this only works on Linux with firewall option enabled in the kernel.
"""
import sudo
#import socketlib
def port_forward(srcport, destport, rule=None):
    """Forward TCP traffic arriving on *srcport* to *destport*.

    Handy for mapping privileged ports onto unprivileged ones.
    Stub: always returns ``NotImplemented``.
    """
    return NotImplemented
def add(rule, action):
    """Install *rule* with the given *action*. Stub: returns ``NotImplemented``."""
    return NotImplemented
def delete(rule):
    """Remove *rule* from the firewall. Stub: returns ``NotImplemented``."""
    return NotImplemented
def flush():
    """Drop every installed rule. Stub: returns ``NotImplemented``."""
    return NotImplemented
# XXX some day make this complete... :-)
class Firewall(object):
    """Placeholder wrapper around the host firewall configuration."""

    def read(self):
        """Fetch the active rule set. Stub: returns ``NotImplemented``."""
        return NotImplemented
class IPChains(object):
    """Placeholder for legacy ipchains support."""
if __name__ == "__main__":
    # No self-test yet; this module is import-only.
    pass
|
#requirements: selenium wget python2.7 Geckodriver
import time
import sys
import wget
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
def googlescrape(str):
browser = webdriver.Chrome()
browser.get(url)
time.sleep(3) # sleep for 5 seconds so you can see the results
results = browser.find_elements_by_css_selector('div.g')
if len(results) == 0:
print "No results found"
browser.quit()
else:
for x in range(0,len(results)):
link = results[x].find_element_by_tag_name("a")
href = link.get_attribute("href")
print href
wget.download(href)
browser.quit()
return
if len(sys.argv) == 3:
domain = sys.argv[1]
ftype = sys | .argv[2]
url = "https://www.google.com/search?num=100&start=0&hl=em&meta=&q=site:"
url += domain
url += "+filetype:"
url += ftype
url += "&filter=0"
googlescrape(url)
elif len(sys.argv) == 2:
for i in range (0,3):
if i==0:
print "Checking for pdfs..."
ftype = "pdf"
elif i | == 1:
print "Checking for docs..."
ftype = "doc"
elif i == 2:
print "Checking for xls..."
ftype = "xls"
domain = sys.argv[1]
url = "https://www.google.com/search?num=100&start=0&hl=em&meta=&q=site:"
url += domain
url += "+filetype:"
url += ftype
url += "&filter=0"
googlescrape(url)
else:
print "Error: Improper number of arguments. Usage: python search.py domain.com pdf"
sys.exit()
|
est.TestCase):
def test_parser(self):
"""Test parser selection.
Test to see whether we are selecting the correct parser.
"""
tx = BytesIO(open(
os.path.join(BASENAME, 'resources', 'tx-9c0f7b2.dmp'),
'r'
).read())
connection = network.Connection(None, ('host', 8333))
message = connection.parse_message('tx', tx)
self.assertEqual('tx', message.type)
self.assertIsInstance(message, messages.TxPacket)
def test_misc(self):
nc = network.NetworkClient()
self.assertRaises(NotImplementedError, nc.run_forever)
v = mock.Mock()
c = mock.Mock()
nc.handle_version(c, v)
self.assertEquals(v.version, c.version)
class TestConnection(unittest.TestCase):
    """Tests for Connection and the network client's connection pool."""
    def test_misc(self):
        # disconnect() on the bare Connection is not implemented.
        c = network.Connection(None, ('127.0.0.1', 8333))
        self.assertRaises(NotImplementedError, c.disconnect)
    def testConnectDisconnect(self):
        """Connections are tracked as they are opened and closed."""
        nc = network.NetworkClient()
        nc.connection_class = mock.MagicMock()
        conn = nc.connect(('1.2.3.4', 8333))
        nc.connect(('1.2.3.4', 8334))
        nc.connect(('1.2.3.5', 8333))
        # Connecting twice to the same endpoint is an error.
        self.assertRaises(ValueError, nc.connect, ('1.2.3.4', 8333))
        self.assertTrue(('1.2.3.4', 8333) in nc.connections)
        self.assertEqual(len(nc.connections), 3)
        self.assertFalse(conn.disconnect.called)
        nc.disconnect(('1.2.3.4', 8333))
        self.assertTrue(conn.disconnect.called)
        self.assertTrue(('1.2.3.4', 8333) not in nc.connections)
        self.assertEqual(len(nc.connections), 2)
        # Disconnecting an unknown endpoint raises and leaves the pool alone.
        self.assertRaises(ValueError, nc.disconnect, ('1.2.3.4', 8333))
        self.assertEqual(len(nc.connections), 2)
    def test_roundtrip(self):
        """ Do a full roundtrip of the network stack.

        Use Connection to serialize a message and GeventConnection to
        deserialize it again.
        """
        network_client = mock.MagicMock()
        connection = network.GeventConnection(network_client,
                                              ('127.0.0.1', 8333),
                                              False)
        connection.socket = mock.Mock()
        p = messages.GetDataPacket()
        connection.send(p.type, p)
        # Capture the serialized bytes that send() pushed to the mock socket.
        wire = BytesIO(connection.socket.send.call_args[0][0])
        def recv(n):
            # Closure reads the *current* `wire`; rebinding `wire` below
            # redirects subsequent reads without re-wiring the socket.
            return wire.read(n)
        connection.socket.recv = recv
        message = connection.read_message()
        self.assertTrue(isinstance(message, messages.GetDataPacket))
        # This should produce a short read
        wire = BytesIO(connection.socket.send.call_args[0][0][:-2])
        self.assertRaises(ValueError, connection.read_message)
        # This will raise a non-matching magic error
        wire = BytesIO("BEEF" + connection.socket.send.call_args[0][0][4:])
        self.assertRaises(ValueError, connection.read_message)
class TestGeventNetworkClient(unittest.TestCase):
    """Tests for the gevent-based network client."""

    def test_init(self):
        network.GeventNetworkClient()

    @mock.patch('bitcoin.network.gevent')
    def test_connect(self, mgevent):
        """connect() returns a connection and spawns a greenlet."""
        nc = network.GeventNetworkClient()
        conn = nc.connect(('10.0.0.1', 8333))
        self.assertTrue(conn)
        self.assertTrue(mgevent.spawn.called)

    def test_run_forever(self):
        nc = network.GeventNetworkClient()
        nc.connection_group = mock.Mock()
        nc.run_forever()
        self.assertTrue(nc.connection_group.join.called)

    @mock.patch('bitcoin.network.socket')
    def test_listen(self, msocket):
        """listen() binds the socket and adds one greenlet to the group."""
        nc = network.GeventNetworkClient()
        group_size = len(nc.connection_group)
        nc.listen()
        self.assertTrue(nc.socket.bind.called)
        self.assertTrue(nc.socket.listen.called)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(nc.connection_group), group_size + 1)

    @mock.patch('bitcoin.network.gevent.spawn_later')
    @mock.patch('bitcoin.network.gevent.spawn')
    def test_accept(self, mspawn, mspawn_later):
        """An accepted socket fires the connection-established handler."""
        nc = network.GeventNetworkClient()
        nc.socket = mock.Mock()
        connection_handler = mock.Mock()
        nc.register_handler(
            network.ConnectionEstablishedEvent.type,
            connection_handler
        )
        # First accept yields a peer; the second breaks the accept loop.
        nc.socket.accept = mock.Mock(side_effect=[
            (mock.Mock(), ('10.0.0.1', 8333)),
            StopIteration()
        ])
        self.assertRaises(StopIteration, nc.accept)
        self.assertTrue(connection_handler.called)

    @mock.patch('bitcoin.network.gevent.spawn_later')
    @mock.patch('bitcoin.network.gevent.spawn')
    def test_accept_idle_timeout(self, mspawn, mspawn_later):
        """An idle connection is reaped and the lost-connection handler fires."""
        nc = network.GeventNetworkClient()
        nc.socket = mock.Mock()
        connection_handler = mock.Mock()
        nc.register_handler(
            network.ConnectionLostEvent.type,
            connection_handler
        )
        nc.socket.accept = mock.Mock(side_effect=[
            (mock.Mock(), ('10.0.0.1', 8333)),
            StopIteration()
        ])
        def spawn_later(t, func, *args, **kwargs):
            # Parameter renamed from `callable`, which shadowed the builtin.
            func(*args, **kwargs)
        # Wire the idle timeout handler to be called immediately
        mspawn_later.side_effect = spawn_later
        self.assertRaises(StopIteration, nc.accept)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(nc.connections), 0)
        self.assertTrue(connection_handler.called)
class TestUtil(unittest.TestCase):
    """Tests for the DNS-seed bootstrap helper."""

    def test_bootstrap(self):
        res = network.bootstrap()
        self.assertTrue(res)

    # `mock.patch` rather than bare `patch`, for consistency with the
    # rest of this module.
    @mock.patch('bitcoin.network.socket.getaddrinfo')
    def test_bootstrap_fail(self, getaddrinfo):
        """ socket.getaddrinfo may return None. """
        def side_effect(a, b):
            # The first seed fails to resolve; the others answer.
            if a == network.DNS_SEEDS[0]:
                raise socket.gaierror()
            else:
                return [(2, 2, 17, '', ('68.48.214.241', 0))]
        getaddrinfo.side_effect = side_effect
        res = network.bootstrap()
        self.assertListEqual(res, [('68.48.214.241', 8333)])
class TestBehavior(unittest.TestCase):
    def setUp(self):
        # A fake network client plus one outgoing connection to exercise
        # the client behavior handlers against.
        self.network_client = mock.Mock()
        self.network_client.bytes_sent = 0
        self.connection = mock.Mock(incoming=False, host=('127.0.0.1', 8333))
def test_client_behavior_init(self):
network.ClientBehavior(self.network_client)
args = self.network_client.register_handler.call_args_list
types = [a[0][0] for a in args]
# Ensure we have at least handlers for the connection established event
# and an incoming version message
self.assertTrue(network.ConnectionEstablishedEvent.type in types)
self.assertTrue(messages.VersionPacket.type in types)
def test_client_behavior_on_connect(self):
b = network.ClientBehavior(self.network_client)
message = mock.Mock(type=network.ConnectionEstablishedEvent.type)
# We should not send anything on new incoming connections
self.connection.incoming = True
b.on_connect(self.connection, message)
self.assertFalse(self.connection.send.called)
# On outgoing connections we initiate the handshake
self.connection.incoming = False
b.on_connect(self.connection, message)
self.assertTrue(self.connection.send.called)
def test_client_behavior_send_verack(self):
b = network.ClientBehavior(self.network_client)
b.send_verack(self.connection)
self.connection.send.assert_called_with('verack', '')
self.assertEquals(self.connection.send.call_count, 1)
def test_client_behavior_on_version(self):
b = network.ClientBehavior(self.network_client)
b.send_version = mock.Mock()
b.send_verack = mock.Mock()
# This is an outgoing connection, so we should send just one verack
self.connection.incoming = False
b.on_version(self.connection, mock.Mock())
self.assertFalse(b.send_version.called)
self.assertEquals(b.send_verack.call_count, 1)
# Now on an incoming connection, we also respond with a version
self.connection.incoming = True
b.send_verack.reset()
b.on_version(self.connection, mock.Mock())
self.assertTrue(b. |
# -*- coding: utf-8 -*-
'''
Created on Nov 30, 2014
@author: Michael Große <mic.grosse@posteo.de>
'''
from collections import defaultdict
from receipteval.item_cat_dict import ItemCategoryDict
class ReceiptCollection(object):
    '''
    Collection of purchases with evaluation and output options.

    Each category record is a list [own_amount, item_name_set,
    hierarchical_amount]; categories form a colon-separated hierarchy
    (e.g. "food:fruit" is a sub-category of "food").
    '''

    def __init__(self, purchases=None):
        '''
        :param purchases: optional list of purchase/receipt objects.
            The previous signature used the mutable default ``[]``,
            which was shared between every instance created without
            an argument; fixed by defaulting to None.
        '''
        self.categories = defaultdict(lambda: [0.0, set(), 0.0])
        self.purchases = [] if purchases is None else purchases
        self.unsane_items = []
        self.unsane_categories = []
        self.category_dict = ItemCategoryDict()
        self.total = 0.0

    def collect_items(self):
        '''
        sort all positions in our stored receipts/purchases into their
        respective categories, then run the sanity check and recompute
        the grand total
        '''
        for purchase in self.purchases:
            self.unsane_items.extend(purchase.unsane_items)
            for item in purchase.positions:
                self.categories[item.category][1].add(item.name)
                self.categories[item.category][0] += item.price
        self.check_sanity()
        self.calculate_total()

    def totalize_categories(self):
        '''
        store, in slot 2 of every category record, the sum of its own
        amount and the amounts of all categories below it in the
        hierarchy
        '''
        self.initialize_super_categories()
        for category in self.categories.keys():
            catsum = 0
            length = len(category)
            # NOTE(review): plain prefix matching also counts e.g.
            # 'foodstuff' towards 'food'; kept as-is to preserve the
            # established output -- confirm whether a ':'-boundary check
            # is wanted.
            for cat in self.categories.keys():
                if (cat[0:length] == category):
                    catsum += self.categories[cat][0]
            self.categories[category][2] = catsum

    def initialize_super_categories(self):
        '''
        ensure every parent of a colon-separated category path exists
        (with amount 0) so hierarchical totals can be attached to it
        '''
        missing_super_categories = []
        for category in self.categories.keys():
            # Only categories that actually have a parent ("a:b" -> "a").
            # Previously top-level names were sliced with rfind() == -1,
            # which fabricated a bogus category (the name minus its last
            # character).
            if ':' not in category:
                continue
            parent = category[:category.rfind(':')]
            if parent not in self.categories.keys():
                missing_super_categories.append(parent)
        for missing in missing_super_categories:
            # Walk up the hierarchy, creating every absent ancestor.
            while True:
                if missing not in self.categories:
                    self.categories[missing][0] = 0
                if missing.rfind(':') == -1:
                    break
                missing = missing[:missing.rfind(':')]

    def check_category(self, category, item):
        '''
        make list of conflicting categories
        '''
        stored_category = self.category_dict.get_category(item)
        if category != stored_category:
            self.unsane_categories.append((item, category, stored_category))

    def check_sanity(self):
        '''
        make list of items belonging to more than one category
        '''
        all_items = set()
        for category in self.categories:
            # Skip the unnamed category.  String comparison uses '=='
            # rather than the identity test 'is' used previously.
            if category == '':
                continue
            for item in self.categories[category][1]:
                if item in all_items:
                    self.unsane_items.append(item)
                    self.check_category(category, item)
                all_items.add(item)

    def calculate_total(self):
        '''
        calculate the grand total across all categories
        '''
        self.total = 0.0
        for category in self.categories:
            self.total += self.categories[category][0]

    def get_ledger(self, date='1900-01-01'):
        '''
        create output in the format of the ledger application

        :param date: ISO date string; receipts dated earlier are skipped
        '''
        ledger_output = ""
        for receipt in sorted(self.purchases, key=lambda t: t.date):
            if receipt.date >= date:
                ledger_output += receipt.get_ledger()
        return ledger_output
|
S8oYyypOWc9zADFcJOExm1sRDBpIEz67oDLXAre6fwBluRTwRE9J9FK2ivtJJaJ
oymobILSiGE2pI+a4uzonRD1Z/yzblZF8/k7HoT3WEihXi8LFa5xClNcU8Q8+T2JlMCgkuGPsIgm
r+ooelPj4QhS0IxAa256tLk9wknDlKhvZlAEmiOYBoqh8ZKzl16rz98Z9VJGRTSRF4XRw+cxNA40
pEE0gOHBoCmORC1fyJZg1lZFfnbM2qBkP/KJcRCY8h3gwKS6HL2TXfNHeUI/AP2K5IFwN1i6DtNN
5CmEA2sB0g0sdkEUCcgBsKYZDCBMiWQYmEvieLBeDA5sWDY2AcTgrtEJdcij1KATVbeBB0YlhWUN
Zb4oEicDU1F24XIFCrDJCd15Q8FFSPH5+pafDgX1QhoyS8ZFvnsnM3v3jnmY0ljpyVL1CCFPcIVB
19yJF6V6wqRuOixXgearbDhBw1EcbqF66kHHOSVKWWFC1ruocgZJRnMikmKE2N5jRUsOkHpjkufV
07B8/jmjkxyWLOBVGgyMiUtiCUb519s1TQ4Hqep6E+SzpBfsYLMoMhfC2hc4En5KBYyBCxR5U90u
FOP12aGZN+8EghLOIwAjBVCeXcovgI6oe30WouNhpqxxMLgDfOkV4mYcV3xaFGuRiBM7gwgZjyhL
ioGHpk7dcYVDAhVO3c45KldfYTlDS3Y+A9g+TNll3OBEbjD3Jz3pjAF9HJWRKBRBNxZA7IGEs+CQ
jzVqkHq+X13Smg6R9w+mTZqBArhUSYR83vSMYJO6Gv72CmRIf/fQ6V47NRvdcsEKS4kRhmpSs0id
N/p4mNAN2nIhT/0K6ozCEyWUEmbeFAekPmglk0ADqR+Q/ZT1T5ueIZPIMPXtMRs6VupNov7RlvfL
BIR+fNQ7QfZThFgkDXOm1x0Fp5SwMqZ8VczvrLanlBnVSyvR0A1Dffn0IAsQBrV23M4wTIkbX9Nq
YQx2fOAZ1gmqRLSGJQQXtSdFLPTNlXoqle8DBvlIk9MLTxRab2693NXzykKkaobbd350/zMwNV0I
Qw/8NSbNwlzIdx6qMPMlKANomK2hIcHBwuQU5U8c27Um3r3jIUHCY4qpStkW67aqz88RDywhXQwE
VkJX94n8UdsyTX/GQRWthhOSSniM5PtileBfFqTrIvoNKvi6gdLHsV1IwlEzsHYwDZd/BOelmPbo
zFZFW1jTasNiQ0+njUwHDAKXqlx9uOTUU+Qgcw2hUfHTd+/0t5k64em7d+4T1E/4i1cEzqHUwHC/
B5EkZS5Y+itjTT1c/8MKqc2tWi0unb0PO05cHnGqk0UiI4fO44XqctwiiiJfXpj4e0KCZEzbAIoQ
b | 2CY+hSfFvyYiBR9u86xBhSbN1LY04LOp5bZMN3ZrGry39FM+MhbrXsMN4S4XWzN7YPnwNCyyqkA
GyXA5DhDDXs8tA4mPaQqCe+6JCu/x7fFu33Zlmxk4sQbdk4biVJVZMXfPc5DxrSVpOEQcuYOrkHt
GvW8WBcN7NmCDYSrosuxrzWsahElVwCjBNsgRaIFNILJRl4jGKbluEZ3Sj+Ai5ImA+JZacE/3PH1
5AGLFvxtRoLNDvxELFkm6Iwir1ytGwyycwKRZkPmwEJppWqU4qbzKMCzzFTHFsh38xC34WEHtt2q
vnZVXK0fMsPQ4gH9zctqS9bdMt90XL+qUJXyWG+yVSRm1Vog2+4nhHdkQMtzZxi6as3UzApVOqVI
2m | DUoo9YTViCQIQztjro6oPT4gC/tcZIFEMs+SHM0EVKyajCl86uQIcC9W+NdhHzSl2d0jg1UEDU
IUCWF8vdNoVy5aM6Em5yWtdVka+P9Dvb6xqORkOxLqywOv4BFT9jJU71uKFPKbsOt098CVJuuZrp
QtIWbbWg24J9TGinuFN8XnnLTsc8Qm20KsYUHYcYkwDrMgruOx+FxPOoy7t3w5BNqx5gnUHK6iVN
8907bDsGUO3c8IFzrKHgtN+9+3jyVbRrCCNEeKYDll5UIPs0LJ40Es1hElbaW3GT49WFrB6vnTDK
SQ46a7fFmjypdF8ITL4Jnq22ZpGuVs0xU1pPIHlF5HCgREIbdrIYt9dlIdonbwmCCKlGlmdXOT+R
VosmewO/s8apnLoTFUtkOKDVXXo/R1JBfuc7jj3wT2BCz9dn9bvBs2nWcIfTOWQeKG+SSNYQo+c+
LAg5v0Cze+PMjjZgv5Oj2U7bpoX9EBeJMlnyLfyeNGwZjdlc360hnEd5eMjpm5p7tz6PkJaun2RG
Pj2MvNraptf1Ra14I4YuiiUnxvn3jVvLFCZ1tdRXdPLGB8dp/HCY5mGoMCJGoueNucOrNyBaijO8
R8Gopt5dY3GzqfJ1rqvBcv+yRekHKvVZXlZcd4UWAq0b2VXhsXYVP3o/p1Y6uwXZji7VDAPjBpWH
mgsutpJ5IX9pk0kqYJ+SDwP9zS0lBudr8wEB+qRcf4KCkas3qt5FC4oUOX1NGVxkgwiCS+k26LtF
NzZ2IUgk8Fd8T9hW5Xl3Ud3O2J9Hb1Qhtrgytg9CVclut1dXeXNrMdcfiubK9Vm1LcA04YqVog0m
TtyCsMwFV1PMq/QHI0WewQKvH4pGUyHxAAxMCggO2S5G3KpsgWpu+dKJgeACa3Gi8OzNMnv+VBme
LCfiCX0Gbt5MAiFwgObBed3A7oKu13QVJqU1pGV/KJpTLGlJRdPPyLdrjzo04C4RoxaxEApJ1AcM
ybn2wftbvG5FestRcLODBKOiFSoEijW5viwHaXadN6g2gjHZtvk5VjCggnxK+TxrA+qnkXTS2/IR
YuX9SAPLCZzsJGd45QKbUGnAR8lpjRl4Z2KJoPcctl3nuZRc0UANKDHBXckKJfkiu7LKG4L+UE/6
h7BrV/WS5McPKyRlFAm+QBccMZhEfvbpVsXkr9R7eeINYTVKwFkEOjDA78G5R6GzJo6Lr/zkQnpV
nG4tP/IP5+Wj+8aFiicpVhKAM4t6MU6qerId/xWd16jHqc4Bv7QsiNaagJ2OQhh1kiVyrbAvWCOB
OrV9oJcS08zQMag5GQWo2ke6fR+mdiEwULA2yPJkfX4W9bgkjAi2SUm2cEXPPiHbeZx9mqqRry8K
jqXK18ZBASe35UDrlSpBDV9vahKaFIp1KgddZkFhHUwk2u3gXCCRASND0svIwcFEwc7Nux5rC8+6
Q1VeFtEU4+Qz/frA1JLR7//92z+lWEX0v+pIw//hLaFku2beR/5XScjONyUHG/6Pb/9Y2SVy4t7/
hzf/x59xrCFItWX9QeQK6qPSpJVnfrbqRsxcgxNHY9m9ONtimAWWJedMYvX+s5Tqm6gEY8xXO82X
l+oDng++dDCxtYBMHKfS6hlHcbwq3m+p9TMwDOUzNKu4KyIFeXxGjF56vhEh9IpoHv99BlP/Spjy
PqFe50293agUkQbjY+iTZCqeWnmWiD60YqGmBweCwQPB3tTkFXPsx3wKSiggDZNvpzMVBMLPQpm2
GC86n/o7gtohRnn2YWP07nwqbdXXO+eIYYhDE7TmNsXGn2TdDVYmRa/vh7yZT4EUp/6E9WSJFO0K
PkjxGiI7/wRieA00tfROV5ASezTXpbDp74lV+yajR+qxdqZTExALaHAclMRSuXgMBiT/nJv8KnA9
M9E1ExM9KJdihPVSKUYQcHHKT8PyVJWtiQZa2XHNVKpljxcG306cDBSpDLIm7M5NkBeFaSUu/vp1
6GXeb2BSfTxOJlzvgU4MGFjSOLHPk4DkevXMh9TrMvCtUnxmZHTY9R6oPKHdBHNkrT/dhmxMzSNj
Q1EuYq3US/Wwn/2UMhWmQFNHVVimzl66yTV9ik8rFunw3DJrpO5a40XwgQ6gxHIGDaOEL5AYu7OI
/UJSdWCF3f2SEPwCsjWMegNZw7Uh+gsg8IhW+sX9kobDxAX8ab3dwH4l/8kBCx7+SAwI590HMOvR
DjePSLnagFVjQqjSeYiO22Iik1WPO5GWGSAB/XPPGP4+ddRUz7drVBHwHluL0tTLKpCmMnlAi5oM
HSIvpShH3zuizumUyReTnv+119JZOadWVFJ8lv1q6u/e8674JZLquh6u2iXfz/vl1QZyf1V7G/oD
tU7zgZuzLyaVhSqssYkvsMO8vTq03in35W+iljuTucwU9NR71pirx2l8Zi8omeKJFC9yW796+vLF
qzeLtz9//uyZ1cX+uLcHisu49WDU9FL1oGrRJsvGf1ZW9e3XuDIbNlJMWwoQWHswow8OFVKig+jR
IQiAe9E333zzRbBklWJ2einH5RF3PhnIRsVGukrW/cOfrKL7LZWvLx884oEHno4tsTLXoz2py1Ro
e/rNl796+dXT6KsXT7588/zF19Hbr3/59Ytffz3j6ukX9TUZYJiIQWoDvWSSd4oYA4my7P3C9L3P
P/88HkWLoui23jbLgiuI8W6me6An/uKLLwA78P8xIYjGHceRnlqWZb0Sx2F2F+Z26QBecRPkVGSc
WLRYlWdnYAEiLFnvMKv02NI5FpKzz0cqVdam366n+1QRKFE+LGRhfIxIFddsF9X0xGGtwDWXCxIR
wys8nr79+uk3L58+efP059HTb548fYmkc8SkuqMC2aZJnFnxqOnJ8GjaOMnwviu3DNLkk32mLnqV
rzKFlKGR4oyqwueQ/HdkrZKyjigmJ9tZPfD8EAnws5a1PVZApsdCDyfCBLgyPoohV93RuqIzL9F/
kLFb+s+APjGgBdyLWkBQe3YbvXONvXd2yTE7NZnXAYYgRqcDyf32d0b9u9L1uEWbXkgegP2Ul5oo
wkjSkVxbLmYvEITAz65Q0bY+I1fFnHUfhDjHf+5U8xXlXNWKTqgWJo8EWxmbNOKiYexgjXAHXV7J
aq85rqKq5EP73UmkJwxB6WpLaSFthbbbq+nLFW2FiLhaNZg+C75M/FD4FixdoNA5OqtyWJIC//Sr
r56/fP389czTsuAcos0CDctllxg0z/3V4FeMJz7bs36J3kW9XpAPi2oxzpTHHEW4S9rKfAvSsgpW
3I+WVQ16xo/7voBKJOiXopewOvVMGxtzOgkj47HUn84GjDATF6hMSD/vkv4wxy90XjyPTYKvDx6f
zKDbR9O28hpzBiXO4a70Yi1/ixVkjOtX/Oo61JHjqq8KfOyobK/sBOqVo5cq6U2f2zXwlEWmG6J3
+RV9mmiinI2ek9SxbznAYy1TyKgMgYotlIhHpZLvZkP2oXPR2n+CjOYkGkZ7pGp6FFeb7la7qHoD
8quCttVMYFj3F6cBIWMWsW35/n96+6c6ioKuAKr6/P3//OZv/oj9lfAXaMnoVCoOyDVHqbNeCAHd
HCAFojiXa6mcKzpPuCY95ayRX1U7JL+TU7B//zcF+aoXoHz2kow04JeLDw50D/QSqT8P+G/POWee
jbT8cReW044kiuee05kRCodo4BAKBWswVNbzugVTmszi5p7zTH8z0TXkKE8DqyXwVbDqCVvTVhjm
v6Z3v5KbVanKV1Dxd9VOFe5XpbLU5W1MvSnP3i4bAF2kOhcVaNBwoMM1IPIR2lRkI/y/3L1tkxtJ
kiY2pi+yg2Sn+3Aymcl0dzngcpFgo8Aqsqent5bgLJvN7qGm+XJk8XpG1XVoFJBVhS0ACSIBVtX0
9pn0Rb9Hv0c/Qr9B/hrhERmJAtk9djL17rASmREe7x4eHu6Pn25IdvbOYMp/bePe0PN35bkrVuh3
42xpJ/K8RrS7m9OYrQRMtbDlQztVSIlU6zYrA8qrvlS9Fjka40fU6rm9fZuFa2HcMjG8pcsKa4QH
fHSKCgEyCkQrC3oh1dBYVcf0EkEifAQL3sJwpuk3fuM+rtZB3BS295jQKZCzHByaI/GiuHIUMZml
pmgVPslAiq+xRC64fiziQFJ6jDxMHGITVd5+0gpJWtzzevow7f2oeFc0XaxaQA3tjEARJUgeCM8B
fdnJjzupwsJYVUEFQjh73+3avy1zSOl0GCXrnAUcp4zWBcg2es3a1niBRvpMdz9gLga8wtkxDX1i
9MtiReEBQn7hKkDoKmjGgAF6VzeqlYZ1zSctttpgk4ZZuUDbPYuwYwJSUCi4Yd4mjQOpZGxepQjy
E0GC2yp3kwpwLW2L2iNRupSNyZMl+bDn0ORyA5KpCzfOW9+tTdaWGKamOTtsmBEwNCN9aIS6ukLN
0FRrynBBUyPisbpllGxgxAYTTNvw7mEYuISqgTfRFIsKr4nb4vFEX9iCPKUDsFw7OhfgzXP/Fnui
gVTHMO9JQR1THZuwXnTlNaHTSee6E8NbchcIEpbU2HeMZz2Wyp+bqXQ6YZ4tfWByGTxem4sPrb+g
xvIV5dflxxE6fnDSrXENtwx0Gt82j0Ivv+Y5tCALj8ae2t4jZuTb37QT+J+7dVjzjiR2l2FnNlbh
7dYqANM7vDsRBV8WVSbUQnxqp4fmMO6yKmJO+HO8GlWWOznTmY77WuNQdYbGCeEQIFJ5jaM1czMh
1h5fTQ65R8Lrie52tsYsrfPbDoOlaSPhbPW/vPtXaC1BXfD+3x39N/8dnala4ocHrxelD7pLe694
5D1/hdfQJRtAo30tZpOTU3VTwfkVgSIwv+7ZHA/pGQP3oamOxZbbIVo6zIg+7oQrnBQZ9EEehoCH
J1E/3X7MreXc8XhcywfH3B2ytTOJyzkfLTkAGBEiyKLbw0HdIRW2z3HrGb57a/h43EXax3erE1lg
+cc37JPbVA9h1WoNr0C6wMkCxFCFREkeHPIEkuiITPNh6uXB791bwseWtz7tV+/e/qUH0hvrJsaT
bLKafgDmgGbVQOjFs6+fv3uBqMDzKtssCIBrOtKgpw9sRY6+fv6GyT/YT7/+4vfJ979zbynKSY+Q
STVS6ClJG39o/RwslhfoChJKuKTwGP11CuLnclV+mOIZ2V2tBAuU/HU1eGv2+tXb53+W9ehCQ4zQ
v/uMzNWXBRuHdyhJJ1OAsyx7gk44mzGi6PL9lFGHb06lttGi9kZq+JcRSAf8l+OPUCkPODNpDM2E
FY4Z2EtEu6AHB2zHAXM5HFsIahkdOPim2/FyrIqU0jLqd5WzsD6UhmqbQ2bLbJmGNezAiCaJgBdh
ABpDUPadRbktEo29sbGdd4z3uruBqkId5ETH05BYPl3NQX1yumtuv1tcLsqrxTNMcHeCfAHfx+jh
lJE6CK/7c2bjudDvZfyi18ADfup4Ng3nYAWt7TRwjI5wos4h4ZqChMS2q9BdP3frYkCtb7i5VN9U
h7pvJtS9CUfiL3E40mWF8RQvrwzUXOBbPfKwACTW01QcLWQb9YadaBm6hCPYmOJVBlEmhkN393gB
i5vijwan8qapQXdXcR3jmUF9lJgbIdhwDSK2Yc82M6vHgGBwKD2VgMt6MVoHt7daO8klXKfbEMY2
ivlc76GEygQJwhe5pQ34XiPEsAup667ipX4WSrm7JaxB9hJWgGdzUYe6jw1xbhGbEcuHGSIFEjYA
fEHD7hEyfGTTMiytxJVhE8/Cx10E+Eao62baduc+pofmQCXbo+RxD9aNrfg9xgwCCfDuivQgONu9
1MbYj91bp0mwkPC/VquwexPvtyAV//t3/1ZvHIZAG93MZtDq9//h6P/617/5DYq6jCJ6CvL8nnxV
nzQFgZTAOT43hbrDiYXW4S32cjWfHw/2+7/rf6Gwrmjz/LD/4P7D/sMsL2d4t0SgPoT8CuJJi4Ji
LiYC7HUO4rl4tKBb7fDJm2+fvkIzm6NnsLd86EO/csjNAi27K4xHis6qhI00KQs2S8I6ZS7KF24N
rZZtgEtYbZZqs431fND/HQgTM3gW9ERUsC7OC/U5G61bOLfniFbZBZrfKJdE0H9TgN6sGE92saV3
MYzYD7NFZTBWa/ZEHJgF8ZEB/HFG5FjW0QXjEVZaCHXDFRoUIfjIAi+E0NL/ikpE4IVSwjiUlbgy
tTxWaADBi/AHGQFoYTVgWWRtqCfOyfttcfmmF4+Onnz1GA5HMPaIl1iy15Dcybg8WZvxIsvl2sNc
DrXoXEAZOSp1uUKQJpQ40DCgc69z25Gg23fNH4S90Wq9Qm/Hnu5INC7azF7m0jl4gfNSA11QfLBJ
6dxHTXUF04OxGNmlW3zmDA5slflatRy |
rror(
"No starting revision argument is available."
)
def get_revision_argument(self) -> _RevNumber:
"""Get the 'destination' revision argument.
This is typically the argument passed to the
``upgrade`` or ``downgrade`` command.
If it was specified as ``head``, the actual
version number is returned; if specified
as ``base``, ``None`` is returned.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script.as_revision_number(
self.context_opts["destination_rev"]
)
def get_tag_argument(self) -> Optional[str]:
"""Return the value passed for the ``--tag`` argument, if any.
The ``--tag`` argument is not used directly by Alembic,
but is available for custom ``env.py`` configurations that
wish to use it; particularly for offline generation scripts
that wish to generate tagged filenames.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument` - a newer and more
open ended system of extending ``env.py`` scripts via the command
line.
"""
return self.context_opts.get("tag", None)
@overload
def get_x_argument( # type:ignore[misc]
self, as_dictionary: "Literal[False]" = ...
) -> List[str]:
...
@overload
def get_x_argument( # type:ignore[misc]
self, as_dictionary: "Literal[True]" = ...
) -> Dict[str, str]:
...
def get_x_argument(self, as_dictionary: bool = False):
"""Return the value(s) passed for the ``-x`` argument, if any.
The ``-x`` argument is an open ended flag that allows any user-defined
value or values to be passed on the command line, then available
here for consumption by a custom ``env.py`` script.
The return value is a list, returned directly from the ``argparse``
structure. If ``as_dictionary=True`` is passed, the ``x`` arguments
are parsed using ``key=value`` format into a dictionary that is
then returned.
For example, to support passing a database URL on the command line,
the standard ``env.py`` script can be modified like this::
cmd_line_url = context.get_x_argument(
as_dictionary=True).get('dbname')
if cmd_line_url:
engine = create_engine(cmd_line_url)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
po | olclass=pool.NullPool)
This then takes effect by running the ``alembic`` script as::
alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
This function does n | ot require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_tag_argument`
:attr:`.Config.cmd_opts`
"""
if self.config.cmd_opts is not None:
value = self.config.cmd_opts.x or []
else:
value = []
if as_dictionary:
value = dict(arg.split("=", 1) for arg in value)
return value
def configure(
self,
connection: Optional["Connection"] = None,
url: Optional[str] = None,
dialect_name: Optional[str] = None,
dialect_opts: Optional[dict] = None,
transactional_ddl: Optional[bool] = None,
transaction_per_migration: bool = False,
output_buffer: Optional[TextIO] = None,
starting_rev: Optional[str] = None,
tag: Optional[str] = None,
template_args: Optional[dict] = None,
render_as_batch: bool = False,
target_metadata: Optional["MetaData"] = None,
include_name: Optional[Callable] = None,
include_object: Optional[Callable] = None,
include_schemas: bool = False,
process_revision_directives: Optional[Callable] = None,
compare_type: bool = False,
compare_server_default: bool = False,
render_item: Optional[Callable] = None,
literal_binds: bool = False,
upgrade_token: str = "upgrades",
downgrade_token: str = "downgrades",
alembic_module_prefix: str = "op.",
sqlalchemy_module_prefix: str = "sa.",
user_module_prefix: Optional[str] = None,
on_version_apply: Optional[Callable] = None,
**kw
) -> None:
"""Configure a :class:`.MigrationContext` within this
:class:`.EnvironmentContext` which will provide database
connectivity and other configuration to a series of
migration scripts.
Many methods on :class:`.EnvironmentContext` require that
this method has been called in order to function, as they
ultimately need to have database access or at least access
to the dialect in use. Those which do are documented as such.
The important thing needed by :meth:`.configure` is a
means to determine what kind of database dialect is in use.
An actual connection to that database is needed only if
the :class:`.MigrationContext` is to be used in
"online" mode.
If the :meth:`.is_offline_mode` function returns ``True``,
then no connection is needed here. Otherwise, the
``connection`` parameter should be present as an
instance of :class:`sqlalchemy.engine.Connection`.
This function is typically called from the ``env.py``
script within a migration environment. It can be called
multiple times for an invocation. The most recent
:class:`~sqlalchemy.engine.Connection`
for which it was called is the one that will be operated upon
by the next call to :meth:`.run_migrations`.
General parameters:
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use
for SQL execution in "online" mode. When present, is also
used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc.
The type of dialect to be used will be derived from this if
``connection`` and ``url`` are not passed.
:param dialect_opts: dictionary of options to be passed to dialect
constructor.
.. versionadded:: 1.0.12
:param transactional_ddl: Force the usage of "transactional"
DDL on or off;
this otherwise defaults to whether or not the dialect in
use supports it.
:param transaction_per_migration: if True, nest each migration script
in a transaction rather than the full series of migrations to
run.
:param output_buffer: a file-like object that will be used
for textual output
when the ``--sql`` option is used to generate SQL scripts.
Defaults to
``sys.stdout`` if not passed here and also not present on
the :class:`.Config`
object. The value here overrides that of the :class:`.Config`
object.
:param output_encoding: when using ``--sql`` to generate SQL
scripts, apply this encoding to the string output.
:param literal_binds: when using ``--sql`` to generate SQL
scripts, pass through the ``literal_binds`` flag to the compiler
so that any literal values that would ordinarily be bound
parameters are converted to plain strings.
.. warning:: Dialects can typically only handle simple datatypes
like strings and numbers for auto-literal generation. Datatypes
like dates, intervals, and others may still req |
"""<name>Performance Curves</name>
<description>Model performance at different thresholds</description>
<icon>icons/PerformanceCurves.png</icon>
<priority>30</priority>
<contact>Janez Demsar (janez.demsar@fri.uni-lj.si)</contact>"""
from OWWidget import *
from OWGUI import *
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from OWDlgs import OWChooseImageSizeDlg
import sip
import orngTest
from OWGraph import *
class PerformanceGraph(OWGraph):
    """Plot that reports mouse interaction on the x axis to its master.

    While a mouse button is held down, every move is converted into an
    x-axis position and forwarded via master.thresholdChanged().
    """
    def __init__(self, master, *arg):
        OWGraph.__init__(self, *arg)
        self.master = master
        self.mousePressed = False

    def mousePressEvent(self, e):
        self.mousePressed = True
        pos = self.canvas().mapFrom(self, e.pos())
        threshold = self.invTransform(QwtPlot.xBottom, pos.x())
        self.master.thresholdChanged(threshold)

    def mouseReleaseEvent(self, e):
        self.mousePressed = False

    def mouseMoveEvent(self, e):
        # A drag behaves like a continuous series of presses.
        if self.mousePressed:
            self.mousePressEvent(e)
# Remove if this widget ever goes multilingual!
# `_` is a no-op stand-in for a gettext-style translation function.
_ = lambda x:x
class OWPerformanceCurves(OWWidget):
settingsList = ["selectedScores", "threshold"]
    def __init__(self, parent=None, signalManager=None, name="Performance Curves"):
        """Build the control panel, graph and statistics pane."""
        OWWidget.__init__(self, parent, signalManager, name)
        self.inputs=[("Evaluation Results", orngTest.ExperimentResults, self.setTestResults, Default)]
        self.outputs=[]
        # Persisted settings (see settingsList) and runtime state.
        self.selectedScores = []
        self.classifiers = []
        self.selectedClassifier = []
        self.targetClass = -1
        self.threshold = 0.5
        self.thresholdCurve = None
        self.statistics = ""
        self.resize(980, 420)
        self.loadSettings()
        # Score labels shown in the listbox; their order must match the
        # score rows built in classifierChanged and the colors below.
        self.scores = [_('Classification accuracy'), _('Sensitivity (Recall)'), _('Specificity'),
                       _('Positive predictive value (Precision)'), _('Negative predictive value'),
                       _('F-measure')]
        self.colors = [Qt.black, Qt.green, Qt.darkRed,
                       Qt.blue, Qt.red,
                       QColor(255, 128, 0)]
        self.res = None
        self.allScores = None
        # Control panel: model list, target class combo, score list.
        OWGUI.listBox(self.controlArea, self, 'selectedClassifier', 'classifiers', box = "Models", callback=self.classifierChanged, selectionMode = QListWidget.SingleSelection)
        self.comTarget = OWGUI.comboBox(self.controlArea, self, 'targetClass', box="Target Class", callback=self.classifierChanged, valueType=0)
        OWGUI.listBox(self.controlArea, self, 'selectedScores', 'scores', box = _("Performance scores"), callback=self.selectionChanged, selectionMode = QListWidget.MultiSelection)
        # Replace the default main-area layout with the plot plus a
        # statistics box.
        sip.delete(self.mainArea.layout())
        self.layout = QHBoxLayout(self.mainArea)
        self.dottedGrayPen = QPen(QBrush(Qt.gray), 1, Qt.DotLine)
        self.graph = graph = PerformanceGraph(self, self.mainArea)
        graph.state = NOTHING
        # Both axes are probabilities/rates, fixed to [0, 1].
        graph.setAxisScale(QwtPlot.xBottom, 0.0, 1.0, 0.0)
        graph.setAxisScale(QwtPlot.yLeft, 0.0, 1.0, 0.0)
        graph.useAntialiasing = True
        graph.insertLegend(QwtLegend(), QwtPlot.BottomLegend)
        graph.gridCurve.enableY(True)
        graph.gridCurve.setMajPen(self.dottedGrayPen)
        graph.gridCurve.attach(graph)
        self.mainArea.layout().addWidget(graph)
        b1 = OWGUI.widgetBox(self.mainArea, "Statistics")
        # Label text is bound to self.statistics via %-interpolation.
        OWGUI.label(b1, self, "%(statistics)s").setTextFormat(Qt.RichText)
        OWGUI.rubber(b1)
        self.controlArea.setFixedWidth(220)
def setTestResults(self, res):
self.res = res
if res and res.classifierNames:
self.classifiers = res.classifierNames
self.selectedClassifier = [0]
self.comTarget.clear()
self.comTarget.addItems(self.res.classValues)
self.targetClass=min(1, len(self.res.classValues))
self.classifierChanged()
else:
self.graph.clear()
self.thresholdCurve = None
self.allScores = None
    def classifierChanged(self):
        """Recompute every score series for the selected classifier and target class.

        Sorts test instances by the probability assigned to the target class,
        then sweeps the decision threshold upward, maintaining confusion
        counts.  One row per distinct probability value is appended:
        (CA, sensitivity, specificity, PPV, NPV, F-measure, TP, TN, FP, FN, Nc)
        where Nc is the number of instances already below the threshold.
        Rows are finally transposed so allScores[i] is the series for score i,
        aligned element-wise with self.probs.
        """
        self.allScores = []
        self.probs = []
        classNo = self.selectedClassifier[0]
        # Pairs of (probability of target class, instance actually IS target).
        probsClasses = sorted((tex.probabilities[classNo][self.targetClass], self.targetClass==tex.actualClass) for tex in self.res.results)
        self.all = all = len(probsClasses)
        # At threshold 0 everything is predicted positive: TP = P, FP = N.
        TP = self.P = P = float(sum(x[1] for x in probsClasses))
        FP = self.N = N = all-P
        TN = FN = 0.
        prevprob = probsClasses[0][0]
        for Nc, (prob, kls) in enumerate(probsClasses):
            # This instance drops below the threshold: a positive becomes a
            # false negative, a negative becomes a true negative.
            if kls:
                TP -= 1
                FN += 1
            else:
                FP -= 1
                TN += 1
            # Emit a row only when the probability changes, so tied
            # probabilities share a single row (and Nc >= 1 here, keeping
            # TN/Nc well defined).
            if prevprob != prob:
                self.allScores.append(((TP+TN)/all, TP/(P or 1), TN/(N or 1), TP/(all-Nc), TN/Nc, 2*TP/(P+all-Nc), TP, TN, FP, FN, Nc))
                self.probs.append(prevprob)
                prevprob = prob
        # Closing row for the highest probability group.
        self.allScores.append(((TP+TN)/all, TP/(P or 1), TN/(N or 1), TP/(all-Nc), TN/Nc, 2*TP/(P+all-Nc), TP, TN, FP, FN, Nc))
        self.probs.append(prevprob)
        self.allScores = zip(*self.allScores)
        self.selectionChanged()
def selectionChanged(self):
self.graph.clear()
self.thresholdCurve = None
if not self.allScores:
return
for c in self.selectedScores:
self.graph.addCurve(self.scores[c], self.colors[c], self.colors[c], 1, xData=self.probs, yData=self.allScores[c], style = QwtPlotCurve.Lines, symbol = QwtSymbol.NoSymbol, lineWidth=3, enableLegend=1)
self.thresholdChanged()
# self.graph.replot is called in thresholdChanged
def thresholdChanged(self, threshold=None):
if threshold is not None:
self.threshold = threshold
if self.thresholdCurve:
self.thresholdCurve.detach()
self.thresholdCurve = self.graph.addCurve("threshold", Qt.black, Qt.black, 1, xData=[self.threshold]*2, yData=[0,1], style=QwtPlotCurve.Lines, symbol = QwtSymbol.NoSymbol, lineWidth=1)
self.graph.replot()
if not self.allScores:
self.statistics = ""
return
ind = 0
while self.probs[ind] < self.threshold and ind+1 < len(self.probs):
ind += 1
alls = self.allScores
stat = "<b>Sample size: %i instances</b><br/> Positive: %i<br/> Negative: %i<br/><br/>" % (self.all, self.P, self.N)
stat += "<b>Current threshold: %.2f</b><br/><br/>" % self.threshold
stat += "<b>Positive predictions: %i</b><br/> True positive: %i<br/> False positive: %i<br/><br/>" % (self.all-alls[-1][ind], alls[-5][ind], alls[-3][ind])
stat += "<b>Negative predictions: %i</b><br/> True negative: %i<br/> False negative: %i<br/><br/>" % (alls[-1][ind], alls[-4][ind], alls[-3][ind])
if self.selectedScores:
stat += "<b>Performance</b><br/>"
stat += "<br/>".join("%s: %.2f" % (self.scores[i], alls[i][ind]) for i in self.selectedScores)
self.statistics = stat
def sendReport(self):
if self.res:
self.reportSettings(_("Performance Curves"),
[(_("Model"), self.res.classifierNames[self.selectedClassifier[0]]),
(_("Target class"), self.res.classValues[self.targetClass])])
self.reportImage(self.graph.saveToFileDirect, QSize(790, 390))
self.reportSection("Performance")
self.reportRaw(self.statistics)
|
# http://code.activestate.com/recipes/577263-numerical-integration-using-monte-carlo-method/
# Numerical Integration using Monte Carlo method
# FB - 201006137
#pythran export montecarlo_integration(float, float, int, float list, int)
#runas montecarlo_integration(1.,10.,100,[x/100. for x in range(100)],100)
#bench montecarlo_integration(1.,10.,650000,[x/100. for x in range(100)],100)
import math
def montecarlo_integration(xmin, xmax, numSteps, rand, randsize):
    """Monte-Carlo estimate of the signed area under sin(x) on [xmin, xmax].

    xmin, xmax: integration bounds.
    numSteps: number of bounding-box scan points and of sample points.
    rand, randsize: pre-generated uniform [0, 1) numbers, reused cyclically
        so the result is deterministic (pythran benchmark harness).
    """
    # define any function here!
    def f(x):
        return math.sin(x)

    # Scan the integrand to bound the sampling rectangle vertically.
    ymin = f(xmin)
    ymax = ymin
    for step in range(numSteps):
        y = f(xmin + (xmax - xmin) * float(step) / numSteps)
        if y < ymin:
            ymin = y
        if y > ymax:
            ymax = y

    rectArea = (xmax - xmin) * (ymax - ymin)
    # NOTE(review): x and y reuse the same random draw each iteration, so
    # the sample points all lie on one diagonal of the rectangle; kept
    # as-is to preserve the benchmark's exact output.
    counted = 0
    for j in range(numSteps):
        r = rand[j % randsize]
        x = xmin + (xmax - xmin) * r
        y = ymin + (ymax - ymin) * r
        fx = f(x)
        if math.fabs(y) <= math.fabs(fx):
            if fx > 0 and y > 0 and y <= fx:
                counted += 1  # area over x-axis is positive
            if fx < 0 and y < 0 and y >= fx:
                counted -= 1  # area under x-axis is negative
    return rectArea * float(counted) / numSteps
|
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class ActiveFormatTest(object):
    """Model for the active-format-description (AFD) QC test.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # attribute name -> attribute type (OpenAPI metadata)
    openapi_types = {
        'afd': 'int',
        'reject_on_error': 'bool',
        'checked': 'bool'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'afd': 'afd',
        'reject_on_error': 'reject_on_error',
        'checked': 'checked'
    }

    def __init__(self, afd=None, reject_on_error=None, checked=None, local_vars_configuration=None):  # noqa: E501
        """ActiveFormatTest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._afd = None
        self._reject_on_error = None
        self._checked = None
        self.discriminator = None

        # Route provided values through their property setters; None means
        # "not supplied" and leaves the backing field untouched.
        for prop_name, prop_value in (('afd', afd),
                                      ('reject_on_error', reject_on_error),
                                      ('checked', checked)):
            if prop_value is not None:
                setattr(self, prop_name, prop_value)

    @property
    def afd(self):
        """Get the afd of this ActiveFormatTest.  # noqa: E501

        :rtype: int
        """
        return self._afd

    @afd.setter
    def afd(self, afd):
        """Set the afd of this ActiveFormatTest.

        :type: int
        """
        self._afd = afd

    @property
    def reject_on_error(self):
        """Get the reject_on_error of this ActiveFormatTest.  # noqa: E501

        :rtype: bool
        """
        return self._reject_on_error

    @reject_on_error.setter
    def reject_on_error(self, reject_on_error):
        """Set the reject_on_error of this ActiveFormatTest.

        :type: bool
        """
        self._reject_on_error = reject_on_error

    @property
    def checked(self):
        """Get the checked of this ActiveFormatTest.  # noqa: E501

        :rtype: bool
        """
        return self._checked

    @checked.setter
    def checked(self, checked):
        """Set the checked of this ActiveFormatTest.

        :type: bool
        """
        self._checked = checked

    def to_dict(self):
        """Return the model properties as a dict."""
        def unpack(value):
            # Recursively serialize nested models; plain values pass through.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        result = {}
        for attr in six.iterkeys(self.openapi_types):
            result[attr] = unpack(getattr(self, attr))
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, ActiveFormatTest):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        if not isinstance(other, ActiveFormatTest):
            return True
        return self.to_dict() != other.to_dict()
|
"""
Module that provides a connection to the ModuleStore specified in the django settings.
Passes settings.MODULESTORE as kwargs to MongoModuleStore
"""
from __future__ import absolute_import
from importlib import import_module
import re
from django.conf import settings
from django.core.cache import get_cache, InvalidCacheBackendError
from django.dispatch import Signal
import django.utils
from xmodule.modulestore.loc_mapper_store import LocMapperStore
from xmodule.util.django import get_current_request_hostname
# We may not always have the request_cache module available
try:
from request_cache.middleware import RequestCache
HAS_REQUEST_CACHE = True
except ImportError:
HAS_REQUEST_CACHE = False
_MODULESTORES = {}
FUNCTION_KEYS = ['render_template']
def load_function(path):
    """Import and return the object named by a dotted path.

    path: string of the form "path.to.module.function"; the final segment
    is looked up as an attribute of the imported module.
    """
    module_name, _sep, attr_name = path.rpartition('.')
    module = import_module(module_name)
    return getattr(module, attr_name)
def create_modulestore_instance(engine, doc_store_config, options, i18n_service=None):
    """
    This will return a new instance of a modulestore given an engine and options

    engine: dotted path to the modulestore class (resolved via load_function)
    doc_store_config: backing document-store configuration passed through
    options: keyword options for the store; entries named in FUNCTION_KEYS
        that are dotted-path strings are resolved to callables first
    i18n_service: optional i18n service; defaults to a ModuleI18nService
    """
    class_ = load_function(engine)

    _options = {}
    _options.update(options)
    # Resolve function-valued options given as dotted-path strings
    # (Python 2: basestring covers str and unicode).
    for key in FUNCTION_KEYS:
        if key in _options and isinstance(_options[key], basestring):
            _options[key] = load_function(_options[key])

    # Per-request cache is optional (module may be absent outside LMS/CMS).
    if HAS_REQUEST_CACHE:
        request_cache = RequestCache.get_request_cache()
    else:
        request_cache = None

    # Fall back to the default cache when no dedicated backend is configured.
    try:
        metadata_inheritance_cache = get_cache('mongo_metadata_inheritance')
    except InvalidCacheBackendError:
        metadata_inheritance_cache = get_cache('default')

    return class_(
        metadata_inheritance_cache_subsystem=metadata_inheritance_cache,
        request_cache=request_cache,
        modulestore_update_signal=Signal(providing_args=['modulestore', 'course_id', 'location']),
        xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()),
        xblock_select=getattr(settings, 'XBLOCK_SELECT_FUNCTION', None),
        doc_store_config=doc_store_config,
        i18n_service=i18n_service or ModuleI18nService(),
        **_options
    )
def get_default_store_name_for_current_request():
    """Return the default modulestore name for the current Django request.

    The current hostname is matched against the regex keys of
    settings.HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS; falls back to
    'default' when there is no request, no mappings, or no match.
    """
    store_name = 'default'
    # Hostname of the request being processed, if any.
    hostname = get_current_request_hostname()
    # Mapping of hostname-regex -> store name from configuration.
    mappings = getattr(settings, 'HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', None)
    if not (hostname and mappings):
        return store_name
    # Scan every pattern; as in the original, a later match overrides an
    # earlier one.
    for pattern in mappings.keys():
        if re.match(pattern, hostname):
            store_name = mappings[pattern]
    return store_name
def modulestore(name=None):
    """Return the modulestore registered under *name*, creating it lazily.

    When *name* is falsy, the default store for the current request is
    chosen. Instances are cached in the module-level _MODULESTORES dict.
    """
    if not name:
        # Caller did not specify a name; derive the default one.
        name = get_default_store_name_for_current_request()

    if name not in _MODULESTORES:
        config = settings.MODULESTORE[name]
        _MODULESTORES[name] = create_modulestore_instance(
            config['ENGINE'],
            config.get('DOC_STORE_CONFIG', {}),
            config.get('OPTIONS', {}),
        )

    # The split store needs the loc_mapper singleton once it exists.
    if name == 'split' and _loc_singleton is not None:
        _MODULESTORES['split'].loc_mapper = _loc_singleton
    return _MODULESTORES[name]
_loc_singleton = None
def loc_mapper():
    """Return the singleton LocMapperStore, creating it on first call.

    The mapper bidirectionally maps Locations to Locators; used like
    modulestore() as a singleton accessor. A freshly built mapper is
    also injected into an existing 'split' modulestore.
    """
    # pylint: disable=W0603
    global _loc_singleton
    # pylint: disable=W0212
    if _loc_singleton is None:
        # Prefer the dedicated cache backend, fall back to default.
        try:
            loc_cache = get_cache('loc_cache')
        except InvalidCacheBackendError:
            loc_cache = get_cache('default')
        _loc_singleton = LocMapperStore(loc_cache, **settings.DOC_STORE_CONFIG)
        # inject into split mongo modulestore
        if 'split' in _MODULESTORES:
            _MODULESTORES['split'].loc_mapper = _loc_singleton
    return _loc_singleton
def clear_existing_modulestores():
    """Forget all cached modulestore instances and the loc_mapper singleton.

    Instances are re-created lazily on next access. Useful for flushing
    state between unit tests.
    """
    _MODULESTORES.clear()
    # pylint: disable=W0603
    global _loc_singleton
    loc_cache = getattr(_loc_singleton, "cache", None)
    if loc_cache:
        loc_cache.clear()
    _loc_singleton = None
def editable_modulestore(name='default'):
    """Return a modulestore that can be modified, or None.

    Useful for tests that need to insert data. Only Mongo-backed stores
    are editable; for a MixedModuleStore the underlying 'default' store
    is unwrapped first (assumed to be Mongo).
    """
    store = modulestore(name)
    # MixedModuleStore exposes its concrete stores via `modulestores`.
    if hasattr(store, 'modulestores'):
        store = store.modulestores['default']
    # Editability is detected by the presence of create_xmodule.
    return store if hasattr(store, 'create_xmodule') else None
class ModuleI18nService(object):
    """XBlock runtime "i18n" service backed by Django's translation module.

    django.utils.translation implements the gettext.Translations interface
    (ugettext, ungettext, etc.), so attribute access is delegated to it
    directly.
    """

    def __getattr__(self, name):
        # Anything not defined here resolves against django.utils.translation.
        return getattr(django.utils.translation, name)

    def strftime(self, *args, **kwargs):
        """A locale-aware implementation of strftime."""
        # Imported locally: the xmodule test suite cannot import this module
        # (Django is unavailable there) and never calls this function, so
        # the local import keeps the dependency hidden from that suite.
        from util.date_utils import strftime_localized
        return strftime_localized(*args, **kwargs)
|
# Read three space-separated integers, print them in ascending order,
# then a blank line, then the original input order.
a, b, c = input().split(" ")
numbers = [int(a), int(b), int(c)]
cnumbers = list(numbers)
# sorted() replaces the hand-rolled bubble sort (same output); it also
# avoids the original's reuse of `a` and `b` as loop variables, which
# shadowed the input values.
numbers = sorted(numbers)
for value in numbers:
    print(value)
print("")
for value in cnumbers:
    print(value)
from __future__ import unicode_literals
from zope.interface impo | rt implements, classProvides
from scheme.macro import Macro
from scheme.symbol import Symbol
from scheme.environment import Environme | nt, SyntaxEnvironment
from scheme.syntax import SyntaxSymbol
from scheme.PatternMatcher import PatternMatcher
# from scheme.utils import syntax_copy_with_replacement
from scheme.utils import transformCode
import scheme.debug
class syntax_rules(object):
    """Scheme `syntax-rules` macro transformer (Python 2 / zope.interface).

    Built from a (literals pattern-template...) form; calling the instance
    matches the input expression against each pattern in order and expands
    the first matching template.
    """
    implements(Macro)
    classProvides(Macro)
    def __init__(self, processer, ast):
        # ast[0] holds the literal symbols; the rest are (pattern template...)
        # clauses. The macro's own name is the head of the first pattern.
        literals = ast[0]
        patterns = ast[1:]
        self.name = patterns[0][0][0]
        # Definition environment: macros are hygienic w.r.t. where defined.
        self.env = processer.cenv.parent
        self.literals = literals
        self.patterns = patterns
    def __call__(self, processer, params):
        """Expand the macro for *params*; raises SyntaxError when no clause matches."""
        params=params[0].toObject(processer.cenv)
        for pattern in self.patterns:
            template = pattern[1:]
            pattern = pattern[0]
            # None means "no match"; an empty binding dict is a valid match.
            bindings = PatternMatcher(pattern, self.literals).match(params)
            if bindings is None:
                continue
            env = Environment(self.env)
            l = {}
            l.update(globals())
            l.update(locals())
            #import code
            #code.InteractiveConsole(locals=l).interact()
            transformedCode = transformCode(template, bindings, env, self)
            #osp = processer.stackPointer
            #processer.popStack(transformedCode)
            ##processer.ast = transformedCode
            #processer.stackPointer = osp
            if scheme.debug.getDebug('syntax'):
                print 56, transformedCode
            # Unwrap a single-expression expansion.
            if len(transformedCode)==1:
                return transformedCode[0]
            return transformedCode
        raise SyntaxError("syntax-rules no case matching %r for %s" % (params, self.name))
# Register the transformer under its Scheme name in the global environment.
import scheme.Globals
scheme.Globals.Globals['syntax-rules'] = syntax_rules
|
from __future__ import unicode_literals
from blanc_basic_assets.fields import AssetForeignKey
from django.core.exceptions import ValidationError
from django.core.urlresolvers import get_script_prefix
from django.core.validators import RegexValidator
from django.db import models
from django.utils.encoding import iri_to_uri, python_2_unicode_compatible
from mptt.managers import TreeManager
from mptt.models import MPTTModel, TreeForeignKey
# Validator from flatpages
url_validator = RegexValidator(regex=r'^[-\w/\.~]+$',
message="This value must contain only letters, numbers, dots, "
"underscores, dashes, slashes or tildes.")
# Another validator to ensure the URL starts and ends with a slash
def slash_validator(url):
    """Validate that *url* starts and ends with a slash.

    Raises ValidationError on the first violation found (leading slash is
    checked before trailing slash); returns None for valid values.
    """
    if not url.startswith('/'):
        raise ValidationError("This value must start with a leading slash.")
    if not url.endswith('/'):
        raise ValidationError("This value must end with a trailing slash.")
@python_2_unicode_compatible
class Page(MPTTModel):
    """A flatpage-style CMS page arranged in an MPTT tree via `parent`."""
    # Absolute URL of the page; both validators run on model validation.
    url = models.CharField(
        'URL', max_length=100, unique=True,
        help_text="Example: '/about/contact/'. Make sure to have leading and trailing slashes.",
        validators=[url_validator, slash_validator])
    title = models.CharField(max_length=200)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
    show_in_navigation = models.BooleanField(default=True, db_index=True)
    hero_image = AssetForeignKey('assets.Image', blank=True, null=True, on_delete=models.SET_NULL)
    content = models.TextField(blank=True)
    # Optional override for the template used to render this page.
    template_name = models.CharField(max_length=100, blank=True)
    published = models.BooleanField(default=True, db_index=True)
    login_required = models.BooleanField(default=False, db_index=True)
    objects = TreeManager()
    def __str__(self):
        # e.g. "/about/ -- About us"
        return '%s -- %s' % (self.url, self.title)
    def get_absolute_url(self):
        # Handle script prefix manually because we bypass reverse()
        return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
|
from __future__ import absolute_import, unicode_literals
import warnings
| from django.core | .exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from wagtail.utils.deprecation import RemovedInWagtail16Warning
from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel
from wagtail.wagtailcore.utils import resolve_model_string
from .widgets import AdminSnippetChooser
class BaseSnippetChooserPanel(BaseChooserPanel):
    """Edit handler rendering a snippet chooser widget for a relation field."""
    object_type_name = 'item'
    # Cache for target_model(); resolved lazily on first call.
    _target_model = None
    @classmethod
    def widget_overrides(cls):
        """Use an AdminSnippetChooser bound to the panel's target model."""
        return {cls.field_name: AdminSnippetChooser(model=cls.target_model())}
    @classmethod
    def target_model(cls):
        """Return (and cache) the snippet model this panel chooses.

        Prefers the deprecated explicit `snippet_type` string when set;
        otherwise derives the model from the field's relation. Raises
        ImproperlyConfigured for an unparseable or unknown snippet_type.
        """
        if cls._target_model is None:
            if cls.snippet_type:
                # RemovedInWagtail16Warning: The target_model is automatically
                # detected from the relation, so snippet_type is deprecated.
                try:
                    cls._target_model = resolve_model_string(cls.snippet_type)
                except LookupError:
                    raise ImproperlyConfigured(
                        "{0}.snippet_type must be of the form 'app_label.model_name', given {1!r}"
                        .format(cls.__name__, cls.snippet_type)
                    )
                except ValueError:
                    raise ImproperlyConfigured(
                        "{0}.snippet_type refers to model {1!r} that has not been installed"
                        .format(cls.__name__, cls.snippet_type)
                    )
            else:
                cls._target_model = cls.model._meta.get_field(cls.field_name).rel.model
        return cls._target_model
    def render_as_field(self):
        """Render the field template with the bound field and the chosen item."""
        instance_obj = self.get_chosen_item()
        return mark_safe(render_to_string(self.field_template, {
            'field': self.bound_field,
            self.object_type_name: instance_obj,
        }))
class SnippetChooserPanel(object):
    """Factory producing a bound BaseSnippetChooserPanel subclass per model."""

    def __init__(self, field_name, snippet_type=None):
        self.field_name = field_name
        self.snippet_type = snippet_type
        # snippet_type is deprecated; the related model is auto-detected.
        if snippet_type is not None:
            warnings.warn(
                'The snippet_type argument to SnippetChooserPanel is deprecated. '
                'The related model is now automatically detected.',
                RemovedInWagtail16Warning)

    def bind_to_model(self, model):
        """Return a panel subclass bound to *model* and this field."""
        bound_attrs = {
            'model': model,
            'field_name': self.field_name,
            'snippet_type': self.snippet_type,
        }
        return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), bound_attrs)
|
m)
if last:
last.quantifier = t.between
return t
elif psm.char == "?" and quantified == ContentOfGroup.Quantified:
self.quantified = ContentOfGroup.UngreedyQuantified
last = self._last_or_fail(psm)
if last:
last.quantifier.greedy = False
return self.limited_prev
elif quantified == ContentOfGroup.Quantified:
psm.error = "unexpected quantifier"
elif quantified == ContentOfGroup.UngreedyQuantified:
psm.error = "quantifier repeated"
# <<< Quantifier
else:
t = ast.SingleChar()
t.char = psm.char
self.add(t)
return self.limited_prev
def _last_or_fail(self, psm: PSM):
if self.parent.g.group.seq:
return self.parent.g.group.seq[-1]
else:
psm.error = "nothing to repeat"
class MinimumOfRepetition:
    """Parser state collecting the lower bound of a {m,n} quantifier."""

    def __init__(self, parent: ContentOfGroup):
        self.parent = parent
        self.between = ast.Between()
        self.min = []

    def next(self, psm: PSM):
        """Consume one character and return the next parser state."""
        ch = psm.char
        if ch.isdigit():
            self.min.append(ch)
            return self
        if ch == ",":
            # "{m," -> switch to parsing the maximum.
            self._interpret()
            return MaximumOfRepetition(self)
        if ch == "}":
            # "{m}" -> quantifier finished.
            self._interpret()
            return self.parent
        psm.error = 'expected digit, "," or "}"'

    def _interpret(self):
        """Store the accumulated digits into the Between node, if any."""
        if not self.min:
            return
        try:
            count = int("".join(self.min))
        except ValueError:
            assert False, "internal error: cannot convert to number minimum of repetition"
        self.between.min = count
class MaximumOfRepetition:
    """Parser state collecting the upper bound of a {m,n} quantifier."""

    def __init__(self, repeat: MinimumOfRepetition):
        self.repeat = repeat
        self.max = []

    def next(self, psm: PSM):
        """Consume one character and return the next parser state."""
        ch = psm.char
        if ch.isdigit():
            self.max.append(ch)
            return self
        if ch == "}":
            # Quantifier closed; resume the enclosing group state.
            self._interpret()
            return self.repeat.parent
        psm.error = 'expected digit, "," or "}"'

    def _interpret(self):
        """Store the accumulated digits into the Between node, if any."""
        if not self.max:
            return
        try:
            count = int("".join(self.max))
        except ValueError:
            assert False, "internal error: cannot convert to number maximum of repetition"
        self.repeat.between.max = count
#--------------------------------------
# Escaping
class EscapedChar:
    """Parser state entered right after a backslash.

    Depending on the next character, emits a PatternChar (\\d, \\w, ...),
    a literal SingleChar, or delegates to the \\xHH / \\uHHHH states.
    """

    def __init__(self, prev, as_single_chars=(), as_pattern_chars=()):
        self.prev = prev  # ContentOfGroup or CharClass
        self.single_chars = as_single_chars
        self.pattern_chars = as_pattern_chars

    def next(self, psm: PSM):
        """Consume the escaped character and return the next parser state."""
        ch = psm.char
        is_pattern = (ch in SpecialPattern.individual_chars
                      or ch in SpecialPattern.range_chars
                      or ch in self.pattern_chars)
        if is_pattern:
            node = ast.PatternChar()
            node.pattern = ch
            self.prev.add(node)
            return self.prev
        if ch in self.single_chars:
            node = ast.SingleChar()
            node.char = ch
            self.prev.add(node)
            return self.prev
        if ch == "x":
            return AsciiChar(self.prev)
        if ch == "u":
            return UnicodeChar(self.prev)
        psm.error = "unauthorized escape of {}".format(ch)
class AsciiChar:
    """Parser state for a \\xHH escape: collects exactly two hex digits."""

    def __init__(self, prev):
        self.prev = prev  # ContentOfGroup or CharClass
        self.pattern = ast.PatternChar()
        self.pattern.type = ast.PatternChar.Ascii
        # The node is registered immediately and filled in as digits arrive.
        self.prev.add(self.pattern)

    def next(self, psm: PSM):
        """Accumulate one hex digit; return to the previous state after two."""
        if psm.char not in string.hexdigits:
            psm.error = "expected ASCII hexadecimal character"
            return None
        self.pattern.pattern += psm.char
        return self if len(self.pattern.pattern) < 2 else self.prev
class UnicodeChar:
    """Parser state for a \\uHHHH escape: collects exactly four hex digits."""

    def __init__(self, prev):
        self.prev = prev  # ContentOfGroup or CharClass
        self.pattern = ast.PatternChar()
        self.pattern.type = ast.PatternChar.Unicode
        # The node is registered immediately and filled in as digits arrive.
        self.prev.add(self.pattern)

    def next(self, psm: PSM):
        """Accumulate one hex digit; return to the previous state after four."""
        if psm.char not in string.hexdigits:
            psm.error = "expected ASCII hexadecimal character"
            return None
        self.pattern.pattern += psm.char
        return self if len(self.pattern.pattern) < 4 else self.prev
#-------------------------------------
# Character | class
class WrappedCharClass:
    """Mutable holder around a CharClass AST node.

    Kept separate so the held node can later be swapped for a PatternClass
    (POSIX-like class mutation) without invalidating references.
    """

    def __init__(self):
        # ast is CharClass or may be changed to PatternClass in one case
        self.ast = ast.CharClass()

    def add(self, other):
        """Append *other* to the class elements."""
        assert isinstance(self.ast, ast.CharClass)
        self.ast.elems += (other,)

    def pop(self):
        """Remove and return the most recently added element."""
        assert isinstance(self.ast, ast.CharClass)
        last = self.ast.elems[-1]
        self.ast.elems = self.ast.elems[:-1]
        return last
class CharClass:
    def __init__(self, prev):
        """Start parsing a character class; *prev* is resumed on ']'."""
        self.prev = prev # ContentOfGroup or CharClass
        self.q = WrappedCharClass()
        # forward function
        self.add = self.q.add
        # True when the next char is the end of an a-b range.
        self.next_is_range = False
        # True until the first char is consumed ('^' is special only there).
        self.empty = True
        # Cleared when content rules out a POSIX-like class mutation.
        self.can_mutate = True
    def next(self, psm: PSM):
        """Consume one character inside [...] and return the next state."""
        # Snapshot and reset the per-character flags up front.
        this_should_be_range = self.next_is_range
        self.next_is_range = False
        this_is_empty = self.empty
        self.empty = False
        if psm.char == "\\":
            # Escapes disable the POSIX-class mutation; a pending range is
            # carried over so "a-\\x62" style ranges still work.
            self.can_mutate = False
            self.next_is_range = this_should_be_range
            return EscapedChar(self,
                as_single_chars=SpecialPattern.restrict_special_chars)
        elif this_should_be_range and psm.char != "]":
            # Close an a-b range: pop the start element pushed earlier.
            assert isinstance(self.q.ast, ast.CharClass)
            assert len(self.q.ast.elems) >= 1
            self.next_is_range = False
            t = ast.Range()
            t.begin = self.q.pop()
            t.end = ast.SingleChar()
            t.end.char = psm.char
            self.q.add(t)
            return self
        elif psm.char == "^":
            # if at the begining, it has a special meaning
            if this_is_empty:
                self.can_mutate = False
                self.q.ast.negate = True
            else:
                t = ast.SingleChar()
                t.char = psm.char
                self.q.add(t)
            return self
        elif psm.char == "]":
            # A trailing "-" (pending range) is a literal dash.
            if this_should_be_range:
                t = ast.SingleChar()
                t.char = "-"
                self.q.add(t)
            else:
                self.mutate_if_posix_like()
            self.prev.add(self.q.ast)
            return self.prev
        elif psm.char == "[":
            # Nested character class.
            return CharClass(self)
        elif psm.char == "-" and len(self.q.ast.elems) >= 1:
            # Possible range start; resolved on the next character.
            self.next_is_range = True
            return self
        else:
            t = ast.SingleChar()
            t.char = psm.char
            self.q.add(t)
            return self
def mutate_if_posix_like(self):
"""
Change from character class to pattern char if the content is matching
POSIX-like classe.
"""
assert isinstance(self.q.ast, ast.CharClass)
# put in this variable everything that had happen but not saved into
# the single char object
# because mutation is only possible if the exact string of the content
# match a pre-definied list, so if an unlogged char is consumed, it
# must prevent mutation
if not self.can_mutate:
return
if len(self.q.ast.elems) < SpecialPattern.min_len_posix_class + 2:
return
opening = self.q.ast.elems[0]
if not isinstance(opening, ast.SingleChar) or opening.char != ":":
return
closing = self.q.ast.elems[-1]
if not isinstance(closing, ast.SingleChar) or closing.char != ":":
return
is_only_ascii = lambda x: (isinstance(x, ast.SingleChar)
and len(x.char) == 1
an |
ing',
threadHydroWithinB | lock='Within Block Threading',
tmax='Maximum Simulation Time [s]',
updateHydroFluxes='Update Hydro Fluxes',
useDiffuse='Use Diffusive Effects' | ,
useEnergyDeposition='Use Energy Deposition',
useFlame='Use Flame',
useGravity='Use Gravity',
useHydro='Use Hydro Calculation',
useRadTrans='Use Radiative Transfer',
use_cma_advection='Use CMA Advection',
use_cma_flattening='Use CMA Flattening',
ye_burned='Burned ye',
ye_unburned='Unburned ye',
**_fields([
['{}l_boundary_type', '{} Lower Boundary Type'],
['{}r_boundary_type', '{} Upper Boundary Type'],
['{}min', '{} Minimum'],
['{}max', '{} Maximum'],
['nblock{}', 'Blocks in {}'],
], ['x', 'y', 'z']),
**_fields([
['refine_var_{}', 'Name Variable {}'],
['refine_cutoff_{}', 'Refine Variable {}'],
['derefine_cutoff_{}', 'Derefine Variable {}'],
], [str(v) for v in range(1, 7)]),
)
_VIEW_FUNC_PREFIX = '_view_'
def __init__(self):
self._view_fns = PKDict()
for n, o in inspect.getmembers(self):
if n.startswith(self._VIEW_FUNC_PREFIX) and inspect.ismethod(o):
self._view_fns[n[len(self._VIEW_FUNC_PREFIX):]] = o
    def update_schema(self, schema):
        """Apply label and view fix-ups to *schema* in place and return it."""
        self._update_labels(schema)
        self._update_views(schema)
        return schema
def _assert_model_view_fields_exist(self, name, view, schema):
"""Check that model fields in view exist in models"""
def flatten(to_flatten):
def flatten_column(to_flatten):
if isinstance(to_flatten[0], str):
return flatten(to_flatten[1])
res = []
for f in to_flatten:
res += flatten_column(f)
return res
res = []
for f in to_flatten:
if isinstance(f, str):
res.append(f)
continue
assert isinstance(f, list), \
'uknown type f={f}'
res += flatten_column(f)
return res
for f in flatten(view.get('basic', []) + view.get('advanced', [])):
if '.' not in f:
f = f'{name}.{f}'
p = f.split('.')
assert p[0] in schema.model, \
f'model name={p[0]} does not exist in known models={schema.model.keys()}'
assert p[1] in schema.model[p[0]], \
f'field={p[1]} does not exist in model={schema.model[p[0]]} name={p[0]}'
def _get_species_list(self, schema):
res = []
for f in schema.model.Multispecies_MultispeciesMain:
m = re.search(r'eos_(.*)EosType', f)
if m:
res.append(m.group(1))
return res
    def _update_labels(self, schema):
        """Rewrite field labels in every schema model, in place.

        Builds the label table from the static _LABELS plus the
        simulation-specific and multispecies-specific additions, then for
        each known field sets info[0] to the label and folds the original
        field name into the description slot info[3].
        """
        labels = self._LABELS.copy()
        self._update_sim_labels(schema, labels)
        self._update_multispecies_labels(schema, labels)
        for m in schema.model.values():
            for f in m:
                if f not in labels:
                    continue
                info = m[f]
                # info layout: [label, type, default, description?]; ensure
                # the description slot exists and mentions the raw name.
                if len(info) == 3:
                    info.append(f)
                elif info[3]:
                    info[3] = '{} {}'.format(f, info[3])
                else:
                    info[3] = f
                info[0] = labels[f]
def _update_multispecies_labels(self, schema, labels):
if 'Multispecies_MultispeciesMain' not in schema.model:
return
for s in self._get_species_list(schema):
for f, label in {
'ms_{}A': 'Number of protons and neutrons in nucleus',
'ms_{}Z': 'Atomic number',
'ms_{}ZMin': 'Minimum allowed average ionization',
'eos_{}EosType': 'EOS type to use for MTMMMT EOS',
'eos_{}SubType': 'EOS subtype to use for MTMMMT EOS',
'ms_{}Gamma': 'Ratio of heat capacities',
'eos_{}TableFile': 'Tabulated EOS file name',
'op_{}Absorb': 'Absorption',
'op_{}Emiss': 'Emission',
'op_{}Trans': 'Transport',
}.items():
labels[f.format(s)] = f'{s.title()} {label}'
def _update_sim_labels(self, schema, labels):
#TODO(pjm): use constant for flashApp model
# special case for main simulation labels - use full description as label
for f, info in schema.model.Simulation_SimulationMain_flashApp.items():
if len(info) > 3:
labels[f] = info[3]
info[3] = ''
def _update_views(self, schema):
for n, f in self._view_fns.items():
if n not in schema.view:
continue
v = f(schema)
if v:
self._assert_model_view_fields_exist(n, v, schema)
schema.view[n].update(v)
    def _view_Driver_DriverMain(self, schema):
        """Build the Driver/DriverMain view: title, basic fields, advanced tabs.

        Field groups mirror the FLASH runtime-parameter documentation for
        the Driver unit (see URL below).
        """
        # http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Driver.txt
        v = PKDict(
            title='Simulation Driver',
            advanced=[
                ['Driver', [
                    'dr_abortPause',
                    'dr_dtMinBelowAction',
                    'dr_dtMinContinue',
                    'dr_numPosdefVars',
                    'dr_posdefDtFactor',
                    'dr_posdefVar_1',
                    'dr_posdefVar_2',
                    'dr_posdefVar_3',
                    'dr_posdefVar_4',
                    'dr_printTStepLoc',
                    'dr_shortenLastStepBeforeTMax',
                    'dr_tstepSlowStartFactor',
                    'dr_usePosdefComputeDt',
                ]],
                ['Drift', [
                    'drift_break_inst',
                    'drift_trunc_mantissa',
                    'drift_tuples',
                    'drift_verbose_inst',
                ]],
                ['Time', [
                    'wall_clock_time_limit',
                    'tinitial',
                ]],
                ['Timestep', [
                    'tstep_change_factor',
                    'nbegin',
                    'nend',
                    'useSTS',
                    'useSTSforDiffusion',
                    'nuSTS',
                    'nstepTotalSTS',
                ]],
                ['Thread', [
                    'threadBlockListBuild',
                    'threadDriverBlockList',
                    'threadDriverWithinBlock',
                    'threadRayTraceBuild',
                    'threadWithinBlockBuild',
                ]],
                ['Redshift', [
                    'zInitial',
                    'zFinal',
                ]],
                ['Other', [
                    'meshCopyCount',
                    'sweepOrder',
                ]],
            ],
            basic=[
                'dtinit',
                'tmax',
                'dtmax',
                'dtmin',
                'allowDtSTSDominate',
            ],
        )
        # Plot-file interval only applies when an IO module is configured.
        if 'IO_IOMain' in schema.model:
            v.basic.append('IO_IOMain.plotFileIntervalTime')
        return v
def _view_physics_Diffuse_DiffuseMain(self, schema):
# http://flash.uchicago.edu/site/flashcode/user_support/rpDoc_4p2.py?submit=rp_Diffuse.txt
v = PKDict(
title='Diffusive Effects',
basic=[
'diff_eleFlMode',
'diff_eleFlCoef',
'dt_diff_factor',
[
['X', [
'diff_eleXlBoundaryType',
'diff_eleXrBoundaryType',
]],
['Y', [
'diff_eleYlBoundaryType',
'diff_eleYrBoundaryType',
]],
['Z', [
'diff_eleZlBoundaryType',
'diff_eleZrBoundaryType',
]]
]
],
)
if ' |
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.zookeeper.serverset.endpoint import ServiceInstance
from twitter.common.zookeeper.serverset.serverset import ServerSet
from twitter.common.zookeeper.group.group_base import GroupInterface, Membership
from twitter.common.zookeeper.group.kazoo_group import ActiveKazooGroup
from kazoo.client import KazooClient
import mock
SERVICE_INSTANCE_JSON = '''{
"additionalEndpoints": {
"aurora": {
"host": "smfd-aki-15-sr1.devel.twitter.com",
"port": 31510
},
"health": {
"host": "smfd-aki-15-sr1.devel.twitter.com",
"port": 31510
}
},
"serviceEndpoint": {
"host": "smfd-aki-15-sr1.devel.twitter.com",
"port": 31510
},
"shard": 0,
"status": "ALIVE"
}'''
@mock.patch('twitter.common.zookeeper.serverset.serverset.ActiveKazooGroup')
@mock.patch('twitter.common.zookeeper.serverset.serverset.validate_group_implementation')
def test_internal_monitor(mock_group_impl_validator, MockActiveKazooGroup):
  """ServerSet._internal_monitor should resolve each Membership into a member."""
  zk_client = mock.Mock(spec=KazooClient)
  fake_group = mock.MagicMock(spec=GroupInterface)
  MockActiveKazooGroup.mock_add_spec(ActiveKazooGroup)
  MockActiveKazooGroup.return_value = fake_group

  # The real validator asserts the group impl subclasses GroupInterface; a
  # mock impl fails that check, so the validator is mocked out as well.
  mock_group_impl_validator.return_value = True

  def noop(*args, **kwargs):
    pass

  server_set = ServerSet(
      zk_client,
      '/some/path/to/group',
      on_join=noop,
      on_leave=noop)

  memberships = [Membership(member_id) for member_id in range(2)]
  print("Members are: %s" % memberships)
  server_set._internal_monitor(frozenset(memberships))

  # Deliver a resolved ServiceInstance to every queued info() callback.
  for recorded_call in fake_group.info.mock_calls:
    _, (_, callback), _ = recorded_call
    callback(ServiceInstance.unpack(SERVICE_INSTANCE_JSON))

  assert len(server_set._members) == 2
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from tf2_msgs/FrameGraphRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FrameGraphRequest(genpy.Message):
  """Autogenerated request message for the tf2_msgs/FrameGraph service.

  The request carries no fields, so (de)serialization is a no-op.
  """
  _md5sum = "d41d8cd98f00b204e9800998ecf8427e"
  _type = "tf2_msgs/FrameGraphRequest"
  _has_header = False  # no std_msgs/Header in this message
  _full_text = """"""
  __slots__ = []
  _slot_types = []

  def __init__(self, *args, **kwds):
    """Constructor following genpy's field protocol.

    This message has no fields; any given arguments are simply forwarded
    to genpy.Message for validation.

    :param args: complete set of field values, in .msg order
    :param kwds: keyword arguments corresponding to message field names
    """
    if args or kwds:
      super(FrameGraphRequest, self).__init__(*args, **kwds)

  def _get_types(self):
    """Internal genpy API: return the slot type list (empty here)."""
    return self._slot_types

  def serialize(self, buff):
    """Serialize this (empty) message into *buff*; nothing is written.

    :param buff: buffer, ``StringIO``
    """
    try:
      pass
    except struct.error as se:
      self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te:
      self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """Unpack serialized data in *str*; no fields exist, so just return self.

    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """Serialize with numpy support; identical to :meth:`serialize` here.

    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      pass
    except struct.error as se:
      self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te:
      self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """Deserialize with numpy support; identical to :meth:`deserialize` here.

    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill
# genpy's shared pre-compiled Struct for a little-endian uint32 length prefix.
_struct_I = genpy.struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from tf2_msgs/FrameGraphResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FrameGraphResponse(genpy.Message):
  """Autogenerated response message for the tf2_msgs/FrameGraph service.

  Carries a single field, ``frame_yaml`` (string).
  """
  _md5sum = "437ea58e9463815a0d511c7326b686b0"
  _type = "tf2_msgs/FrameGraphResponse"
  _has_header = False  # no std_msgs/Header in this message
  _full_text = """string frame_yaml
"""
  __slots__ = ['frame_yaml']
  _slot_types = ['string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       frame_yaml

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(FrameGraphResponse, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      # BUG FIX: this None-check must live *inside* the args/kwds branch.
      # The previous flattened version reset frame_yaml to '' in both the
      # if and else arms, discarding any caller-supplied value.
      if self.frame_yaml is None:
        self.frame_yaml = ''
    else:
      self.frame_yaml = ''

  def _get_types(self):
    """Internal genpy API: return the slot type list."""
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.frame_yaml
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      # length-prefixed UTF-8 string; py3 packs bytes element-wise
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.frame_yaml = str[start:end].decode('utf-8')
      else:
        self.frame_yaml = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.frame_yaml
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.frame_yaml = str[start:end].decode('utf-8')
      else:
        self.frame_yaml = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill
# genpy's shared pre-compiled Struct for a little-endian uint32 length prefix.
_struct_I = genpy.struct_I
class FrameGraph(object):
  # Service metadata container for tf2_msgs/FrameGraph: exposes the
  # request/response classes plus the type name and response md5sum used
  # during the ROS service connection handshake.
  _type          = 'tf2_msgs/FrameGraph'
  _md5sum = '437ea58e9463815a0d511c7326b686b0'
  _request_class  = FrameGraphRequest
  _response_class = FrameGraphResponse
|
#!/usr/bin/env python
import os
from runtest import TestBase
class TestCase(TestBase):
    """uftrace 'openclose' kernel-tracing test (requires root, no docker)."""

    def __init__(self):
        TestBase.__init__(self, 'openclose', serial=True, result="""
# DURATION TID FUNCTION
[ 9875] | main() {
[ 9875] | fopen() {
14.416 us [ 9875] | sys_open();
19.099 us [ 9875] | } /* fopen */
9.720 us [ 9875] | fclose();
37.051 us [ 9875] | } /* main */
""")

    def prerun(self, timeout):
        """Skip unless running as root and outside a docker container."""
        if os.geteuid() != 0:
            return TestBase.TEST_SKIP
        if os.path.exists('/.dockerenv'):
            return TestBase.TEST_SKIP
        return TestBase.TEST_SUCCESS

    # check that the syscall name gets corrected (for the SyS_ prefix)
    def setup(self):
        self.option = "-k -P '_*sys_open@kernel'"

    def fixup(self, cflags, result):
        """Rewrite the expected syscall symbol to match the running kernel."""
        uname = os.uname()

        # Linux v4.17 (x86_64) changed syscall routines.
        # split at most twice: release strings look like '5.4.0-42-generic'
        # and would otherwise make the 3-way unpack raise ValueError.
        major, minor, release = uname[2].split('.', 2)
        # BUG FIX: parentheses added around the version test -- 'and' binds
        # tighter than 'or', so the old expression applied the Linux/x86_64
        # guard to only the '>= 5' arm.
        if uname[0] == 'Linux' and uname[4] == 'x86_64' and \
           (int(major) >= 5 or (int(major) == 4 and int(minor) >= 17)):
            return result.replace(' sys_open', ' __x64_sys_openat')
        else:
            return result.replace(' sys_open', ' sys_openat')
|
the dictionary, return its value. If not, insert
*k* with a value of *default* and return *default*. *default*
defaults to ``None``. See :meth:`dict.setdefault` for more
information.
"""
if not super(OrderedMultiDict, self).__contains__(k):
self[k] = [] if default is _MISSING else [default]
return default
def copy(self):
"Return a shallow copy of the dictionary."
return self.__class__(self.iteritems(multi=True))
@classmethod
def fromkeys(cls, keys, default=None):
"""Create a dictionary from a list of keys, with all the values
set to *default*, or ``None`` if *default* is not set.
"""
return cls([(k, default) for k in keys])
    def update(self, E, **F):
        """Add items from a dictionary or iterable (and/or keyword arguments),
        overwriting values under an existing key. See
        :meth:`dict.update` for more details.
        """
        # E and F are throwback names to the dict() __doc__
        if E is self:
            return
        self_add = self.add
        if isinstance(E, OrderedMultiDict):
            # delete overwritten keys up front so the re-added values adopt
            # E's insertion order instead of interleaving with old cells
            for k in E:
                if k in self:
                    del self[k]
            for k, v in E.iteritems(multi=True):
                self_add(k, v)
        elif hasattr(E, 'keys'):
            for k in E.keys():
                self[k] = E[k]
        else:
            # iterable of pairs: on first sight of a key, clear any
            # pre-existing values once, then accumulate E's values for it
            seen = set()
            seen_add = seen.add
            for k, v in E:
                if k not in seen and k in self:
                    del self[k]
                    seen_add(k)
                self_add(k, v)
        for k in F:
            self[k] = F[k]
        return
def update_extend(self, E, **F):
"""Add items from a dictionary, iterable, and/or keyword
arguments without overwriting existing items present in the
dictionary. Like :meth:`update`, but adds to existing keys
instead of overwriting them.
"""
if E is self:
iterator = iter(E.items())
elif isinstance(E, OrderedMultiDict):
iterator = E.iteritems(multi=True)
elif hasattr(E, 'keys'):
iterator = ((k, E[k]) for k in E.keys())
else:
iterator = E
self_add = self.add
for k, v in iterator:
self_add(k, v)
def __setitem__(self, k, v):
if super(OrderedMultiDict, self).__contains__(k):
self._remove_all(k)
self._insert(k, v)
super(OrderedMultiDict, self).__setitem__(k, [v])
def __getitem__(self, k):
return super(OrderedMultiDict, self).__getitem__(k)[-1]
def __delitem__(self, k):
super(OrderedMultiDict, self).__delitem__(k)
self._remove_all(k)
def __eq__(self, other):
if self is other:
return True
try:
if len(other) != len(self):
return False
except TypeError:
return False
if isinstance(other, OrderedMultiDict):
selfi = self.iteritems(multi=True)
otheri = other.iteritems(multi=True)
zipped_items = izip_longest(selfi, otheri, fillvalue=(None, None))
for (selfk, selfv), (otherk, otherv) in zipped_items:
if selfk != otherk or selfv != otherv:
return False
if not(next(selfi, _MISSING) is _MISSING
and next(otheri, _MISSING) is _MISSING):
# leftovers (TODO: watch for StopIteration?)
return False
return True
elif hasattr(other, 'keys'):
for selfk in self:
try:
other[selfk] == self[selfk]
except KeyError:
return False
return True
return False
def __ne__(self, other):
return not (self == other)
def pop(self, k, default=_MISSING):
"""Remove all values under key *k*, returning the most-recently
inserted value. Raises :exc:`KeyError` if the key is not
present and no *default* is provided.
"""
return self.popall(k, default)[-1]
def popall(self, k, default=_MISSING):
"""Remove all values under key *k*, returning them in the form of
a list. Raises :exc:`KeyError` if the key is not present and no
*default* is provided.
"""
if super(OrderedMultiDict, self).__contains__(k):
self._remove_all(k)
if default is _MISSING:
return super(OrderedMultiDict, self).pop(k)
return super(OrderedMultiDict, self).pop(k, default)
    def poplast(self, k=_MISSING, default=_MISSING):
        """Remove and return the most-recently inserted value under the key
        *k*, or the most-recently inserted key if *k* is not
        provided. If no values remain under *k*, it will be removed
        from the OMD. Raises :exc:`KeyError` if *k* is not present in
        the dictionary, or the dictionary is empty.
        """
        if k is _MISSING:
            if self:
                # no key given: use the newest cell, i.e. the tail of the
                # circular doubly-linked list rooted at self.root
                k = self.root[PREV][KEY]
            else:
                raise KeyError('empty %r' % type(self))
        try:
            self._remove(k)
        except KeyError:
            if default is _MISSING:
                raise KeyError(k)
            return default
        values = super(OrderedMultiDict, self).__getitem__(k)
        v = values.pop()
        if not values:
            # last value for this key is gone -> drop the key entirely
            super(OrderedMultiDict, self).__delitem__(k)
        return v
    def _remove(self, k):
        # Unlink the most-recent linked-list cell for *k* and forget the key
        # once no cells remain. A missing *k* raises KeyError via self._map[k].
        values = self._map[k]
        cell = values.pop()
        # splice the cell out of the circular doubly-linked list
        cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
        if not values:
            del self._map[k]
    def _remove_all(self, k):
        # Unlink every linked-list cell recorded for *k*, then forget the key.
        # A missing *k* raises KeyError via self._map[k].
        values = self._map[k]
        while values:
            cell = values.pop()
            cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
        del self._map[k]
    def iteritems(self, multi=False):
        """Iterate over the OMD's items in insertion order. By default,
        yields only the most-recently inserted value for each key. Set
        *multi* to ``True`` to get all inserted items.
        """
        root = self.root
        curr = root[NEXT]
        if multi:
            # raw walk over every (key, value) cell in the linked list
            while curr is not root:
                yield curr[KEY], curr[VALUE]
                curr = curr[NEXT]
        else:
            # one item per unique key; self[key] is the most recent value
            for key in self.iterkeys():
                yield key, self[key]
    def iterkeys(self, multi=False):
        """Iterate over the OMD's keys in insertion order. By default, yields
        each key once, according to the most recent insertion. Set
        *multi* to ``True`` to get all keys, including duplicates, in
        insertion order.
        """
        root = self.root
        curr = root[NEXT]
        if multi:
            # raw walk over every cell in the linked list
            while curr is not root:
                yield curr[KEY]
                curr = curr[NEXT]
        else:
            # same walk, but suppress keys that were already yielded
            yielded = set()
            yielded_add = yielded.add
            while curr is not root:
                k = curr[KEY]
                if k not in yielded:
                    yielded_add(k)
                    yield k
                curr = curr[NEXT]
def itervalues(self, multi=False):
"""Iterate over the OMD's values in insertion order. By default,
yields the most-recently inserted value per unique key. Set
*multi* to ``True`` to get all values according to insertion
order.
"""
for k, v in self.iteritems(multi=multi):
yield v
def todict(self, multi=False):
"""Gets a basic :class:`dict` of the items in this dictionary. Keys
are the same as the OMD, values are the most recently inserted
values for each key.
Setting the *multi* arg to ``True`` is yields the same
result as calling :class:`dict` on the OMD, except that all the
value lists are copies that can be safely mutated.
"""
if multi:
return dict([(k, self.getlist(k)) for k in self])
return dict([(k, self[k]) for k in self])
def sorted(self, key=None, rev |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# ----------------------------------------------------------- | ---------------
from msrest.serialization import Model
class SecurityGroupViewResult(Model):
    """The information about security rules applied to the specified VM.

    :param network_interfaces: List of network interfaces on the specified VM.
    :type network_interfaces:
     list[~azure.mgmt.network.v2017_09_01.models.SecurityGroupNetworkInterface]
    """

    # msrest (de)serialization map: wire-format key and element type
    _attribute_map = {
        'network_interfaces': {'key': 'networkInterfaces', 'type': '[SecurityGroupNetworkInterface]'},
    }

    def __init__(self, network_interfaces=None):
        super(SecurityGroupViewResult, self).__init__()
        # None means the service has not populated the view yet
        self.network_interfaces = network_interfaces
|
import unittest
from test import test_support
# Skip this test if the _testcapi module isn't available.
test_support.import_module('_testcapi')
from _testcapi import _test_structmembersType, \
CHAR_MAX, CHAR_MIN, UCHAR_MAX, \
SHRT_MAX, SHRT_MIN, USHRT_MAX, \
INT_MAX, INT_MIN, UINT_MAX, \
LONG_MAX, LONG_MIN, ULONG_MAX, \
LLONG_MAX, LLONG_MIN, ULLONG_MAX
# Module-level fixture: one C-backed struct-members instance shared by all
# test classes below; the positional values initialize each T_* member.
ts=_test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8,
                           9.99999, 10.1010101010, "hi")
class ReadWriteTests(unittest.TestCase):
    """Round-trip checks: each T_* struct member stores boundary values."""

    def _roundtrip(self, name, value):
        # write the struct member, then read it back unchanged
        setattr(ts, name, value)
        self.assertEqual(getattr(ts, name), value)

    def test_bool(self):
        self._roundtrip('T_BOOL', True)
        self._roundtrip('T_BOOL', False)
        # T_BOOL rejects non-bool values, even truthy ints
        self.assertRaises(TypeError, setattr, ts, 'T_BOOL', 1)

    def test_byte(self):
        self._roundtrip('T_BYTE', CHAR_MAX)
        self._roundtrip('T_BYTE', CHAR_MIN)
        self._roundtrip('T_UBYTE', UCHAR_MAX)

    def test_short(self):
        self._roundtrip('T_SHORT', SHRT_MAX)
        self._roundtrip('T_SHORT', SHRT_MIN)
        self._roundtrip('T_USHORT', USHRT_MAX)

    def test_int(self):
        self._roundtrip('T_INT', INT_MAX)
        self._roundtrip('T_INT', INT_MIN)
        self._roundtrip('T_UINT', UINT_MAX)

    def test_long(self):
        self._roundtrip('T_LONG', LONG_MAX)
        self._roundtrip('T_LONG', LONG_MIN)
        self._roundtrip('T_ULONG', ULONG_MAX)

    @unittest.skipUnless(hasattr(ts, "T_LONGLONG"), "long long not present")
    def test_longlong(self):
        self._roundtrip('T_LONGLONG', LLONG_MAX)
        self._roundtrip('T_LONGLONG', LLONG_MIN)
        self._roundtrip('T_ULONGLONG', ULLONG_MAX)
        ## make sure these will accept a plain int as well as a long
        self._roundtrip('T_LONGLONG', 3)
        self._roundtrip('T_ULONGLONG', 4)

    def test_inplace_string(self):
        self.assertEqual(ts.T_STRING_INPLACE, "hi")
        # in-place strings are read-only and cannot be deleted
        self.assertRaises(TypeError, setattr, ts, "T_STRING_INPLACE", "s")
        self.assertRaises(TypeError, delattr, ts, "T_STRING_INPLACE")
class TestWarnings(unittest.TestCase):
    """Out-of-range writes must emit RuntimeWarning instead of raising."""

    def _assert_overflow_warns(self, name, value):
        # assigning an out-of-range value triggers a RuntimeWarning
        with test_support.check_warnings(('', RuntimeWarning)):
            setattr(ts, name, value)

    def test_byte_max(self):
        self._assert_overflow_warns('T_BYTE', CHAR_MAX+1)

    def test_byte_min(self):
        self._assert_overflow_warns('T_BYTE', CHAR_MIN-1)

    def test_ubyte_max(self):
        self._assert_overflow_warns('T_UBYTE', UCHAR_MAX+1)

    def test_short_max(self):
        self._assert_overflow_warns('T_SHORT', SHRT_MAX+1)

    def test_short_min(self):
        self._assert_overflow_warns('T_SHORT', SHRT_MIN-1)

    def test_ushort_max(self):
        self._assert_overflow_warns('T_USHORT', USHRT_MAX+1)
def test_main(verbose=None):
    """regrtest entry point: run every test class defined in this module."""
    test_support.run_unittest(__name__)

if __name__ == "__main__":
    test_main(verbose=True)
|
# Exercises Python's assignment-unpacking forms; each case prints the
# unpacked values so the output can be diffed against an expected log.

# singleton tuple <- singleton tuple
x, = 0,
print(x)
# singleton tuple <- singleton list
x, = [-1]
print(x)
# binary tuple <- binary tuple
x,y = 1,2
print(x,y)
# binary tuple swap
x,y = y,x
print(x,y)
# ternary tuple <- ternary tuple
x,y,z = 3,4,5
print(x,y,z)
# singleton list <- singleton list
[x] = [42]
print(x)
# singleton list <- singleton tuple
[x] = 43,
print(x)
# binary list <- binary list
[x,y] = [6,7]
# NOTE(review): no print here, so the [6,7] result is never shown and is
# immediately overwritten below -- confirm whether a print(x,y) was intended
# to match the other cases (adding one would change the expected output).
# binary list <- binary tuple
# BUG FIX: the RHS was a list literal, contradicting the comment; now a tuple
# (printed output is unchanged).
[x,y] = 44,45
print(x,y)
# binary tuple (parens) <- binary list
(x,y) = [7,8]
print(x,y)
# binary tuple <- result of function call
(x,y) = (lambda: (9,10))()
print(x,y)
# nested binary tuple (parens) <- nested binary tuple (parens)
((x,y),z) = ((11,12),13)
print(x,y,z)
# nested binary tuple <- nested binary tuple
(x,y),z = (14,15),16
print(x,y,z)
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os
from sqoop.conf import SQOOP_CONF_DIR
LOG = logging.getLogger(__name__)

# Lazily-populated cache of the parsed sqoop.properties (see get_props()).
_PROPERTIES_DICT = None

# Property key that selects sqoop's authentication mechanism (SIMPLE/KERBEROS).
_CONF_SQOOP_AUTHENTICATION_TYPE = 'org.apache.sqoop.security.authentication.type'
def reset():
  """Discard the cached properties; the next get_props() call re-parses."""
  global _PROPERTIES_DICT
  _PROPERTIES_DICT = None
def get_props():
  """Return the sqoop.properties dict, parsing the file on first use."""
  if _PROPERTIES_DICT is None:
    _parse_properties()
  return _PROPERTIES_DICT
def has_sqoop_has_security():
  """Return True when sqoop is configured for Kerberos authentication."""
  auth_type = get_props().get(_CONF_SQOOP_AUTHENTICATION_TYPE, 'SIMPLE')
  return auth_type.upper() == 'KERBEROS'
def _parse_properties():
  """Load sqoop.properties from SQOOP_CONF_DIR into the module-level cache."""
  global _PROPERTIES_DICT
  conf_path = os.path.join(SQOOP_CONF_DIR.get(), 'sqoop.properties')
  _PROPERTIES_DICT = _parse_site(conf_path)
def _parse_site(site_path):
try:
with open(site_path, 'r') as f:
data = f.read()
except IOError as err:
if err.errno != errno.ENOENT:
LOG.error('Cannot read from "%s": %s' % (site_path, err))
return
data = ""
return | dict([line.split('=', 1) for line in data.split('\n') if '=' in line and not line.star | tswith('#')])
|
# (c) 2013, Ovais Tariq <me@ovaistariq.net>
#
# This file is part of mha_helper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public | License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# alo | ng with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
# Package metadata.
# NOTE(review): assigning to __name__ rebinds the module's own name, which
# breaks ``if __name__ == '__main__'`` checks and confuses tooling; the
# conventional attribute would be __title__ -- confirm before changing,
# since other code may import __name__ from this module.
__name__ = 'mha_helper'
__author__ = 'Ovais Tariq'
__email__ = 'me@ovaistariq.net'
__version__ = '0.4.2'
__url__ = 'https://github.com/ovaistariq/mha-helper'
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file './acq4/analysis/old/StdpCtrlTemplate.ui'
#
# Created: Tue Dec 24 01:49:15 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: fall back gracefully when the
# running PyQt4 lacks QString.fromUtf8 / UnicodeUTF8 (e.g. under the
# Python-3-style string API, where plain str is used).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_StdpCtrlWidget(object):
    """pyuic4-generated layout for the STDP analysis control panel.

    Generated from StdpCtrlTemplate.ui -- edit the .ui file and regenerate
    rather than changing this class by hand.
    """
    def setupUi(self, StdpCtrlWidget):
        """Build the widget tree and grid layout onto *StdpCtrlWidget*."""
        StdpCtrlWidget.setObjectName(_fromUtf8("StdpCtrlWidget"))
        StdpCtrlWidget.resize(227, 321)
        self.gridLayout = QtGui.QGridLayout(StdpCtrlWidget)
        self.gridLayout.setMargin(0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # row 0: PSP threshold label + spin box
        self.label = QtGui.QLabel(StdpCtrlWidget)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.thresholdSpin = QtGui.QDoubleSpinBox(StdpCtrlWidget)
        self.thresholdSpin.setObjectName(_fromUtf8("thresholdSpin"))
        self.gridLayout.addWidget(self.thresholdSpin, 0, 1, 1, 2)
        # row 1: post-stimulus duration
        self.label_2 = QtGui.QLabel(StdpCtrlWidget)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        self.durationSpin = QtGui.QSpinBox(StdpCtrlWidget)
        self.durationSpin.setObjectName(_fromUtf8("durationSpin"))
        self.gridLayout.addWidget(self.durationSpin, 1, 1, 1, 2)
        # row 2: slope width (custom SpinBox; imported at module bottom,
        # pyuic's convention for promoted widgets)
        self.label_4 = QtGui.QLabel(StdpCtrlWidget)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
        self.slopeWidthSpin = SpinBox(StdpCtrlWidget)
        self.slopeWidthSpin.setObjectName(_fromUtf8("slopeWidthSpin"))
        self.gridLayout.addWidget(self.slopeWidthSpin, 2, 1, 1, 2)
        # row 3: AP exclusion checkbox
        self.apExclusionCheck = QtGui.QCheckBox(StdpCtrlWidget)
        self.apExclusionCheck.setObjectName(_fromUtf8("apExclusionCheck"))
        self.gridLayout.addWidget(self.apExclusionCheck, 3, 0, 1, 1)
        # row 4: AP exclusion threshold
        self.label_3 = QtGui.QLabel(StdpCtrlWidget)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout.addWidget(self.label_3, 4, 0, 1, 2)
        self.apthresholdSpin = QtGui.QDoubleSpinBox(StdpCtrlWidget)
        self.apthresholdSpin.setObjectName(_fromUtf8("apthresholdSpin"))
        self.gridLayout.addWidget(self.apthresholdSpin, 4, 2, 1, 1)
        # bottom spacer pushes all controls to the top
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 5, 0, 1, 1)
        self.retranslateUi(StdpCtrlWidget)
        QtCore.QMetaObject.connectSlotsByName(StdpCtrlWidget)
    def retranslateUi(self, StdpCtrlWidget):
        """Apply (re)translated UI strings; called from setupUi."""
        StdpCtrlWidget.setWindowTitle(_translate("StdpCtrlWidget", "Form", None))
        self.label.setText(_translate("StdpCtrlWidget", "PspThreshold:", None))
        self.label_2.setText(_translate("StdpCtrlWidget", "Post-stim Duration (ms):", None))
        self.label_4.setText(_translate("StdpCtrlWidget", "Slope width:", None))
        self.apExclusionCheck.setText(_translate("StdpCtrlWidget", "Exclude APs", None))
        self.label_3.setText(_translate("StdpCtrlWidget", "Exclusion Threshold (mV):", None))
from SpinBox import SpinBox
|
# c | oding: utf-8
class ActMail(object):
    """Static configuration for the account-activation email.

    ``subject``/``content`` are user-facing Chinese strings (activation
    email subject and body prefix for the "BaoBao cloud" signup flow).
    """
    # NOTE(security): plaintext credentials committed to source control --
    # move `source`/`secret` into environment variables or an untracked
    # config file, and rotate this leaked secret.
    subject = '宝宝云帐号注册激活邮件'
    content = '感谢您申请注册宝宝云账号! 请点击链接完成注册: '
    source = 'enfancemill@gmail.com'
    secret = 'kwhr8xkq4eoq6bvvh6nvr6267lrdcqb2'
|
"""Update vulnerability sources."""
from selinon import StoragePool
from f8a_worker.base import BaseTask
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Ecosystem
from f8a_worker.solver import get_ecosystem_solver, OSSIndexDependencyParser
from f8a_worker.workers import CVEcheckerTask
class CVEDBSyncTask(BaseTask):
    """Update vulnerability sources."""

    def components_to_scan(self, previous_sync_timestamp, only_already_scanned):
        """Get EPV that were recently updated in OSS Index, so they can contain new vulnerabilities.

        Get components (e:p:v) that were recently (since previous_sync_timestamp) updated
        in OSS Index, which means that they can contain new vulnerabilities.

        :param previous_sync_timestamp: timestamp of previous check
        :param only_already_scanned: include already scanned components only
        :return: generator of e:p:v
        """
        # TODO: reduce cyclomatic complexity
        to_scan = []
        rdb = StoragePool.get_connected_storage('BayesianPostgres')
        # only the nuget ecosystem is synced here at present
        for ecosystem in ['nuget']:
            ecosystem_solver = get_ecosystem_solver(self.storage.get_ecosystem(ecosystem),
                                                    with_parser=OSSIndexDependencyParser())
            self.log.debug("Retrieving new %s vulnerabilities from OSS Index", ecosystem)
            ossindex_updated_packages = CVEcheckerTask.\
                query_ossindex_vulnerability_fromtill(ecosystem=ecosystem,
                                                      from_time=previous_sync_timestamp)
            for ossindex_updated_package in ossindex_updated_packages:
                # maven-backed ecosystems address packages as "group:name"
                if Ecosystem.by_name(rdb.session, ecosystem).is_backed_by(EcosystemBackend.maven):
                    package_name = "{g}:{n}".format(g=ossindex_updated_package['group'],
                                                    n=ossindex_updated_package['name'])
                else:
                    package_name = ossindex_updated_package['name']
                package_affected_versions = set()
                for vulnerability in ossindex_updated_package.get('vulnerabilities', []):
                    for version_string in vulnerability.get('versions', []):
                        try:
                            # expand each version-range expression to the
                            # concrete versions it matches
                            resolved_versions = ecosystem_solver.\
                                solve(["{} {}".format(package_name, version_string)],
                                      all_versions=True)
                        except Exception:
                            self.log.exception("Failed to resolve %r for %s:%s", version_string,
                                               ecosystem, package_name)
                            continue
                        resolved_versions = resolved_versions.get(package_name, [])
                        if only_already_scanned:
                            # restrict re-scans to versions we already analysed
                            already_scanned_versions =\
                                [ver for ver in resolved_versions if
                                 self.storage.get_analysis_count(ecosystem, package_name, ver) > 0]
                            package_affected_versions.update(already_scanned_versions)
                        else:
                            package_affected_versions.update(resolved_versions)

                for version in package_affected_versions:
                    to_scan.append({
                        'ecosystem': ecosystem,
                        'name': package_name,
                        'version': version
                    })
        msg = "Components to be {prefix}scanned for vulnerabilities: {components}".\
            format(prefix="re-" if only_already_scanned else "",
                   components=to_scan)
        self.log.info(msg)
        return to_scan

    def execute(self, arguments):
        """Start the task.

        :param arguments: optional argument 'only_already_scanned' to run only
                          on already analysed packages
        :return: EPV dict describing which packages should be analysed
        """
        only_already_scanned = arguments.pop('only_already_scanned', True) if arguments else True
        ignore_modification_time = (arguments.pop('ignore_modification_time', False)
                                    if arguments else False)

        CVEcheckerTask.update_victims_cve_db_on_s3()

        self.log.debug('Updating sync associated metadata')
        s3 = StoragePool.get_connected_storage('S3VulnDB')
        previous_sync_timestamp = s3.update_sync_date()
        if ignore_modification_time:
            # force a full sync by pretending we never synced before
            previous_sync_timestamp = 0

        # get components which might have new vulnerabilities since previous sync
        to_scan = self.components_to_scan(previous_sync_timestamp, only_already_scanned)
        return {'modified': to_scan}
|
to read from and write to the SQLite backend.
"""
import sqlite3
import codecs
import os
import re
def setup_db():
    """(Re)create the gitdox SQLite schema from scratch.

    Drops the docs, users, metadata and validate tables if present, then
    recreates the schema. All previously stored data is lost.
    """
    dbpath = os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep + "gitdox.db"
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()

    # Drop tables if they exist
    for table_name in ("docs", "users", "metadata", "validate"):
        cur.execute("DROP TABLE IF EXISTS " + table_name)
    conn.commit()

    # Create tables
    # (the users table is currently unused; kept here as documentation)
    #cur.execute('''CREATE TABLE IF NOT EXISTS users
    #             (id INTEGER PRIMARY KEY AUTOINCREMENT, username text)''')
    # docs table
    cur.execute('''CREATE TABLE IF NOT EXISTS docs
                 (id INTEGER PRIMARY KEY AUTOINCREMENT, name text, corpus text, status text,assignee_username text ,filename text, content text, mode text, schema text, validation text, timestamp text, cache text)''')
    # metadata table
    cur.execute('''CREATE TABLE IF NOT EXISTS metadata
                 (docid INTEGER, metaid INTEGER PRIMARY KEY AUTOINCREMENT, key text, value text, corpus_meta text, UNIQUE (docid, metaid) ON CONFLICT REPLACE, UNIQUE (docid, key) ON CONFLICT REPLACE)''')
    # validation table
    cur.execute('''CREATE TABLE IF NOT EXISTS validate
                 (doc text, corpus text, domain text, name text, operator text, argument text, id INTEGER PRIMARY KEY AUTOINCREMENT)''')
    conn.commit()
    conn.close()
def create_document(doc_id, name, corpus, status, assigned_username, filename, content, mode="xml", schema='--none--'):
    """Insert a new document row into the docs table.

    BUG FIX: the *mode* argument was silently ignored -- the INSERT
    hard-coded 'xml', so documents created in any other mode (e.g.
    'ether') were stored with the wrong mode. The parameter is now bound
    like the others; the default is unchanged.
    """
    generic_query("INSERT INTO docs(id, name,corpus,status,assignee_username,filename,content,mode,schema) VALUES(?,?,?,?,?,?,?,?,?)",
                  (int(doc_id), name, corpus, status, assigned_username, filename, content, mode, schema))
def generic_query(sql, params, return_new_id=False):
    """Execute *sql* against gitdox.db and return the result.

    :param sql: SQL statement with ``?`` placeholders
    :param params: parameter tuple, or None for a parameterless statement
    :param return_new_id: when True, return the last inserted rowid
                          instead of the fetched rows

    ``with conn`` commits on success and rolls back on error; the
    connection is now always closed afterwards (previously it was left
    open for the garbage collector to reclaim).
    """
    # generic_query("DELETE FROM rst_nodes WHERE doc=? and project=?",(doc,project))
    dbpath = os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep + "gitdox.db"
    conn = sqlite3.connect(dbpath)
    try:
        with conn:
            cur = conn.cursor()
            if params is not None:
                cur.execute(sql, params)
            else:
                cur.execute(sql)
            if return_new_id:
                return cur.lastrowid
            rows = cur.fetchall()
            return rows
    finally:
        conn.close()
def invalidate_doc_by_name(doc, corpus):
    """Clear the cached validation result for *doc* in *corpus* (LIKE match)."""
    generic_query("UPDATE docs SET validation=NULL WHERE name like ? and corpus like ?", (doc, corpus))
def invalidate_ether_docs(doc,corpus):
generic_query("UPDATE docs SET validation=NULL WHERE name like ? and corpus like ? and mode = 'ether'", (doc, corpus))
def invalidate_doc_by_id(id):
generic_query("UPDATE docs SET validation=NULL WHERE id=?", (id,))
def doc_exists(doc,corpus):
res = generic_query("SELECT name from docs where name=? and corpus=?",(doc,corpus))
return len(res) > 0
def save_changes(id,content):
"""save change from the editor"""
generic_query("UPDATE docs SET content=? WHERE id=?",(content,id))
invalidate_doc_by_id(id)
def update_assignee(doc_id,user_name):
generic_query("UPDATE docs SET assignee_username=? WHERE id=?",(user_name,doc_id))
def update_status(id,status):
generic_query("UPDATE docs SET status=? WHERE id=?",(status,id))
def update_docname(id,docname):
generic_query("UPDATE docs SET name=? WHERE id=?",(docname,id))
invalidate_doc_by_id(id)
def update_filename(id,filename):
generic_query("UPDATE docs SET filename=? WHERE id=?",(filename,id))
def update_corpus(id,corpusname):
generic_query("UPDATE docs SET corpus=? WHERE id=?",(corpusname,id))
invalidate_doc_by_id(id)
def update_mode(id,mode):
generic_query("UPDATE docs SET mode=? WHERE id=?",(mode,id))
def update_schema(id, schema):
generic_query("UPDATE docs SET schema=? WHERE id=?", (schema, id))
def delete_doc(id):
generic_query("DELETE FROM docs WHERE id=?",(id,))
generic_query("DELETE FROM metadata WHERE docid=?", (id,))
def cell(text):
    """Render *text* as an HTML table cell, coercing ints to str first."""
    content = str(text) if isinstance(text, int) else text
    return "\n  <td>{}</td>".format(content)
def update_meta(meta_id,doc_id,key,value,corpus=False):
    """Overwrite metadata row *meta_id*.

    With corpus=True the pair is stored as corpus-level metadata (docid is
    NULL and corpus_meta is the document's corpus); otherwise it is attached
    to the document itself. Either way the document's cached validation is
    invalidated.
    """
    if corpus:
        _, corpus_name, _, _, _, _, _ = get_doc_info(doc_id)
        generic_query("REPLACE INTO metadata(metaid,docid,key,value,corpus_meta) VALUES(?,?,?,?,?)", (meta_id, None, key, value,corpus_name))
    else:
        generic_query("REPLACE INTO metadata(metaid,docid,key,value,corpus_meta) VALUES(?,?,?,?,?)",(meta_id,doc_id,key,value,None))
    invalidate_doc_by_id(doc_id)
def save_meta(doc_id,key,value,corpus=False):
    """Insert (or replace) a metadata key/value pair and return the new row id."""
    if corpus:
        _, corpus_name, _, _, _, _, _ = get_doc_info(doc_id)
        new_id = generic_query("REPLACE INTO metadata(docid,key,value,corpus_meta) VALUES(?,?,?,?)", (None, key, value,corpus_name), return_new_id = True)
    else:
        new_id = generic_query("INSERT OR REPLACE INTO metadata(docid,key,value,corpus_meta) VALUES(?,?,?,?)",(doc_id,key,value,None), return_new_id = True)
    invalidate_doc_by_id(doc_id)
    return new_id
def delete_meta(metaid, doc_id, corpus=False):
    """Delete one metadata row; doc-level deletes also invalidate the doc's validation."""
    generic_query("DELETE FROM metadata WHERE metaid=?", (metaid,))
    if not corpus:
        invalidate_doc_by_id(doc_id)
def get_doc_info(doc_id):
    """Return (name, corpus, filename, status, assignee, mode, schema) for
    *doc_id*, or the empty result list when the id is unknown."""
    rows = generic_query("SELECT name,corpus,filename,status,assignee_username,mode,schema FROM docs WHERE id=?", (int(doc_id),))
    return rows[0] if rows else rows
def get_doc_content(doc_id):
    """Return the stored content of a document.

    NOTE(review): raises IndexError if *doc_id* does not exist — callers
    are expected to pass a valid id.
    """
    res = generic_query("SELECT content FROM docs WHERE id=?", (int(doc_id),))
    return res[0][0]
def get_all_doc_ids_for_corpus(corpus):
    """Return the ids of all documents in *corpus* as a concrete list.

    A list comprehension replaces the original ``map``: on Python 3 ``map``
    is a lazy iterator, so callers that indexed or re-iterated the result
    would break; on Python 2 the behavior is identical.
    """
    rows = generic_query("SELECT id FROM docs WHERE corpus=?", (corpus,))
    return [row[0] for row in rows]
def get_all_docs(corpus=None, status=None):
    """Return (id, name, corpus, mode, content) rows, optionally filtered
    by corpus and/or workflow status."""
    sql = "SELECT id, name, corpus, mode, content FROM docs"
    filters = []
    values = []
    if corpus is not None:
        filters.append("corpus=?")
        values.append(corpus)
    if status is not None:
        filters.append("status=?")
        values.append(status)
    if not filters:
        return generic_query(sql, None)
    sql += " where " + " and ".join(filters)
    return generic_query(sql, tuple(values))
def get_doc_meta(doc_id, corpus=False):
    """Return metadata rows for a document, or for its whole corpus if *corpus* is True."""
    if corpus:
        fields = get_doc_info(doc_id)
        if len(fields) > 0:
            _, corpus_name, _, _, _, _, _ = fields
            return generic_query("SELECT * FROM metadata WHERE corpus_meta=? ORDER BY key COLLATE NOCASE",(corpus_name,))
        else:
            # Unknown doc id: no info row, so no corpus metadata either.
            return []
    else:
        return generic_query("SELECT * FROM metadata WHERE docid=? ORDER BY key COLLATE NOCASE", (int(doc_id),))
def get_corpora():
    """Return the distinct corpus names, case-insensitively sorted."""
    return generic_query("SELECT DISTINCT corpus FROM docs ORDER BY corpus COLLATE NOCASE", None)
def get_validate_rules(sort=None, domain=None):
    """Fetch validation rules, optionally filtered by *domain* and ordered by *sort*.

    Security: *sort* is interpolated directly into the SQL (ORDER BY cannot
    be bound as a parameter), so it is restricted to identifier characters,
    commas and spaces to prevent SQL injection from a caller-supplied value.
    """
    query = "SELECT corpus, doc, domain, name, operator, argument, id FROM validate"
    args = []
    if domain:
        query += " WHERE domain=? "
        args.append(domain)
    if sort:
        if not all(ch.isalnum() or ch in "_, " for ch in sort):
            raise ValueError("Unsafe sort expression: %r" % sort)
        query += " ORDER BY " + sort
    return generic_query(query, args)
def get_rule_domain(id):
    """Return the domain ('xml'/'meta'/'ether'/'export') of rule *id*."""
    return generic_query("SELECT domain FROM validate WHERE id=?", (id,))[0][0]
def get_xml_rules():
    """All validation rules for the xml domain."""
    return get_validate_rules(domain='xml')
def get_meta_rules():
    """All validation rules for the metadata domain."""
    return get_validate_rules(domain='meta')
def get_ether_rules():
    """All validation rules for the spreadsheet (ether) domain."""
    return get_validate_rules(domain='ether')
def get_export_rules():
    """All validation rules for the export domain."""
    return get_validate_rules(domain='export')
def create_validate_rule(doc, corpus, domain, name, operator, argument):
    """Insert a validation rule and return its new row id.

    Meta rules affect every document, so all cached validations are cleared;
    other domains only invalidate spreadsheet (ether) docs.
    """
    new_id = generic_query("INSERT INTO validate(doc,corpus,domain,name,operator,argument) VALUES(?,?,?,?,?,?)", (doc, corpus, domain, name, operator, argument), return_new_id = True)
    if domain == "meta":
        invalidate_doc_by_name("%","%")
    else:
        invalidate_ether_docs("%","%")
    return new_id
def delete_validate_rule(id):
    """Delete a validation rule and clear every doc's cached validation."""
    generic_query("DELETE FROM validate WHERE id=?", (int(id),))
    invalidate_doc_by_name("%", "%")
def update_validate_rule(doc, corpus, domain, name, operator, argument, id):
    """Overwrite all fields of an existing validation rule and invalidate caches."""
    generic_query("UPDATE validate SET doc = ?, corpus = ?, domain = ?, name = ?, operator = ?, argument = ? WHERE id = ?",(doc, corpus, domain, name, operator, argument, id))
    if domain == "meta":
        invalidate_doc_by_name("%", "%")
    else:
        invalidate_ether_docs("%", "%")
def update_validation(doc_id,validation):
    """Store the serialized validation report for a document."""
    generic_query("UPDATE docs SET validation=? where id=?",(validation,doc_id))
def update_timestamp(doc_id, timestamp):
generic_query("UPDATE docs SET timesta |
impor | t pcf8591read
"""
This is a barebones script for controlling the work flow of recording EMG words and associating the data with a specific word, captured in the filename.
"""
# Only run the capture when executed as a script, so importing this module
# for its docstring or tooling does not block on user input.
if __name__ == "__main__":
    reader = pcf8591read.adc_reader()
    # NOTE(review): on Python 2 this should be raw_input(); input() would
    # eval the typed text. Assuming Python 3 here — confirm.
    filename = input('Current word:')
    reader.record = True
    reader.run(filename)
|
im | port _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the volume trace's ``reversescale`` property."""

    def __init__(self, plotly_name="reversescale", parent_name="volume", **kwargs):
        # Pop the defaults out of kwargs so explicit caller values win.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(ReversescaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handle lease database updates from DHCP servers.
"""
from __future__ import print_function
import os
import sys
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from nova.conductor import rpcapi as conductor_rpcapi
import nova.conf
from nova import config
from nova import context
import nova.db.api
from nova import exception
from nova.i18n import _LE, _LW
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as objects_base
from nova import rpc
# Global config handle; pull in the options this script reads from other
# nova modules, and set up the module logger.
CONF = nova.conf.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('network_manager', 'nova.service')
LOG = logging.getLogger(__name__)
def add_lease(mac, ip_address):
    """Record an IP assignment reported by the DHCP server."""
    rpc_client = network_rpcapi.NetworkAPI()
    admin_ctxt = context.get_admin_context()
    rpc_client.lease_fixed_ip(admin_ctxt, ip_address, CONF.host)
def old_lease(mac, ip_address):
    """Called when an old lease is recognized.

    Intentionally a no-op.
    """
    # NOTE(vish): We assume we heard about this lease the first time.
    #             If not, we will get it the next time the lease is
    #             renewed.
    pass
def del_lease(mac, ip_address):
    """Release a fixed IP when its DHCP lease expires."""
    rpc_client = network_rpcapi.NetworkAPI()
    admin_ctxt = context.get_admin_context()
    rpc_client.release_fixed_ip(admin_ctxt, ip_address, CONF.host, mac)
def init_leases(network_id):
    """Get the list of hosts for a network."""
    ctxt = context.get_admin_context()
    network = objects.Network.get_by_id(ctxt, network_id)
    # Delegate to the configured network manager for the actual lease list.
    network_manager = importutils.import_object(CONF.network_manager)
    return network_manager.get_dhcp_leases(ctxt, network)
def add_action_parsers(subparsers):
    """Register the dnsmasq action subcommands on *subparsers*."""
    subparsers.add_parser('init')
    # NOTE(cfb): dnsmasq always passes mac and ip, plus hostname when
    #            known. We don't care about hostname, but argparse will
    #            complain if we do not accept it.
    for action, handler in (('add', add_lease),
                            ('del', del_lease),
                            ('old', old_lease)):
        parser = subparsers.add_parser(action)
        parser.add_argument('mac')
        parser.add_argument('ip')
        parser.add_argument('hostname', nargs='?', default='')
        parser.set_defaults(func=handler)
# Expose the dnsmasq action (init/add/del/old) as a CLI subcommand.
CONF.register_cli_opt(
    cfg.SubCommandOpt('action',
                      title='Action options',
                      help='Available dhcpbridge options',
                      handler=add_action_parsers))
def block_db_access():
    """Replace the DB API implementation with a stub that raises on use.

    nova-dhcpbridge must go through the conductor; any direct DB access is
    a bug, so log the stack and raise DBNotAllowed.
    """
    class NoDB(object):
        def __getattr__(self, attr):
            # Any attribute chain resolves back to the stub itself...
            return self
        def __call__(self, *args, **kwargs):
            # ...so every eventual call lands here and fails loudly.
            stacktrace = "".join(traceback.format_stack())
            LOG.error(_LE('No db access allowed in nova-dhcpbridge: %s'),
                      stacktrace)
            raise exception.DBNotAllowed('nova-dhcpbridge')
    nova.db.api.IMPL = NoDB()
def main():
    """Parse environment and arguments and call the appropriate action.

    Returns 1 when NETWORK_ID is required but unset; otherwise None.
    """
    config.parse_args(sys.argv,
        default_config_files=jsonutils.loads(os.environ['CONFIG_FILE']))
    logging.setup(CONF, "nova")
    global LOG
    LOG = logging.getLogger('nova.dhcpbridge')
    if CONF.action.name == 'old':
        # NOTE(sdague): old is the most frequent message sent, and
        # it's a noop. We should just exit immediately otherwise we
        # can stack up a bunch of requests in dnsmasq. A SIGHUP seems
        # to dump this list, so actions queued up get lost.
        return
    objects.register_all()
    if not CONF.conductor.use_local:
        # All DB work must go through the conductor when not in local mode.
        block_db_access()
        objects_base.NovaObject.indirection_api = \
            conductor_rpcapi.ConductorAPI()
    else:
        LOG.warning(_LW('Conductor local mode is deprecated and will '
                        'be removed in a subsequent release'))
    if CONF.action.name in ['add', 'del']:
        LOG.debug("Called '%(action)s' for mac '%(mac)s' with IP '%(ip)s'",
                  {"action": CONF.action.name,
                   "mac": CONF.action.mac,
                   "ip": CONF.action.ip})
        CONF.action.func(CONF.action.mac, CONF.action.ip)
    else:
        # 'init': print the current leases for the network dnsmasq serves.
        try:
            network_id = int(os.environ.get('NETWORK_ID'))
        except TypeError:
            LOG.error(_LE("Environment variable 'NETWORK_ID' must be set."))
            return(1)
        print(init_leases(network_id))
    rpc.cleanup()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova.db import api as db
from nova.objects import base
from nova.objects import fields
@base.NovaObjectRegistry.register
class VirtCPUModel(base.NovaObject):
    """Versioned object describing a guest CPU model (arch, vendor,
    topology, feature list, mode, model name and match policy)."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'arch': fields.ArchitectureField(nullable=True),
        'vendor': fields.StringField(nullable=True),
        'topology': fields.ObjectField('VirtCPUTopology',
                                       nullable=True),
        'features': fields.ListOfObjectsField("VirtCPUFeature",
                                              default=[]),
        'mode': fields.CPUModeField(nullable=True),
        'model': fields.StringField(nullable=True),
        'match': fields.CPUMatchField(nullable=True),
    }
    def obj_load_attr(self, attrname):
        # Unset fields are simply treated as None rather than lazy-loaded.
        setattr(self, attrname, None)
    def to_json(self):
        """Serialize this object (including version info) to a JSON string."""
        return jsonutils.dumps(self.obj_to_primitive())
    @classmethod
    def from_json(cls, jsonstr):
        """Rebuild an instance from a string produced by to_json()."""
        return cls.obj_from_primitive(jsonutils.loads(jsonstr))
    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid):
        """Load the stored vCPU model for an instance, or None when absent."""
        db_extra = db.instance_extra_get_by_instance_uuid(
            context, instance_uuid, columns=['vcpu_model'])
        if not db_extra or not db_extra['vcpu_model']:
            return None
        return cls.obj_from_primitive(jsonutils.loads(db_extra['vcpu_model']))
@base.NovaObjectRegistry.register
class VirtCPUFeature(base.NovaObject):
    """Versioned object for a single named CPU feature and its policy."""
    VERSION = '1.0'
    fields = {
        'policy': fields.CPUFeaturePolicyField(nullable=True),
        'name': fields.StringField(nullable=False),
    }
    def obj_load_attr(self, attrname):
        # Unset fields are simply treated as None rather than lazy-loaded.
        setattr(self, attrname, None)
|
# -*- coding: utf-8 -*-
'''
accpy.simulate.const
Felix Kramer (felix.kramer@physik.hu-berlin.de)
'''
# Physical constants, SI units unless noted.
pi = 3.141592653589793115997963468544185161590576171875  # circumference/diameter ratio
cl = 299792458.       # speed of light (m/s)
qe = 1.602176565e-19  # elementary charge (e)=(As)=(C)
me = 9.10938291e-31   # electron mass (kg)
mp = 1.672621777e-27  # proton mass (kg)
mu = 1.883531475e-28  # muon mass (kg)
# Rest energies (J)=(Nm)=(Ws)
Ee = me*cl**2         # electron
Ep = mp*cl**2         # proton
Eu = mu*cl**2         # muon
# Classical particle radii (m)
re = qe**2/(me*1e7)   # electron
rp = qe**2/(mp*1e7)   # proton
ru = qe**2/(mu*1e7)   # muon
u0 = 4*pi*1E-7        # vacuum permeability / magnetic field constant (N/A^2)=(Henry/m)
e0 = 1/(u0*cl**2)     # vacuum permittivity / electrical field constant (As/Vm)
hp = 6.62606957e-34   # Planck constant (Js)
hb = hp/2/pi          # reduced Planck constant (Js)
kb = 1.3806488e-23    # Boltzmann constant (J/K)
NA = 6.02214129e23    # Avogadro number (1/mole)
RG = kb*NA            # gas constant (J/Kmole)
Gr = 6.67384e-11      # gravitational constant (Nm^2/kg^2)
ge = 9.812753         # gravitational acceleration, Berlin (m/s^2)
|
import visualizer as v
import os as os
import json
from shutil import rmtree
from os.path import expanduser
from os.path import join as OSpathJoin
# Module-level configuration state.
# NOTE(review): these are hard-coded developer paths. They also contain
# unescaped backslashes ("C:\Users...") — valid only on Python 2, where
# unknown escapes are kept literally; on Python 3 "\U" is a SyntaxError.
global appLoc
appLoc = "C:\Users\john\Desktop\jDesigner"#C:\\Program Files\\aggendo\\jCode\\"
configFold = "C:\Users\john\Desktop\jDesigner\config"
recent = ""
canvas = ""
shortcuts = ""
prefs = ""
#jDe = json.JSONDecoder
# NOTE(review): ``file`` is the Python 2 built-in type; these are just
# placeholder values until real handles are opened.
recF = file
prefF = file
tempFold = "C:\\Users\\john\\Desktop\\jDesigner\\temp"
def genPaths():
    """Derive the app/config/temp folders from visualizer's install location.

    Fix: the original assigned plain locals, so the module-level appLoc,
    configFold and tempFold were never actually updated and the function
    had no effect; the ``global`` statement makes the rebinds stick.
    """
    global appLoc, configFold, tempFold
    partialPath = v.__file__  # get the path to visualizer.pyc
    partialPath = partialPath[:-15]  # strip away the /visualizer.pyc
    appLoc = partialPath
    configFold = OSpathJoin(partialPath, "config")
    tempFold = OSpathJoin(partialPath, "temp")
    print(partialPath)
genPaths()
def getTemp():
    """Return the path of the temp folder."""
    return(tempFold) #TODO make tempFold configurable
def singleton(cls) | :
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
@singleton
class MyClass:
    """Singleton that ensures the application folder exists on first use."""
    appLoc = ""

    def __init__(self):
        # Fix: the original declared __init__ without ``self``, so
        # instantiating the singleton raised TypeError.
        # NOTE(review): appLoc here resolves to the module-level global,
        # not the class attribute above — confirm which was intended.
        if not os.path.exists(appLoc):
            os.makedirs(appLoc)
def getRecent():
    """Return the parsed recent-files list from recent.json."""
    recF = open(recent, "r")
    jRec = json.load(recF)
    recF.close()
    return(jRec)
#TODO has something that add the path to identical filenames in drop down
def lastFile():
    """Return the name of the most recently opened file, or None if none."""
    recF = open(recent, "r")
    jRec = json.loads(recF.read())
    recF.close()
    if(len(jRec)!=0):
        return(jRec[0]["name"])
    else:
        return(None)
def addRecent(name, path):
    """Push (name, path) to the front of the recents list.

    An existing entry with the same path is moved to the front rather than
    duplicated, and the list is capped at the 'recents' preference.
    """
    recF = open(recent, "r")
    jRec = json.JSONDecoder()
    jRec = jDe.decode(recF.read()) if False else None  # placeholder
def getSetting(name):
    """Return the preference value stored under *name* in the prefs file.

    Fix: the original wrote ``prefF.close`` without parentheses, so the
    prefs file handle was never actually closed; a ``with`` block now
    guarantees closure even on a JSON error.
    """
    global prefs
    with open(prefs, "r") as prefF:
        jPref = json.loads(prefF.read())
    return(jPref[name])
def storeSetting(name, value):
    """Persist preference *name* = *value* back into the prefs JSON file."""
    prefF = open(prefs, "r")
    jPref = json.loads(prefF.read())
    prefF.close()
    # Rewrite the whole file with the updated mapping, pretty-printed.
    prefF = open(prefs, "w+")
    jPref[name] = value
    prefF.write(json.dumps(jPref, sort_keys=True, indent=4, separators=(',', ': ')))
    prefF.close()
def createDefaultPrefs():
    """Return the default preferences serialized as pretty-printed JSON."""
    defaults = {
        'recents': 3,
        'sizeX': 700,
        'sizeY': 600,
        'defaultBitSize': 3.3,
        "defaultFolder": expanduser("~"),
        'defaultHomingPos': 0,  # 0=bot left, 1=bot right, 2=top left, 3=top right
    }
    return json.dumps(defaults, sort_keys=True, indent=4, separators=(',', ': '))
def createRecentsList():
    """Return the placeholder recents structure used before any file is opened."""
    placeholder = {'file.j': 'location'}
    return [placeholder]
def __init__():
    """Create the config folder and default config files on first run, then
    point the module-level globals at the JSON file *paths*.

    NOTE(review): despite the name this is a plain module-level function,
    not a class initializer.
    """
    global recent
    global canvas
    global shortcuts
    global prefs
    if not os.path.exists(appLoc):
        print("creating files")
        os.makedirs(appLoc)
    if not os.path.exists(configFold):
        os.mkdir(configFold)#{"filename": "filepath"},{"otherName": "filepath"}
        recent = open(os.path.join(configFold, "recent.json"), "w+")
        recent.write('[]')# indent=4))# separtors=(',', ': ')))
        canvas = open(os.path.join(configFold, "canvas.json"), "w+")
        shortcuts = open(os.path.join(configFold, "shortcuts.json"), "w+")
        prefs = open(os.path.join(configFold, "prefs.json"), "w+")
        prefs.write(createDefaultPrefs())
        #files are created
        recent.close()
        canvas.close()
        shortcuts.close()
        prefs.close()
    # From here on the globals hold file paths, not file objects.
    recent = os.path.join(configFold, "recent.json")
    canvas = os.path.join(configFold, "canvas.json")
    shortcuts = os.path.join(configFold, "shortcuts.json")
    prefs = os.path.join(configFold, "prefs.json")
def __kill__():
    """Best-effort teardown: close any open handles and drop the globals.

    NOTE(review): after __init__, recent/canvas/shortcuts/prefs hold path
    strings, so the .close() calls raise AttributeError and silently rely
    on the bare excepts; only recF/prefF can be real file objects here.
    """
    global recF
    global prefF
    global recent
    global canvas
    global shortcuts
    global prefs
    try:
        recF.close()
    except:
        pass
    try:
        canvas.close()
    except:
        pass
    try:
        shortcuts.close()
    except:
        pass
    try:
        prefF.close()
    except:
        pass
    del recent
    del canvas
    del shortcuts
    del prefs
    del prefF
    del recF
    #del jDe
# Self-test when run directly: rebuild the config from scratch and
# exercise the recent-files bookkeeping.
if(__name__=="__main__"):
    #singleton();
    try:
        rmtree(configFold)
    except:
        pass
    try:
        __init__()
        addRecent('test file', 'testLoc')
        addRecent('test2', 'loc')
        addRecent('test3', 'loc1')
        addRecent('test3', 'loc1')
        printRecents()
        __kill__()
    except Exception as inst:
        # print type(inst) # the exception instance
        # print inst.args # arguments stored in .args
        # print inst # __str__ allows args to be printed directly
        __kill__()
        raise
# On import: initialize the config files once if not yet done.
if(prefs==""):
    try:
        __init__() #find a way to use with statements to close files
    except Exception as inst:
        # print type(inst) # the exception instance
        # print inst.args # arguments stored in .args
        # print inst # __str__ allows args to be printed directly
        __kill__()
        raise
|
from django.db import migrations
class Migration(migrations.Migration):
    """
    We're changing the stream deactivation process to make it mark all messages
    in the stream as read. For things to be consistent with streams that have been
    deactivated before this change, we need a migration to fix those old streams,
    to have all messages marked as read.
    """
    dependencies = [
        ('zerver', '0300_add_attachment_is_web_public'),
    ]
    operations = [
        # Per the class docstring, set the read bit (flags | 1) on every
        # UserMessage whose message belongs to a deactivated stream.
        # Irreversible by design: reverse_sql is an intentional no-op.
        migrations.RunSQL(
            sql="""
                UPDATE zerver_usermessage SET flags = flags | 1
                FROM zerver_message
                INNER JOIN zerver_stream ON zerver_stream.recipient_id = zerver_message.recipient_id
                WHERE zerver_message.id = zerver_usermessage.message_id
                AND zerver_stream.deactivated;
            """,
            reverse_sql="",
        ),
    ]
|
from utils import url_xpath, State
from .people import DEPersonScraper
from .bills import DEBillScraper
# from .events import DEEventScraper
# from .committees import DECommitteeScraper
class Delaware(State):
    """Scraper configuration for the Delaware General Assembly."""
    scrapers = {
        "people": DEPersonScraper,
        "bills": DEBillScraper,
        # 'events': DEEventScraper,
        # 'committees': DECommitteeScraper,
    }
    # One entry per biennial General Assembly; _scraped_name must match the
    # option text on the legislature's site (see get_session_list).
    legislative_sessions = [
        {
            "_scraped_name": "1998 - 2000 (GA 140)",
            "identifier": "140",
            "name": "140th General Assembly (1999-2000)",
            "start_date": "1999-01-05",
            "end_date": "2001-01-01",
        },
        {
            "_scraped_name": "2000 - 2002 (GA 141)",
            "identifier": "141",
            "name": "141st General Assembly (2001-2002)",
            "start_date": "2001-01-02",
            "end_date": "2003-01-01",
        },
        {
            "_scraped_name": "2002 - 2004 (GA 142)",
            "identifier": "142",
            "name": "142nd General Assembly (2003-2004)",
            "start_date": "2003-01-07",
            "end_date": "2005-01-01",
        },
        {
            "_scraped_name": "2004 - 2006 (GA 143)",
            "identifier": "143",
            "name": "143rd General Assembly (2005-2006)",
            "start_date": "2005-01-04",
            "end_date": "2007-01-01",
        },
        {
            "_scraped_name": "2006 - 2008 (GA 144)",
            "identifier": "144",
            "name": "144th General Assembly (2007-2008)",
            "start_date": "2007-01-09",
            "end_date": "2009-01-01",
        },
        {
            "_scraped_name": "2008 - 2010 (GA 145)",
            "identifier": "145",
            "name": "145th General Assembly (2009-2010)",
            "start_date": "2009-01-06",
            "end_date": "2010-05-05",
        },
        {
            "_scraped_name": "2010 - 2012 (GA 146)",
            "identifier": "146",
            "name": "146th General Assembly (2011-2012)",
            "start_date": "2011-01-05",
            "end_date": "2012-05-09",
        },
        {
            "_scraped_name": "2012 - 2014 (GA 147)",
            "identifier": "147",
            "name": "147th General Assembly (2013-2014)",
            "start_date": "2013-01-09",
            "end_date": "2014-05-07",
        },
        {
            "_scraped_name": "2014 - 2016 (GA 148)",
            "identifier": "148",
            "name": "148th General Assembly (2015-2016)",
            "start_date": "2015-01-07",
            "end_date": "2016-05-04",
        },
        {
            "_scraped_name": "2016 - 2018 (GA 149)",
            "identifier": "149",
            "name": "149th General Assembly (2017-2018)",
            "start_date": "2017-01-10",
            "end_date": "2018-05-09",
        },
        {
            "_scraped_name": "2018 - 2020 (GA 150)",
            "identifier": "150",
            "name": "150th General Assembly (2019-2020)",
            "start_date": "2019-01-08",
            "end_date": "2020-05-06",
        },
        # {
        #     "_scraped_name": "2020 - 2022 (GA 151)",
        #     "identifier": "151",
        #     "name": "151st General Assembly (2020-2022)",
        #     "start_date": "2021-01-12",
        #     "end_date": "2022-05-06",
        # },
    ]
    # Sessions that appear on the site but are deliberately not scraped.
    ignored_scraped_sessions = [
        "2020 - 2022 (GA 151)"
    ]
    def get_session_list(self):
        """Scrape the session names offered by the site's bill-search dropdown."""
        url = "https://legis.delaware.gov/"
        sessions = url_xpath(url, '//select[@id="billSearchGARefiner"]/option/text()')
        sessions = [session.strip() for session in sessions if session.strip()]
        return sessions
|
from django import forms
from django.contrib.gis.geos import Point
from widgets import AddAnotherWidgetWrapper
from django.core.exceptions import ValidationError
from .models import (Site, CycleResultSet, Monitor, ProgrammeResources,
ProgrammeImage)
class SiteForm(forms.ModelForm):
    """Admin form for Site exposing editable latitude/longitude fields that
    are folded into the hidden ``coordinates`` Point field."""
    latitude = forms.DecimalField(
        min_value=-90,
        max_value=90,
        required=False,
    )
    longitude = forms.DecimalField(
        min_value=-180,
        max_value=180,
        required=False,
    )

    class Meta(object):
        model = Site
        exclude = []
        widgets = {'coordinates': forms.HiddenInput()}

    def __init__(self, *args, **kwargs):
        if args:  # bound form: synthesize coordinates from the posted pair
            data = args[0]
            # Fix: use .get() so a POST that lacks these keys cannot raise
            # KeyError (the original indexed data['latitude'] directly).
            if data.get('latitude') and data.get('longitude'):
                latitude = float(data['latitude'])
                longitude = float(data['longitude'])
                # Point takes (x, y) == (longitude, latitude).
                data['coordinates'] = Point(longitude, latitude)
        if 'instance' in kwargs and kwargs['instance'] is not None and kwargs['instance'].coordinates:
            # Editing an existing site: seed the lat/lon inputs from the point.
            coordinates = kwargs['instance'].coordinates.tuple
            initial = kwargs.get('initial', {})
            initial['longitude'] = coordinates[0]
            initial['latitude'] = coordinates[1]
            kwargs['initial'] = initial
        super(SiteForm, self).__init__(*args, **kwargs)
class CycleResultSetForm(forms.ModelForm):
    """Admin form for CycleResultSet; restricts monitor choices to the
    result set's partner."""
    site_option_name = forms.CharField(widget=forms.TextInput)
    class Meta(object):
        model = CycleResultSet
        exclude = []
    def __init__(self, *args, **kwargs):
        super(CycleResultSetForm, self).__init__(*args, **kwargs)
        crs = kwargs.get('instance', None)
        if crs:
            partner = crs.partner
        else:
            # Unsaved form: no partner yet, so the queryset filters on None.
            partner = None
        self.fields['monitors'].queryset = Monitor.objects.filter(
            partner=partner)
        self.fields[
            'site_option_name'].help_text = "This is the name of the option for this site in the form, e.g. for 'Folweni clinic' it's probably 'folweni' (without the single quotes). You can find the names of options in the relevant Survey admin page."
class CRSFromKoboForm(forms.Form):
    """Dynamically built form pairing each Kobo facility with a CycleResultSet.

    For every facility it adds a visible crs_<i> choice field plus a hidden
    facility_<i> field carrying the facility name; num_facilities records
    how many pairs were generated.
    """
    def __init__(self, *args, **kwargs):
        facilities = kwargs.pop('facilities')
        super(CRSFromKoboForm, self).__init__(*args, **kwargs)
        for i, facility in enumerate(facilities):
            crs_field = forms.ModelChoiceField(
                queryset=CycleResultSet.objects.order_by('site__name').all(),
                label=facility['label'])
            # Wrap the widget so admins can create a CycleResultSet inline.
            crs_field.widget = AddAnotherWidgetWrapper(crs_field.widget,
                                                       CycleResultSet)
            self.fields['crs_%d' % i] = crs_field
            self.fields['facility_%d' % i] = forms.CharField(
                widget=forms.HiddenInput(), initial=facility['name'])
        self.fields['num_facilities'] = forms.CharField(
            widget=forms.HiddenInput(), initial=len(facilities))
class ProgrammeResourcesForm(forms.ModelForm):
    """Validates that a ProgrammeResources entry carries exactly the right
    attachment kind (external link vs uploaded document) for its resource
    type, and that order numbers are unique per programme/resource."""
    class Meta:
        model = ProgrammeResources
        exclude = ('document_extension', )
    def clean(self):
        link = self.cleaned_data.get('link')
        document = self.cleaned_data.get('document')
        order_no = self.cleaned_data.get('order')
        resource = self.cleaned_data.get('resource')
        programme = self.cleaned_data.get('programme')
        # NOTE(review): resource may be None when its field failed validation,
        # making resource.name raise AttributeError — confirm the field is
        # required upstream.
        if resource.name == 'Link' and link is None:
            raise ValidationError('Enter a link')
        if resource.name == 'Reports' and document is None:
            raise ValidationError('Upload a document')
        if resource.name == 'Survey Instrument' and document is None:
            raise ValidationError('Upload a document')
        if link and document:
            raise ValidationError(
                "You cant have an External link and a Document")
        # NOTE(review): this uniqueness check does not exclude the instance
        # being edited, so re-saving an existing resource raises — confirm.
        if ProgrammeResources.objects.filter(
                order=order_no, resource=resource,
                programme=programme).exists():
            raise ValidationError(
                'A Resource already exists for this order number')
        # NOTE(review): the required-link check above uses 'Link' but this
        # one uses 'Links' — likely one of the two is a typo; confirm
        # against the Resource fixtures before changing.
        if resource.name == 'Links' and document:
            raise ValidationError(
                'A resource of type Link cannot have a document, expecting a link'
            )
        if resource.name == 'Reports' and link:
            raise ValidationError(
                'A resource of type Reports cannot have a link, expecting a document'
            )
        if resource.name == 'Survey Instrument' and link:
            raise ValidationError(
                'A resource of type Survey Instrument cannot have a link, expecting a document'
            )
        return self.cleaned_data
class ProgrammeImageForm(forms.ModelForm):
    """Admin form for ProgrammeImage enforcing at most one featured image
    per programme."""
    class Meta:
        model = ProgrammeImage
        fields = '__all__'
    def clean(self):
        featured = self.cleaned_data.get('featured')
        programme = self.cleaned_data.get('programme')
        if featured:
            qs = ProgrammeImage\
                .objects\
                .filter(programme=programme, featured=True)
            # Fix: exclude the image being edited so re-saving an already
            # featured image no longer trips its own uniqueness check.
            if self.instance.pk is not None:
                qs = qs.exclude(pk=self.instance.pk)
            # .exists() avoids fetching rows just to test truthiness.
            if qs.exists():
                raise ValidationError(
                    "An image in this programme is already marked as a featured image"
                )
        return self.cleaned_data
|
t os
import subprocess
import shutil
import sys
import signal
from io import StringIO
from ast import literal_eval
from enum import Enum
import tempfile
from pathlib import Path, PurePath
from mesonbuild import build
from mesonbuild import environment
from mesonbuild import mesonlib
from mesonbuild import mlog
from mesonbuild import mtest
from mesonbuild.mesonlib import stringlistify, Popen_safe
from mesonbuild.coredata import backendlist
import argparse
import xml.etree.ElementTree as ET
import time
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
import re
from run_tests import get_fake_options, run_configure, get_meson_script
from run_tests import get_backend_commands, get_backend_args_for_dir, Backend
from run_tests import ensure_backend_detects_changes
class BuildStep(Enum):
    """Phase of a project test in which a failure can occur, in run order."""
    configure = 1
    build = 2
    test = 3
    install = 4
    clean = 5
    validate = 6
class TestResult:
    """Plain record of one project test: outcome message, the BuildStep it
    failed in, captured stdio, the meson log, and per-phase timings."""
    def __init__(self, msg, step, stdo, stde, mlog, conftime=0, buildtime=0, testtime=0):
        self.msg, self.step = msg, step
        self.stdo, self.stde = stdo, stde
        self.mlog = mlog
        self.conftime, self.buildtime, self.testtime = conftime, buildtime, testtime
class AutoDeletedDir:
    """Context manager: creates *d* on entry and force-deletes it on exit."""
    def __init__(self, d):
        self.dir = d
    def __enter__(self):
        os.makedirs(self.dir, exist_ok=True)
        return self.dir
    def __exit__(self, _type, value, traceback):
        # We don't use tempfile.TemporaryDirectory, but wrap the
        # deletion in the AutoDeletedDir class because
        # it fails on Windows due antivirus programs
        # holding files open.
        mesonlib.windows_proof_rmtree(self.dir)
# Module-wide state for the test harness.
failing_logs = []
# Dump each test's output when explicitly requested or when running under CI.
print_debug = 'MESON_PRINT_TEST_OUTPUT' in os.environ
under_ci = not {'TRAVIS', 'APPVEYOR'}.isdisjoint(os.environ)
do_debug = under_ci or print_debug
no_meson_log_msg = 'No meson-log.txt found.'
system_compiler = None
class StopException(Exception):
    """Raised to unwind the test loop after the user requests a stop."""
    def __init__(self):
        super().__init__('Stopped by user')
stop = False
def stop_handler(signal, frame):
    # Signal handler: only set the flag; the test loop checks it between steps.
    global stop
    stop = True
signal.signal(signal.SIGINT, stop_handler)
signal.signal(signal.SIGTERM, stop_handler)
def setup_commands(optbackend):
    """Resolve the build backend (vs/xcode/ninja) and populate the global
    per-backend command lists used by the test runner.

    Raises RuntimeError for an unknown backend name.
    """
    global do_debug, backend, backend_flags
    global compile_commands, clean_commands, test_commands, install_commands, uninstall_commands
    backend = optbackend
    msbuild_exe = shutil.which('msbuild')
    # Auto-detect backend if unspecified
    if backend is None:
        if msbuild_exe is not None:
            backend = 'vs' # Meson will auto-detect VS version to use
        else:
            backend = 'ninja'
    # Set backend arguments for Meson
    if backend.startswith('vs'):
        backend_flags = ['--backend=' + backend]
        backend = Backend.vs
    elif backend == 'xcode':
        backend_flags = ['--backend=xcode']
        backend = Backend.xcode
    elif backend == 'ninja':
        backend_flags = ['--backend=ninja']
        backend = Backend.ninja
    else:
        raise RuntimeError('Unknown backend: {!r}'.format(backend))
    compile_commands, clean_commands, test_commands, install_commands, \
        uninstall_commands = get_backend_commands(backend, do_debug)
def get_relative_files_list_from_dir(fromdir):
    """Return every file under *fromdir* as a path relative to it, using
    '/' separators and no leading './'."""
    collected = []
    for root, _, filenames in os.walk(fromdir):
        base = os.path.relpath(root, start=fromdir)
        for name in filenames:
            rel = os.path.join(base, name).replace('\\', '/')
            collected.append(rel[2:] if rel.startswith('./') else rel)
    return collected
def platform_fix_name(fname, compiler, env):
    """Translate the ?lib / ?exe / ?msvc: / ?gcc: markers in an expected
    installed-file name into the platform's real name.

    Returns None when the entry does not apply to this compiler.
    """
    if '?lib' in fname:
        if mesonlib.for_cygwin(env.is_cross_build(), env):
            # Cygwin shared libraries live in bin/ with a 'cyg' prefix.
            fname = re.sub(r'lib/\?lib(.*)\.so$', r'bin/cyg\1.dll', fname)
            fname = re.sub(r'\?lib(.*)\.dll$', r'cyg\1.dll', fname)
        else:
            fname = re.sub(r'\?lib', 'lib', fname)
    if fname.endswith('?exe'):
        fname = fname[:-4]
        if mesonlib.for_windows(env.is_cross_build(), env) or mesonlib.for_cygwin(env.is_cross_build(), env):
            return fname + '.exe'
    if fname.startswith('?msvc:'):
        fname = fname[6:]
        if compiler != 'cl':
            # msvc-only entry; skip for other compilers.
            return None
    if fname.startswith('?gcc:'):
        fname = fname[5:]
        if compiler == 'cl':
            return None
    return fname
def validate_install(srcdir, installdir, compiler, env):
    """Compare the files actually installed under *installdir* with the
    test's installed_files.txt manifest; return '' on success or a
    newline-joined message listing missing/extra files."""
    # List of installed files
    info_file = os.path.join(srcdir, 'installed_files.txt')
    # If this exists, the test does not install any other files
    noinst_file = 'usr/no-installed-files'
    expected = {}
    ret_msg = ''
    # Generate list of expected files
    if os.path.exists(os.path.join(installdir, noinst_file)):
        expected[noinst_file] = False
    elif os.path.exists(info_file):
        with open(info_file) as f:
            for line in f:
                line = platform_fix_name(line.strip(), compiler, env)
                if line:
                    expected[line] = False
    # Check if expected files were found
    for fname in expected:
        file_path = os.path.join(installdir, fname)
        if os.path.exists(file_path) or os.path.islink(file_path):
            expected[fname] = True
    for (fname, found) in expected.items():
        if not found:
            # Ignore missing PDB files if we aren't using cl
            if fname.endswith('.pdb') and compiler != 'cl':
                continue
            ret_msg += 'Expected file {0} missing.\n'.format(fname)
    # Check if there are any unexpected files
    found = get_relative_files_list_from_dir(installdir)
    for fname in found:
        # Windows-specific tests check for the existence of installed PDB
        # files, but common tests do not, for obvious reasons. Ignore any
        # extra PDB files found.
        # NOTE(review): as written, extra files are only ever reported when
        # compiler == 'cl' (and .pdb files never are); for every other
        # compiler no extra file is reported at all. The comment above
        # suggests the intent was the opposite — confirm before changing.
        if fname not in expected and not fname.endswith('.pdb') and compiler == 'cl':
            ret_msg += 'Extra file {0} found.\n'.format(fname)
    return ret_msg
def log_text_file(logfile, testdir, stdo, stde):
    """Append one test's stdout/stderr to *logfile*, optionally echo it to
    the console, and abort the whole run if a stop was requested."""
    global stop, executor, futures
    logfile.write('%s\nstdout\n\n---\n%s\n\n---\n\nstderr\n\n---\n%s\n\n---\n\n'
                  % (testdir.as_posix(), stdo, stde))
    if print_debug:
        # Console encodings may not be able to represent the test output;
        # fall back to an ASCII-sanitised copy.
        for text, stream in ((stdo, sys.stdout), (stde, sys.stderr)):
            try:
                print(text, file=stream)
            except UnicodeError:
                sanitized = text.encode('ascii', errors='replace').decode()
                print(sanitized, file=stream)
    if stop:
        print("Aborting..")
        for f in futures:
            f[2].cancel()
        executor.shutdown()
        raise StopException()
def bold(text):
    # Colour helper: render *text* bold when the console supports colour.
    return mlog.bold(text).get_text(mlog.colorize_console)
def green(text):
    # Colour helper: render *text* green when the console supports colour.
    return mlog.green(text).get_text(mlog.colorize_console)
def red(text):
    # Colour helper: render *text* red when the console supports colour.
    return mlog.red(text).get_text(mlog.colorize_console)
def yellow(text):
    # Colour helper: render *text* yellow when the console supports colour.
    return mlog.yellow(text).get_text(mlog.colorize_console)
def run_test_inprocess(testdir):
    """Run `meson test` and `meson benchmark` for *testdir* inside this
    process, capturing their output.

    Returns a tuple: (worst returncode, captured stdout, captured stderr,
    contents of meson-logs/testlog.txt or '').
    """
    # Swap the global streams so mtest's prints are captured instead of
    # interleaving with our own output.
    old_stdout = sys.stdout
    sys.stdout = mystdout = StringIO()
    old_stderr = sys.stderr
    sys.stderr = mystderr = StringIO()
    old_cwd = os.getcwd()
    os.chdir(testdir)
    test_log_fname = Path('meson-logs', 'testlog.txt')
    try:
        returncode_test = mtest.run(['--no-rebuild'])
        if test_log_fname.exists():
            test_log = test_log_fname.open(errors='ignore').read()
        else:
            test_log = ''
        returncode_benchmark = mtest.run(['--no-rebuild', '--benchmark', '--logbase', 'benchmarklog'])
    finally:
        # Always restore streams and cwd, even if mtest raised.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        os.chdir(old_cwd)
    return max(returncode_test, returncode_benchmark), mystdout.getvalue(), mystderr.getvalue(), test_log
def parse_test_args(testdir):
args = []
try:
with open(os.path.join(testdir, 'test_args.txt'), 'r') as f:
content = f.read()
try:
args = liter |
piry, message_id)
    def get_sequence_number_message_id(self, sequence_number):
        # Map an SMPP sequence number back to the internal message id it
        # was stored against (None when unknown or expired).
        return self.redis.get(sequence_number_key(sequence_number))
    def cache_message(self, message):
        # Cache the serialised outbound message under its message_id so it
        # can be recovered when the submit_sm response arrives; the entry
        # expires after config.submit_sm_expiry seconds.
        key = message_key(message['message_id'])
        expire = self.config.submit_sm_expiry
        return self.redis.setex(key, expire, message.to_json())
def get_cached_message(self, message_id):
d = self.redis.get(message_key(message_id))
d.addCallback(lambda json_data: (
TransportUserMessage.from_json(json_data)
if json_data else None))
return d
    def delete_cached_message(self, message_id):
        # Drop the cached copy once the message is fully acked or failed.
        return self.redis.delete(message_key(message_id))
def set_remote_message_id(self, message_id, smpp_message_id):
if message_id is None:
# If we store None, we end up with the string "None" in Redis. This
# confuses later lookups (which treat any non-None value as a valid
# identifier) and results in broken delivery reports.
return succeed(None)
key = remote_message_key(smpp_message_id)
expire = self.config.third_party_id_expiry
d = self.redis.setex(key, expire, message_id)
d.addCallback(lambda _: message_id)
return d
    def get_internal_message_id(self, smpp_message_id):
        # Reverse lookup: SMSC-assigned id -> internal message id.
        return self.redis.get(remote_message_key(smpp_message_id))
class SmppTransceiverTransport(Transport):
    # Vumi transport speaking SMPP in transceiver mode.
    CONFIG_CLASS = SmppTransportConfig
    factory_class = SmppTransceiverClientFactory  # builds the client protocol
    service_class = SmppService  # manages the (re)connecting endpoint
    sequence_class = RedisSequence  # SMPP sequence-number allocator
    clock = reactor  # overridable (e.g. with a test Clock)
    start_message_consumer = False  # don't consume until explicitly started
    @inlineCallbacks
    def setup_transport(self):
        """Connect to Redis, build the message processors, start the SMPP
        service and (optionally) the per-second TPS reset loop."""
        config = self.get_static_config()
        log.msg('Starting SMPP Transport for: %s' % (config.twisted_endpoint,))
        # Redis keyspace for this bind; a configured split_bind_prefix lets
        # several binds share state, otherwise system_id@transport_name.
        default_prefix = '%s@%s' % (config.system_id,
                                    config.transport_name)
        redis_prefix = config.split_bind_prefix or default_prefix
        self.redis = (yield TxRedisManager.from_config(
            config.redis_manager)).sub_manager(redis_prefix)
        # Pluggable processors for delivery reports, inbound short messages
        # and outbound submit_sm construction.
        self.dr_processor = config.delivery_report_processor(
            self, config.delivery_report_processor_config)
        self.deliver_sm_processor = config.deliver_short_message_processor(
            self, config.deliver_short_message_processor_config)
        self.submit_sm_processor = config.submit_short_message_processor(
            self, config.submit_short_message_processor_config)
        self.sequence_generator = self.sequence_class(self.redis)
        self.message_stash = SmppMessageDataStash(self.redis, config)
        # Throttling state: None means "not determined yet".
        self.throttled = None
        self._throttled_message_ids = []
        self._unthrottle_delayedCall = None
        self.factory = self.factory_class(self)
        self.service = self.start_service(self.factory)
        # Messages-per-second accounting; when mt_tps > 0 a LoopingCall
        # opens a fresh window every second.
        self.tps_counter = 0
        self.tps_limit = config.mt_tps
        if config.mt_tps > 0:
            self.mt_tps_lc = LoopingCall(self.reset_mt_tps)
            self.mt_tps_lc.clock = self.clock
            self.mt_tps_lc.start(1, now=True)
        else:
            self.mt_tps_lc = None
    def start_service(self, factory):
        # Build and start the SMPP client service for the configured
        # TCP endpoint; returns the started service.
        config = self.get_static_config()
        service = self.service_class(config.twisted_endpoint, factory)
        service.startService()
        return service
    @inlineCallbacks
    def teardown_transport(self):
        # Stop the SMPP service, cancel the TPS reset loop if running,
        # then close the Redis sub-manager.
        if self.service:
            yield self.service.stopService()
        if self.mt_tps_lc and self.mt_tps_lc.running:
            self.mt_tps_lc.stop()
        yield self.redis._close()
    def reset_mt_tps(self):
        # Called once per second by the LoopingCall started in
        # setup_transport: open a new TPS window and lift throttling that
        # was engaged because the previous window hit the limit.
        if self.throttled and self.need_mt_throttling():
            if not self.service.is_bound():
                # We don't have a bound SMPP connection, so try again later.
                log.msg("Can't stop throttling while unbound, trying later.")
                return
            self.reset_mt_throttle_counter()
            self.stop_throttling(quiet=True)
    def reset_mt_throttle_counter(self):
        # Start a fresh one-second TPS window.
        self.tps_counter = 0
    def incr_mt_throttle_counter(self):
        # Count one outbound message against the current window.
        self.tps_counter += 1
    def need_mt_throttling(self):
        # True once this window's message count has reached the TPS limit.
        return self.tps_counter >= self.tps_limit
    def bind_requires_throttling(self):
        # TPS throttling is enabled by configuring mt_tps > 0.
        config = self.get_static_config()
        return config.mt_tps > 0
    def check_mt_throttling(self):
        # Account for one outbound message; engage throttling when the
        # per-second limit is reached.
        self.incr_mt_throttle_counter()
        if self.need_mt_throttling():
            # We can't yield here, because we need this message to finish
            # processing before it will return.
            self.start_throttling(quiet=True)
def _check_address_valid(self, message, field):
try:
message[field].encode('ascii')
except UnicodeError:
return False
return True
    def _reject_for_invalid_address(self, message, field):
        # Nack the message rather than handing an unencodable address to
        # the SMSC.
        return self.publish_nack(
            message['message_id'], u'Invalid %s: %s' % (field, message[field]))
    @inlineCallbacks
    def handle_outbound_message(self, message):
        """Submit an outbound (MT) message to the SMSC.

        Messages with non-ASCII to/from addresses are nacked instead of
        being sent; accepted messages are cached so later submit_sm
        responses can be matched back to them.
        """
        if self.bind_requires_throttling():
            yield self.check_mt_throttling()
        # NOTE(review): the protocol is awaited before address validation,
        # so even an invalid message waits for a bound connection first —
        # confirm this ordering is intentional.
        protocol = yield self.service.get_protocol()
        if not self._check_address_valid(message, 'to_addr'):
            yield self._reject_for_invalid_address(message, 'to_addr')
            return
        if not self._check_address_valid(message, 'from_addr'):
            yield self._reject_for_invalid_address(message, 'from_addr')
            return
        yield self.submit_sm_processor.handle_outbound_message(
            message, protocol)
        yield self.message_stash.cache_message(message)
    @inlineCallbacks
    def process_submit_sm_event(self, message_id, event_type, remote_id,
                                command_status):
        """Publish the final ack/nack for a (possibly multipart) message
        once its aggregate outcome is known."""
        if event_type == 'ack':
            yield self.publish_ack(message_id, remote_id)
            yield self.message_stash.delete_cached_message(message_id)
        else:
            # Anything that is not an ack is treated as a failure.
            if event_type != 'fail':
                log.warning(
                    "Unexpected multipart event type %r, assuming 'fail'" % (
                        event_type,))
            err_msg = yield self.message_stash.get_cached_message(message_id)
            command_status = command_status or 'Unspecified'
            if err_msg is None:
                # Without the cached original we can only log; no nack or
                # failure message is published in this branch.
                log.warning(
                    "Could not retrieve failed message: %s" % (message_id,))
            else:
                yield self.message_stash.delete_cached_message(message_id)
                yield self.publish_nack(message_id, command_status)
                yield self.failure_publisher.publish_message(
                    FailureMessage(message=err_msg.payload,
                                   failure_code=None,
                                   reason=command_status))
    @inlineCallbacks
    def handle_submit_sm_success(self, message_id, smpp_message_id,
                                 command_status):
        """Record a successful submit_sm response for one message part and
        publish the aggregate event once all parts are accounted for."""
        yield self.message_stash.update_multipart_info_success(
            message_id, smpp_message_id)
        event_info = yield self.message_stash.get_multipart_event_info(
            message_id, 'ack', smpp_message_id)
        event_required, event_type, remote_id = event_info
        if event_required:
            yield self.process_submit_sm_event(
                message_id, event_type, remote_id, command_status)
        if self.throttled:
            # A successful response is a signal we may be able to unthrottle.
            yield self.check_stop_throttling(0)
@inlineCallbacks
def handle_submit_sm_failure(self, message_id, smpp_message_id,
command_status):
yield self.message_stash.update_multipart_info_failure(
message_id, smpp_message_id)
event_info = yield self.message_stash.get_multipart_event_info(
message_id, 'fail', smpp_message_id)
event_required, event_type, remote_id = event_info
if event_required:
yield self.process_submit_sm_event(
message_id, event_type, remote_id, command_status)
if self.throttled:
self.check_stop_throttling(0)
@inlineCallbacks
def h |
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import os
import logging
from stat import ST_MODE
import xattr
# project
from kiwi.command import Command
log = logging.getLogger('kiwi')
class DataSync:
    """
    **Sync data from a source directory to a target directory
    using the rsync protocol**

    :param str source_dir: source directory path name
    :param str target_dir: target directory path name
    """
    def __init__(self, source_dir, target_dir):
        self.source_dir = source_dir
        self.target_dir = target_dir

    def sync_data(self, options=None, exclude=None):
        """
        Sync data from source to target using rsync

        :param list options: rsync options
        :param list exclude: file patterns to exclude
        """
        target_entry_permissions = None
        exclude_options = []
        # BUG FIX: copy the caller's options list; the previous code
        # aliased it and the remove() calls below mutated the caller's
        # argument in place.
        rsync_options = list(options) if options else []
        if not self.target_supports_extended_attributes():
            # Drop attribute-preserving flags the target cannot honor,
            # warning once if any were requested.
            warn_me = False
            for flag in ('-X', '-A'):
                if flag in rsync_options:
                    rsync_options.remove(flag)
                    warn_me = True
            if warn_me:
                log.warning(
                    'Extended attributes not supported for target: %s',
                    self.target_dir
                )
        if exclude:
            for item in exclude:
                exclude_options.append('--exclude')
                exclude_options.append(
                    '/' + item
                )
        if os.path.exists(self.target_dir):
            target_entry_permissions = os.stat(self.target_dir)[ST_MODE]
        Command.run(
            ['rsync'] + rsync_options + exclude_options + [
                self.source_dir, self.target_dir
            ]
        )
        if target_entry_permissions is not None:
            # rsync applies the permissions of the source directory
            # also to the target directory which is unwanted because
            # only permissions of the files and directories from the
            # source directory and its contents should be transfered
            # but not from the source directory itself. Therefore
            # the permission bits of the target directory before the
            # sync are applied back after sync to ensure they have
            # not changed
            os.chmod(self.target_dir, target_entry_permissions)

    def target_supports_extended_attributes(self):
        """
        Check if the target directory supports extended filesystem
        attributes

        :return: True or False
        :rtype: bool
        """
        try:
            xattr.getxattr(self.target_dir, 'user.mime_type')
        except Exception as e:
            # BUG FIX: compare errno codes instead of parsing the
            # '[Errno 95]' prefix out of the exception text, which is
            # fragile. ENOTSUP is errno 95 (Operation not supported)
            # on Linux; any other error (e.g. attribute not present)
            # still means the filesystem supports xattrs.
            import errno
            if getattr(e, 'errno', None) == errno.ENOTSUP:
                return False
        return True
|
# -*- coding: utf-8 -*-
# vim: ft=python:sw=4:ts=4:sts=4:et:
import requests
import decimal
from datetime import timedelta
from xml.etree import ElementTree
from django_bnr.models import Rate
def get_bnr_rate(date, currency='USD'):
    """Return the BNR reference exchange rate for *currency* on *date*.

    Served from the local ``Rate`` table when cached; otherwise scraped
    from the BNR 10-day XML feed, walking back up to 6 days (no fixing is
    published on weekends/holidays), cached best-effort, and returned as
    a ``decimal.Decimal``.

    :raises RuntimeError: when no rate is found within the look-back window.
    """
    try:
        return Rate.objects.get(date=date, currency=currency).rate
    except Rate.DoesNotExist:
        pass
    r = requests.get('https://www.bnr.ro/nbrfxrates10days.xml')
    r.raise_for_status()
    # Parse the feed once instead of re-parsing the XML text on every
    # look-back iteration as the previous code did.
    root = ElementTree.fromstring(r.text)
    xpath_fmt = ("./{xsd}Body/{xsd}Cube[@date='{date}']/"
                 "{xsd}Rate[@currency='{currency}']")
    d = date.strftime('%Y-%m-%d')
    days = 0
    node = None
    while node is None:
        node = root.find(xpath_fmt.format(
            xsd='{http://www.bnr.ro/xsd}',
            date=d,
            currency=currency
        ))
        if node is None:
            days += 1
            if days == 7:
                raise RuntimeError('Cannot get exchange rate for '
                                   '%(currency)s from %(date)s' % {
                                       'currency': currency,
                                       'date': date
                                   })
            d = (date - timedelta(days=days)).strftime('%Y-%m-%d')
    rate = decimal.Decimal(node.text)
    try:
        Rate.objects.create(date=date, currency=currency, rate=rate)
    except Exception:
        # Best-effort cache write: a concurrent request may have inserted
        # the row already. Narrowed from a bare `except:` so SystemExit /
        # KeyboardInterrupt are no longer swallowed.
        pass
    return rate
|
//github.com/NVIDIA/DIGITS
"""
Classify an image using individual model files imported from DIGITS
This tool is for testing your deployed model with images from the test folder.
All the configuration params are removed or commented out as compared to the original example.py
- Copy your digits trained model to the model folder
- Copy your test images to the images folder
"""
import argparse
import os
import time
from google.protobuf import text_format
import numpy as np
import PIL.Image
import scipy.misc
os.environ['GLOG_minloglevel'] = '2' # Suppress most caffe output
import caffe
from caffe.proto import caffe_pb2
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model in TEST phase (inference)
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
def get_transformer(deploy_file, mean_file=None):
    """
    Returns an instance of caffe.io.Transformer configured for the
    network's input blob (transpose, channel swap, optional mean).

    Arguments:
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    mean_file -- path to a .binaryproto file (optional)
    """
    network = caffe_pb2.NetParameter()
    with open(deploy_file) as infile:
        text_format.Merge(infile.read(), network)

    # Input dims come either from the newer input_shape field or the
    # legacy flat 4-int input_dim list.
    if network.input_shape:
        dims = network.input_shape[0].dim
    else:
        dims = network.input_dim[:4]

    t = caffe.io.Transformer(
        inputs = {'data': dims}
    )
    t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)

    # color images
    if dims[1] == 3:
        # channel swap: RGB -> BGR expected by caffe reference models
        t.set_channel_swap('data', (2,1,0))

    if mean_file:
        # set mean pixel
        with open(mean_file,'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
            if blob.HasField('shape'):
                blob_dims = blob.shape
                assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
            elif blob.HasField('num') and blob.HasField('channels') and \
                    blob.HasField('height') and blob.HasField('width'):
                blob_dims = (blob.num, blob.channels, blob.height, blob.width)
            else:
                raise ValueError('blob does not provide shape or 4d dimensions')
            # Collapse the stored mean image to one mean value per channel.
            pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
            t.set_mean('data', pixel)

    return t
def load_image(path, height, width, mode='RGB'):
    """
    Load an image from disk

    Returns an np.ndarray (channels x width x height)

    Arguments:
    path -- path to an image on disk
    width -- resize dimension
    height -- resize dimension

    Keyword arguments:
    mode -- the PIL mode that the image should be converted to
        (RGB for color or L for grayscale)
    """
    image = PIL.Image.open(path)
    image = image.convert(mode)
    image = np.array(image)
    # squash: resize without preserving aspect ratio
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — this
    # code needs an old pinned SciPy (plus PIL); confirm environment.
    image = scipy.misc.imresize(image, (height, width), 'bilinear')
    return image
def forward_pass(images, net, transformer, batch_size=None):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    if batch_size is None:
        batch_size = 1

    # Grayscale images arrive as HxW; add a singleton channel axis so
    # every image is HxWxC before stacking.
    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:,:,np.newaxis])
        else:
            caffe_images.append(image)

    caffe_images = np.array(caffe_images)

    dims = transformer.inputs['data'][1:]

    scores = None
    for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
        # Reshape the input blob only when the batch shape changes
        # (e.g. for the final, smaller chunk).
        new_shape = (len(chunk),) + tuple(dims)
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        # The last declared output blob is taken as the score layer.
        output = net.forward()[net.outputs[-1]]
        if scores is None:
            scores = np.copy(output)
        else:
            scores = np.vstack((scores, output))
    return scores
def read_labels(labels_file):
"""
Returns a list of strings
Arguments:
labels_file -- path to a .txt file
"""
if not labels_file:
print 'WARNING: No labels file provided. Results will be difficult to interpret.'
return None
labels = []
with open(labels_file) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels), 'No labels found'
return labels
def classify(caffemodel, deploy_file, image_files,
        mean_file=None, labels_file=None, batch_size=None, use_gpu=True):
    """
    Classify some images against a Caffe model and return the results

    Returns a list (one entry per image) of top-5 (label, confidence%)
    pairs.

    Arguments:
    caffemodel -- path to a .caffemodel
    deploy_file -- path to a .prototxt
    image_files -- list of paths to images

    Keyword arguments:
    mean_file -- path to a .binaryproto
    labels_file -- path to a .txt file
    use_gpu -- if True, run inference on the GPU
    """
    # Load the model and images
    net = get_net(caffemodel, deploy_file, use_gpu)
    transformer = get_transformer(deploy_file, mean_file)

    # Input blob dims are (batch, channels, height, width).
    _, channels, height, width = transformer.inputs['data']
    if channels == 3:
        mode = 'RGB'
    elif channels == 1:
        mode = 'L'
    else:
        raise ValueError('Invalid number for channels: %s' % channels)
    images = [load_image(image_file, height, width, mode) for image_file in image_files]
    labels = read_labels(labels_file)

    # Classify the image
    classify_start_time = time.time()
    scores = forward_pass(images, net, transformer, batch_size=batch_size)
    print 'Classification took %s seconds.' % (time.time() - classify_start_time,)

    ### Process the results

    indices = (-scores).argsort()[:, :5] # take top 5 results
    classifications = []
    for image_index, index_list in enumerate(indices):
        result = []
        for i in index_list:
            # 'i' is a category in labels and also an index into scores
            if labels is None:
                label = 'Class #%s' % i
            else:
                label = labels[i]
            result.append((label, round(100.0*scores[image_index, i],4)))
        classifications.append(result)
    return classifications
def print_classification_results(results, image_files):
    """Pretty-print per-image top-5 (label, confidence%) pairs produced
    by classify()."""
    for index, classification in enumerate(results):
        print '{:-^80}'.format(' Prediction for %s ' % image_files[index])
        for label, confidence in classification:
            # confidence is stored as a percentage; render as e.g. 97.1234%
            print '{:9.4%} - "{}"'.format(confidence/100.0, label)
        print
if __name__ == '__main__':
script_start_time = time.time()
parser = argparse.ArgumentParser(description='Classification example - DIGITS')
### Positional arguments
parser.add_argument | ('caffemodel', help='Path to a .caffemodel')
parser.add_argument('deploy_file', help='Path to the deploy file')
parser.add_argument('image_file',
nargs='+',
help='Path[s] to an image')
### Optional arguments
parser.add_argument('-m', '--mean',
help='Path to a mean file (*.npy)')
parser.add_argument('-l', '--labels',
help='Path to a labels file')
parser.add_argument('--batch-size',
type=int)
parser.add_argument('--nogpu',
action='store_true',
help="Don't use the GPU")
args = vars(parser.parse_args())
results = classify(args['caffemodel'], args['deploy_file'], args['image_file'],
args['mean'], args['labels'], not args['nogp |
index)
del Node.allnodes[index]
def _write_data(writer, data):
"Writes datachars to writer."
data = string.replace(data, "&", "&")
data = string.replace(data, "<", "<")
data = string.replace(data, "\"", """)
data = string.replace(data, ">", ">")
writer.write(data)
def _getElementsByTagNameHelper(parent, name, rc):
    """Recursively accumulate descendant elements of *parent* whose
    tagName matches *name* ("*" matches every element) into *rc*."""
    for child in parent.childNodes:
        is_element = child.nodeType == Node.ELEMENT_NODE
        if is_element and (name == "*" or child.tagName == name):
            rc.append(child)
        # Recurse into every child, element or not.
        _getElementsByTagNameHelper(child, name, rc)
    return rc
def _getElementsByTagNameNSHelper(parent, nsURI, localName, rc):
    """Recursively accumulate descendant elements matching the
    (namespace URI, local name) pair into *rc*; "*" is a wildcard for
    either component."""
    for node in parent.childNodes:
        if node.nodeType == Node.ELEMENT_NODE:
            if ((localName == "*" or node.tagName == localName) and
                (nsURI == "*" or node.namespaceURI == nsURI)):
                rc.append(node)
            # BUG FIX: the recursive call previously passed the undefined
            # name `name` instead of nsURI/localName, raising NameError as
            # soon as a matching element had element children.
            _getElementsByTagNameNSHelper(node, nsURI, localName, rc)
    return rc
class Attr(Node):
    # DOM Attr node: a single attribute of an Element.
    nodeType = Node.ATTRIBUTE_NODE
    def __init__(self, qName, namespaceURI="", localName=None, prefix=None):
        # skip setattr for performance: assign directly into __dict__ so
        # the __setattr__ hook below isn't invoked per field.
        self.__dict__["localName"] = localName or qName
        self.__dict__["nodeName"] = self.__dict__["name"] = qName
        self.__dict__["namespaceURI"] = namespaceURI
        self.__dict__["prefix"] = prefix
        self.attributes = None
        Node.__init__(self)
        # nodeValue and value are set elsewhere
    def __setattr__(self, name, value):
        # Keep the DOM alias pair value/nodeValue in sync.
        if name in ("value", "nodeValue"):
            self.__dict__["value"] = self.__dict__["nodeValue"] = value
        else:
            self.__dict__[name] = value
class AttributeList:
    """the attribute list is a transient interface to the underlying
    dictionaries.  mutations here will change the underlying element's
    dictionary"""
    def __init__(self, attrs, attrsNS):
        self._attrs = attrs
        self._attrsNS = attrsNS
        # NOTE(review): length is a snapshot taken at construction; it is
        # not refreshed by __setitem__/__delitem__ below.
        self.length = len(self._attrs.keys())
    def item(self, index):
        # DOM item(): indexed access; None when out of range.
        try:
            return self[self.keys()[index]]
        except IndexError:
            return None
    def items(self):
        # BUG FIX: Attr nodes define `name`/`nodeName`, not `tagName`,
        # so the previous node.tagName raised AttributeError.
        return map(lambda node: (node.nodeName, node.value),
                   self._attrs.values())
    def itemsNS(self):
        # BUG FIX: Attr nodes store their namespace in `namespaceURI`;
        # the previous node.URI raised AttributeError.
        return map(lambda node: ((node.namespaceURI, node.localName),
                                 node.value),
                   self._attrs.values())
    def keys(self):
        return self._attrs.keys()
    def keysNS(self):
        return self._attrsNS.keys()
    def values(self):
        return self._attrs.values()
    def __len__(self):
        return self.length
    def __cmp__(self, other):
        # Two lists compare equal iff they wrap the same dictionary.
        if self._attrs is getattr(other, "_attrs", None):
            return 0
        else:
            return cmp(id(self), id(other))
    #FIXME: is it appropriate to return .value?
    def __getitem__(self, attname_or_tuple):
        # A tuple key means (namespaceURI, localName); a string means qName.
        if type(attname_or_tuple) is types.TupleType:
            return self._attrsNS[attname_or_tuple]
        else:
            return self._attrs[attname_or_tuple]
    # same as set
    def __setitem__(self, attname, value):
        if type(value) is types.StringType:
            node = Attr(attname)
            node.value=value
        else:
            assert isinstance(value, Attr) or type(value) is types.StringType
            node = value
        old = self._attrs.get(attname, None)
        if old:
            old.unlink()
        self._attrs[node.name] = node
        self._attrsNS[(node.namespaceURI, node.localName)] = node
    def __delitem__(self, attname_or_tuple):
        # Remove from both indexes and release the node's references.
        node = self[attname_or_tuple]
        node.unlink()
        del self._attrs[node.name]
        del self._attrsNS[(node.namespaceURI, node.localName)]
class Element(Node):
    """DOM Element node: tag name, namespace information, attributes and
    child nodes."""
    nodeType = Node.ELEMENT_NODE
    def __init__(self, tagName, namespaceURI="", prefix="",
                 localName=None):
        Node.__init__(self)
        self.tagName = self.nodeName = tagName
        self.localName = localName or tagName
        self.prefix = prefix
        self.namespaceURI = namespaceURI
        self.nodeValue = None

        # attributes are double-indexed:
        #   _attrs:   tagName -> Attribute
        #   _attrsNS: (URI, localName) -> Attribute
        # in the future: consider lazy generation of attribute objects;
        # this is too tricky for now because of headaches with namespaces.
        self._attrs={}
        self._attrsNS={}
    def getAttribute(self, attname):
        return self._attrs[attname].value
    def getAttributeNS(self, namespaceURI, localName):
        return self._attrsNS[(namespaceURI, localName)].value
    def setAttribute(self, attname, value):
        attr = Attr(attname)
        # for performance: bypass Attr.__setattr__
        attr.__dict__["value"] = attr.__dict__["nodeValue"] = value
        self.setAttributeNode(attr)
    def setAttributeNS(self, namespaceURI, qualifiedName, value):
        prefix, localname = _nssplit(qualifiedName)
        # for performance: bypass Attr.__setattr__
        attr = Attr(qualifiedName, namespaceURI, localname, prefix)
        attr.__dict__["value"] = attr.__dict__["nodeValue"] = value
        self.setAttributeNode(attr)
    # FIXME: return original node if something changed.
    def getAttributeNode(self, attrname):
        return self._attrs.get(attrname)
    def getAttributeNodeNS(self, namespaceURI, localName):
        return self._attrsNS[(namespaceURI, localName)]
    def setAttributeNode(self, attr):
        old = self._attrs.get(attr.name, None)
        if old:
            old.unlink()
        self._attrs[attr.name] = attr
        self._attrsNS[(attr.namespaceURI, attr.localName)] = attr
    # FIXME: return old value if something changed
    def removeAttribute(self, name):
        attr = self._attrs[name]
        self.removeAttributeNode(attr)
    def removeAttributeNS(self, namespaceURI, localName):
        attr = self._attrsNS[(namespaceURI, localName)]
        self.removeAttributeNode(attr)
    def removeAttributeNode(self, node):
        # Unlink first so the node drops its parent references, then
        # remove it from both attribute indexes.
        node.unlink()
        del self._attrs[node.name]
        del self._attrsNS[(node.namespaceURI, node.localName)]
    def getElementsByTagName(self, name):
        return _getElementsByTagNameHelper(self, name, [])
    def getElementsByTagNameNS(self, namespaceURI, localName):
        # BUG FIX: the helper's result list was previously discarded (no
        # return statement), so this method always returned None. Collect
        # into a local list so the fix does not rely on the helper's
        # return value.
        rc = []
        _getElementsByTagNameNSHelper(self, namespaceURI, localName, rc)
        return rc
    def __repr__(self):
        return "<DOM Element: %s at %s>" % (self.tagName, id(self))
    # undocumented
    def writexml(self, writer):
        # Serialise as <tag a="v" ...>children</tag> or the self-closing
        # form when there are no children; attributes are emitted in
        # sorted-name order for deterministic output.
        writer.write("<" + self.tagName)
        a_names = self._get_attributes().keys()
        a_names.sort()
        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            _write_data(writer, self._get_attributes()[a_name].value)
            writer.write("\"")
        if self.childNodes:
            writer.write(">")
            for node in self.childNodes:
                node.writexml(writer)
            writer.write("</%s>" % self.tagName)
        else:
            writer.write("/>")
    def _get_attributes(self):
        return AttributeList(self._attrs, self._attrsNS)
class Comment(Node):
    # DOM Comment node: serialises as <!-- data -->.
    nodeType = Node.COMMENT_NODE
    def __init__(self, data):
        Node.__init__(self)
        self.data = self.nodeValue = data
        self.nodeName = "#comment"
        self.attributes = None
    def writexml(self, writer):
        # NOTE(review): data is written unescaped; a value containing
        # "--" would yield invalid XML.
        writer.write("<!--%s-->" % self.data)
class ProcessingInstruction(Node):
    # DOM ProcessingInstruction node: serialises as <?target data?>.
    nodeType = Node.PROCESSING_INSTRUCTION_NODE
    def __init__(self, target, data):
        Node.__init__(self)
        self.target = self.nodeName = target
        self.data = self.nodeValue = data
        self.attributes = None
    def writexml(self, writer):
        writer.write("<?%s %s?>" % (self.target, self.data))
class Text(Node):
nodeType = Node.TEXT_NODE
nodeName = "#text"
def __init__(self, data):
Node.__init__(self)
self.data = self.nodeValue = data
self.attributes = None
def __repr__(self):
if len(self.data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
|
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from ElevatorConstants import *
from ElevatorUtils import *
import DistributedElevator
import DistributedElevatorExt
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNot | ifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from toontown.hood import ZoneUtil
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
class DistributedBossElevator(DistributedElevatorExt.DistributedElevatorExt):
    """Client-side elevator that carries toons into a Cog-HQ boss battle."""

    def __init__(self, cr):
        DistributedElevatorExt.DistributedElevatorExt.__init__(self, cr)
        self.elevatorPoints = BigElevatorPoints
        # The big boss elevator reuses the factory sliding-door sounds.
        self.openSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_sliding.ogg')
        self.finalOpenSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_final.ogg')
        self.closeSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_sliding.ogg')
        self.finalCloseSfx = base.loadSfx('phase_9/audio/sfx/CHQ_FACT_door_open_final.ogg')
        self.type = ELEVATOR_VP
        self.countdownTime = ElevatorData[self.type]['countdown']

    def disable(self):
        # NOTE(review): deliberately calls the grandparent's disable,
        # skipping the Ext layer — confirm this is intentional.
        DistributedElevator.DistributedElevator.disable(self)

    def generate(self):
        DistributedElevatorExt.DistributedElevatorExt.generate(self)

    def delete(self):
        # Tear down the model we created in setupElevator.
        self.elevatorModel.removeNode()
        del self.elevatorModel
        DistributedElevatorExt.DistributedElevatorExt.delete(self)

    def setupElevator(self):
        """Load the Cog-HQ elevator model and place it at the hood's
        elevator locator."""
        self.elevatorModel = loader.loadModel('phase_9/models/cogHQ/cogHQ_elevator')
        icon = self.elevatorModel.find('**/big_frame/')
        icon.hide()
        self.leftDoor = self.elevatorModel.find('**/left-door')
        self.rightDoor = self.elevatorModel.find('**/right-door')
        geom = base.cr.playGame.hood.loader.geom
        locator = geom.find('**/elevator_locator')
        self.elevatorModel.reparentTo(locator)
        self.elevatorModel.setH(180)
        DistributedElevator.DistributedElevator.setupElevator(self)

    def getElevatorModel(self):
        return self.elevatorModel

    def gotBldg(self, buildingList):
        return DistributedElevator.DistributedElevator.gotBldg(self, buildingList)

    def getZoneId(self):
        return 0

    def __doorsClosed(self, zoneId):
        # Nothing extra to do when the doors finish closing.
        pass

    def setBossOfficeZone(self, zoneId):
        # Server tells us which zone to enter once the ride starts; only
        # act if the local toon actually boarded.
        if self.localToonOnBoard:
            hoodId = self.cr.playGame.hood.hoodId
            doneStatus = {'loader': 'cogHQLoader',
             'where': 'cogHQBossBattle',
             'how': 'movie',
             'zoneId': zoneId,
             'hoodId': hoodId}
            self.cr.playGame.getPlace().elevator.signalDone(doneStatus)

    def setBossOfficeZoneForce(self, zoneId):
        # Forced variant: push the local toon into the elevator state and
        # straight to the boss zone even if they had not boarded normally.
        place = self.cr.playGame.getPlace()
        if place:
            place.fsm.request('elevator', [self, 1])
            hoodId = self.cr.playGame.hood.hoodId
            doneStatus = {'loader': 'cogHQLoader',
             'where': 'cogHQBossBattle',
             'how': 'movie',
             'zoneId': zoneId,
             'hoodId': hoodId}
            if hasattr(place, 'elevator') and place.elevator:
                place.elevator.signalDone(doneStatus)
            else:
                # BUG FIX: the warning previously named the wrong method
                # ("setMintInteriorZoneForce"), a copy/paste slip that made
                # log diagnosis misleading.
                self.notify.warning("setBossOfficeZoneForce: Couldn't find playGame.getPlace().elevator, zoneId: %s" % zoneId)
        else:
            self.notify.warning("setBossOfficeZoneForce: Couldn't find playGame.getPlace(), zoneId: %s" % zoneId)

    def getDestName(self):
        return TTLocalizer.ElevatorSellBotBoss
|
# -*- coding: | utf-8 -*-
"""
website.api
~~~~~~~~~~~
website api blueprint.
"""
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this fil | e except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apac | he.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
# Single project-wide base class so shared test fixtures/configuration
# can later be added in one place.
class TestCase(base.BaseTestCase):
    """Test case base class for all unit tests."""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_liter | als
try:
from builtins import *
from future import standard_library
standard_library. | install_aliases()
except:
# we might be on py3.2, which the future library doesn't support
pass
import os
import sys
if sys.version_info < (2, 7, 0): # pragma: no cover
import unittest2 as unittest
else:
import unittest
from layeredconfig import LayeredConfig, Defaults, Environment, INIFile
@unittest.skipIf (sys.version_info[0] == 3 and sys.version_info[1] < 3,
"Python 3.2 and lower doesn't support the future module")
class TestFuture(unittest.TestCase):
def test_newint(self):
os.environ['FERENDA_DOWNLOADMAX'] = '3'
config = LayeredConfig(Defaults({'downloadmax': int}),
Environment(prefix="FERENDA_"))
self.assertEqual(3, config.downloadmax)
self.assertIsInstance(config.downloadmax, int)
|
eto': imp_neto,
'impto_liq': impto_liq, 'impto_liq_rni': impto_liq_rni,
'imp_op_ex': imp_op_ex, 'imp_tot_conc': imp_tot_conc,
'imp_perc': imp_perc, 'imp_perc_mun': imp_perc_mun,
'imp_iibb': imp_iibb, 'imp_internos': imp_internos,
'imp_moneda_id': imp_moneda_id, 'imp_moneda_ctz': imp_moneda_ctz,
'cbtes_asoc': [],
'iva': [],
'detalles': [],
}
self.factura = fact
return True
def AgregarItem(self, ncm, sec, ds, qty, umed, precio, bonif, iva_id, imp_total, **kwargs):
"Agrego un item a una factura (interna)"
##ds = unicode(ds, "latin1") # convierto a latin1
# Nota: no se calcula neto, iva, etc (deben venir calculados!)
self.factura['detalles'].append({
'ncm': ncm, 'sec': sec,
'ds': ds,
'qty': qty,
'umed': umed,
'precio': precio,
'bonif': bonif,
'iva_id': iva_id,
'imp_total': imp_total,
})
return True
    @inicializar_y_capturar_excepciones
    def Authorize(self, id):
        "Authorize the invoice currently loaded in memory (WSBFE BFEAuthorize)."
        f = self.factura
        # Build the SOAP request from the in-memory invoice dict.
        ret = self.client.BFEAuthorize(
            Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
            Cmp={
                'Id': id,
                'Zona': f['zona'],
                'Fecha_cbte': f['fecha_cbte'],
                'Tipo_cbte': f['tipo_cbte'],
                'Punto_vta': f['punto_vta'],
                'Cbte_nro': f['cbte_nro'],
                'Tipo_doc': f['tipo_doc'], 'Nro_doc': f['nro_doc'],
                'Imp_moneda_Id': f['imp_moneda_id'],
                'Imp_moneda_ctz': f['imp_moneda_ctz'],
                'Imp_total': f['imp_total'],
                'Imp_tot_conc': f['imp_tot_conc'], 'Imp_op_ex': f['imp_op_ex'],
                'Imp_neto': f['imp_neto'], 'Impto_liq': f['impto_liq'],
                'Impto_liq_rni': f['impto_liq_rni'],
                'Imp_perc': f['imp_perc'], 'Imp_perc_mun': f['imp_perc_mun'],
                'Imp_iibb': f['imp_iibb'],
                'Imp_internos': f['imp_internos'],
                'Items': [
                    {'Item': {
                        'Pro_codigo_ncm': d['ncm'],
                        'Pro_codigo_sec': d['sec'],
                        'Pro_ds': d['ds'],
                        'Pro_qty': d['qty'],
                        'Pro_umed': d['umed'],
                        'Pro_precio_uni': d['precio'],
                        'Imp_bonif': d['bonif'],
                        'Imp_total': d['imp_total'],
                        'Iva_id': d['iva_id'],
                    }} for d in f['detalles']],
            })
        result = ret['BFEAuthorizeResult']
        self.__analizar_errores(result)
        if 'BFEResultAuth' in result:
            auth = result['BFEResultAuth']
            # Resultado: A: Accepted, R: Rejected
            self.Resultado = auth.get('Resultado', "")
            # Observations (remarks returned by AFIP):
            self.Obs = auth.get('Obs', "")
            self.Reproceso = auth.get('Reproceso', "")
            self.CAE = auth.get('Cae', "")
            # NOTE(review): Fch_cbte (a date) is stored into CbteNro here,
            # while GetCMP stores Cbte_nro — looks like a copy/paste slip;
            # confirm before relying on CbteNro after Authorize.
            self.CbteNro = auth.get('Fch_cbte', "")
            self.ImpTotal = str(auth.get('Imp_total', ''))
            self.ImptoLiq = str(auth.get('Impto_liq', ''))
            self.ImpNeto = str(auth.get('Imp_neto', ''))
            # CAE expiry comes back as YYYYMMDD; keep the raw value and a
            # DD/MM/YYYY display form.
            vto = str(auth.get('Fch_venc_Cae', ''))
            self.FchVencCAE = vto
            self.Vencimiento = "%s/%s/%s" % (vto[6:8], vto[4:6], vto[0:4])
        return self.CAE
@inicializar_y_capturar_excepciones
def Dummy(self):
"Obtener el estado de los servidores de la AFIP"
result = self.client.BFEDummy()['BFEDummyResult']
self.__analizar_errores(result)
self.AppServerStatus = str(result['AppServer'])
self.DbServerStatus = str(result['DbServer'])
self.AuthServerStatus = str(result['AuthServer'])
return True
    @inicializar_y_capturar_excepciones
    def GetCMP(self, tipo_cbte, punto_vta, cbte_nro):
        "Fetch the full data of an already-authorized voucher; returns its CAE, or 0 if not found."
        ret = self.client.BFEGetCMP(
            Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit},
            Cmp={"Tipo_cbte": tipo_cbte,
                 "Punto_vta": punto_vta, "Cbte_nro": cbte_nro})
        result = ret['BFEGetCMPResult']
        self.__analizar_errores(result)
        if 'BFEResultGet' in result:
            resultget = result['BFEResultGet']
            # CAE, its expiry and observations are only present on authorized vouchers.
            if 'Cae' in resultget:
                self.Obs = resultget['Obs'] and resultget['Obs'].strip(" ") or ''
                self.CAE = resultget['Cae']
                # Reformat AAAAMMDD -> DD/MM/AAAA.
                vto = str(resultget['Fch_venc_Cae'])
                self.Vencimiento = "%s/%s/%s" % (vto[6:8], vto[4:6], vto[0:4])
            self.FechaCbte = resultget['Fecha_cbte_orig'] #.strftime("%Y/%m/%d")
            self.FechaCAE = resultget['Fecha_cbte_cae'] #.strftime("%Y/%m/%d")
            self.PuntoVenta = resultget['Punto_vta'] # 4000
            self.Resultado = resultget['Resultado']
            self.CbteNro =resultget['Cbte_nro']
            self.ImpTotal = resultget['Imp_total']
            self.ImptoLiq = resultget['Impto_liq']
            self.ImpNeto = resultget['Imp_neto']
            return self.CAE
        else:
            return 0
    @inicializar_y_capturar_excepciones
    def GetLastCMP(self, tipo_cbte, punto_vta):
        "Fetch the last voucher number issued for a voucher type / point of sale."
        # NOTE(review): unlike GetCMP, the voucher type and point of sale are sent
        # inside the Auth structure -- this mirrors the BFEGetLast_CMP schema;
        # confirm against the WSBFE WSDL before refactoring.
        ret = self.client.BFEGetLast_CMP(
            Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit,
                  "Tipo_cbte": tipo_cbte,
                  "Pto_venta": punto_vta,
                  })
        result = ret['BFEGetLast_CMPResult']
        self.__analizar_errores(result)
        if 'BFEResult_LastCMP' in result:
            resultget = result['BFEResult_LastCMP']
            self.CbteNro =resultget.get('Cbte_nro')
            self.FechaCbte = resultget.get('Cbte_fecha') #.strftime("%Y/%m/%d")
            return self.CbteNro
    @inicializar_y_capturar_excepciones
    def GetLastID(self):
        "Fetch the last transaction ID used; returns None when the service sends no result."
        ret = self.client.BFEGetLast_ID(
            Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit, })
        result = ret['BFEGetLast_IDResult']
        self.__analizar_errores(result)
        if 'BFEResultGet' in result:
            resultget = result['BFEResultGet']
            return resultget.get('Id')
@inicializar_y_capturar_excepciones
def GetParamUMed(self):
ret = self.client.BFEGetPARAM_UMed(
Auth={'Token': self.Token, 'Sign': self.Sign, 'Cuit': self.Cuit, })
result = ret['BFEGetPARAM_UMedResult']
self.__analizar_errores(result)
umeds = [] # unidades de medida
for u in result['BFEResultGet']:
u = u['ClsBFEResponse_UMed']
try:
umed = {'id': u.get('Umed_Id'), 'ds': u.get('Umed_Ds'),
'vig_desde': u.get('Umed_vig_desde'),
'vig_hasta': u.get('Umed_vig_hasta')}
except Exception, e:
print e
if u is None:
# <ClsFEXResponse_UMed xsi:nil="true"/> WTF!
umed = {'id':'', 'ds':'','vig_desde':'','vig_hasta':''}
#import pdb; pdb.set_trace()
#print u
umeds.append(umed)
return ['%(id)s: %(ds)s (%(vig_desde)s - %(vig_hasta)s)' % p for p in umeds]
@inicializar_y_capturar_excepciones
def GetParamMon(self):
ret = self.client.BFEGetPARAM_MON(
Auth={'Token': sel |
update[x], Bullet):
self.to_update[x].update()
elif isinstance(self.to_update[x], EnemyDrop):
self.to_update[x].update()
elif isinstance(self.to_update[x], HealthPack):
self.to_update[x].update()
elif isinstance(self.to_update[x], NoteController):
self.to_update[x].update()
elif isinstance(self.to_update[x], Stopper):
self.to_update[x].update(self.bullet_list, self.enemy_list, self.player, self.note_controller)
elif isinstance(self.to_update[x], HealthBar):
self.to_update[x].update()
    def display_all(self):
        # Clear the frame to near-black, then draw every displayable object.
        main_s.fill((20, 20, 20))
        for x in range(0, len(self.to_display)):
            if isinstance(self.to_display[x], Player):
                # The player sprite is skipped once dead; everything else always draws.
                if self.to_display[x].alive:
                    self.to_display[x].display()
            else:
                self.to_display[x].display()
        # HUD: info bar along the top plus the pause hint in the corner.
        main_s.blit(self.info_bar, (0, 0))
        main_s.blit(font.render("ESC TO PAUSE", True, (255, 255, 255)), (width - 115, 5))
def text_all(self):
# display all text needed at the top of the screen
total_length = 0
for x in range(0, len(self.to_text)):
main_s.blit(font.render(str(self.to_text[x]), True, (255, 255, 255)), (5 + (15 * total_length), 5))
total_length += len(self.to_text[x])
def hit_particles(self, rect_hit, colour):
# create particles with random speeds, directions and sizes
numbers_z = range(-10, 10)
numbers_nz = range(-10, -1) + range(1, 10)
for x in range(0, settings.loaded_enemy_particles):
x_temp = random.choice(numbers_z)
y_temp = random.choice(numbers_z)
dy = y_temp
dx = x_temp
# make sure that dx and dy are not both 0 so that there
# are no particles static on the screen
if x_temp == 0 and y_temp != 0:
dy = y_temp
dx = x_temp
if y_temp == 0 and x_temp != 0:
dy = y_temp
dx = x_temp
if x_temp == y_temp == 0:
dy = random.choice(numbers_nz)
dx = random.choice(numbers_nz)
particle = Particle(random.randint(1, 3), (dx, dy), rect_hit, colour)
self.particle_list.append(particle)
def remove_particles(self):
# remove particles that are no longer colliding with the screen
# removed from the end first so that the list does not effect
# later elements to remove
for x in range(0, len(self.particle_list)):
try:
if not pygame.sprite.collide_rect(screen_rect, self.particle_list[len(self.particle_list) - x - 1]):
del self.particle_list[len(self.particle_list) - x - 1]
except:
# break in case [len(p_list) - x - 1] is out of range
break
def remove_stars(self):
# remove stars that are no longer colliding with the screen
# removed from the end first so that the list does not effect
# later elements to remove
for x in range(0, len(self.star_list)):
try:
if not pygame.sprite.collide_rect(screen_rect, self.star_list[len(self.star_list) - x - 1]):
del self.star_list[len(self.star_list) - x - 1]
except:
# break in case [len(p_list) - x - 1] is out of range
break
def remove_packages(self):
print(len(self.package_list))
for i in range(0, len(self.package_list)):
try:
if not pygame.sprite.collide_rect(screen_rect, self.package_list[len(self.package_list) - i - 1]):
del self.package_list[len(self.package_list) - i - 1]
except IndexError:
# break in case [len(p_list) - x - 1] is out of range
break
def check_enemy_alive(self):
# add enemies to a removal list if they are dead
for x in range(0, len(self.enemy_list)):
if self.enemy_list[x].dead:
self.kill_list.append(self.enemy_list[x])
    def kill_enemies(self):
        # For every queued kill: drop it from the kill list, award coins,
        # spawn white hit particles and possibly a drop, notify the player,
        # then remove the enemy itself. Both lists are walked from the back
        # so earlier deletions do not shift the elements still to visit.
        # NOTE(review): the notification shows money * money_collection but
        # get_coins() receives the raw money value -- confirm whether the
        # multiplier is applied inside get_coins() or this is an inconsistency.
        # NOTE(review): the bare except doubles as the loop terminator once
        # indices run off the shrunken lists -- deliberate but fragile.
        for x in range(0, len(self.kill_list)):
            for y in range(0, len(self.enemy_list)):
                try:
                    if self.kill_list[len(self.kill_list) - x - 1].id == self.enemy_list[len(self.enemy_list) - y - 1].id:
                        del self.kill_list[len(self.kill_list) - x - 1]
                        self.note_controller.add_note("+ " + str(self.enemy_list[len(self.enemy_list) - y - 1].money * self.player.money_collection) + " coins", main_theme)
                        self.player.get_coins(self.enemy_list[len(self.enemy_list) - y - 1].money)
                        self.hit_particles(self.enemy_list[len(self.enemy_list) - y - 1].rect, white)
                        self.random_enemy_drop(self.enemy_list[len(self.enemy_list) - y - 1].dx,
                                               self.enemy_list[len(self.enemy_list) - y - 1].rect.center)
                        del self.enemy_list[len(self.enemy_list) - y - 1]
                        break
                except:
                    break
def random_event_enemy(self):
# create an enemy if the random variable is 1
if random.randint(1, settings.loaded_enemy_chance) == 1:
enemy = Enemy(self.enemy_id_tracker)
self.enemy_list.append(enemy)
self.enemy_id_tracker += 1
def random_event_star(self):
if random.randint(1, star_chance) == 1:
# create a star starting at the right and set to move to the left
s = Star(width + 10, # x pos (start a little off screen)
random.randint(0, height), # y pos
random.randint(1, 2), # dx
0) # dy
self.star_list.append(s)
def random_enemy_drop(self, speed, pos):
# random chance that package will be created
if random.randint(1, package_chance) == 1:
e = EnemyDrop(speed, pos)
self.package_list.append(e)
def random_health_pack(self):
pos = (width + 10, random.randint(20, height - 20))
# random chance that package will be created
if random.randint(1, package_chance * 50) == 1:
h = | HealthPack(-random.randint(1 | , 2), pos)
self.package_list.append(h)
def input(self, event_list):
# player input
key = pygame.key.get_pressed()
if key[pygame.K_UP]:
self.player.move(-1)
if key[pygame.K_DOWN]:
self.player.move(1)
if key[pygame.K_SPACE]:
self.bullet_list = self.player.shoot(self.bullet_list)
for event in event_list:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.next_state = pause_state
def run(self, event_list):
# run all game functions
self.input(event_list)
self.random_event_enemy()
self.random_event_star()
self.random_health_pack()
self.check_enemy_alive()
self.kill_enemies()
self.remove_particles()
self.remove_stars()
#self.remove_packages()
# reload all lists
self.to_display = self.package_list + self.star_list + self.bullet_list + self.enemy_list + \
self.particle_list + [self.player, self.note_controller, self.health_bar]
self.to_update = [self.player, self.note_controller, self.left_stop, self.right_stop] + \
self.package_list + self.star_list + self.bullet_list + self.enemy_list + s |
"""A torture test to ferret out problems with multi-threading."""
import sys
import threading
from ndb import tasklets
from ndb import eventloop
def main():
  """Spawn N worker threads, each running the tasklet torture test."""
  count = 10
  try:
    count = int(sys.argv[1])
  except Exception:
    pass  # no/invalid argument: keep the default
  workers = []
  for index in range(count):
    worker = threading.Thread(target=one_thread, args=(index, count,))
    worker.start()
    workers.append(worker)
  # Wait for every worker to finish before returning.
  for worker in workers:
    worker.join()
@tasklets.toplevel
def one_thread(i, num):
  """Worker body: compute fibonacci(num) through the tasklet machinery and
  print one result line. `i` is the thread index used only for labelling."""
  ##sys.stdout.write('eventloop = 0x%x\n' % id(eventloop.get_event_loop()))
  x = yield fibonacci(num)
  sys.stdout.write('%d: %d --> %d\n' % (i, num, x))
@tasklets.tasklet
def fibonacci(n):
  """A recursive Fibonacci to exercise task switching."""
  if n <= 1:
    # Tasklets deliver their result by raising Return, not by `return`.
    raise tasklets.Return(n)
  # Each yield suspends this tasklet until the sub-computation finishes,
  # forcing many context switches between the competing threads.
  a = yield fibonacci(n - 1)
  b = yield fibonacci(n - 2)
  raise tasklets.Return(a + b)
if __name__ == '__main__':
main()
|
from struct import pack
from sqlite3 import Binary
def pts(c):
    """Return the (struct-format, values) pair for one point: two doubles."""
    return ["dd", [c.X, c.Y]]
def pt4mp(c):
    """Format fragment for one point inside a multipoint: byte-order marker,
    Point type code (1), then the x/y doubles."""
    return ["Bidd", [1, 1, c.X, c.Y]]
def mp(coordinates):
    """Accumulate the struct format and value list for every part of a
    multipoint; the leading unsigned int counts the parts."""
    out = ["I", [0]]
    for idx in range(coordinates.partCount):
        fmt, vals = pt4mp(coordinates.getPart(idx))
        out[0] += fmt
        out[1][0] += 1
        out[1].extend(vals)
    return out
def lineSt(coordinates):
    """Accumulate the struct format and value list for the vertices of a
    single line string; the leading unsigned int counts the vertices."""
    out = ["I", [0]]
    for idx in range(coordinates.count):
        fmt, vals = pts(coordinates[idx])
        out[0] += fmt
        out[1][0] += 1
        out[1].extend(vals)
    return out
def multiLine(coordinates):
    """Accumulate format/values for a multi-part polyline. Each part is
    prefixed with a byte-order marker (1) and the LineString type code (2)."""
    out = ["I", [0]]
    for idx in range(coordinates.partCount):
        fmt, vals = lineSt(coordinates.getPart(idx))
        out[0] += "BI" + fmt
        out[1][0] += 1
        out[1].extend([1, 2])
        out[1].extend(vals)
    return out
def linearRing(coordinates):
    """Accumulate format/values for a polygon's rings.

    Rings are separated by falsy entries (None) in `coordinates`; each ring
    must have at least 4 points or the whole call returns False.
    """
    partCount=coordinates.count
    i=0
    # Per-ring accumulators: vertex count + coords, and their format chars.
    values =[0]
    outnum = "I"
    # Overall accumulator: ring count + flattened ring data.
    out = ["I",[0]]
    while i<partCount:
        pt = coordinates[i]
        if pt:
            [ptrn,c]=pts(pt)
            outnum+=ptrn
            values[0]+=1
            values.extend(c)
        else:
            # Falsy entry = ring separator: validate and flush the ring.
            if values[0]<4:
                return False
            out[0]+=outnum
            out[1][0]+=1
            out[1].extend(values)
            values =[0]
            outnum = "I"
        i+=1
    # Flush the final ring (input does not end with a separator).
    if values[0]<4:
        return False
    out[0]+=outnum
    out[1][0]+=1
    out[1].extend(values)
    return out
def multiRing(coordinates):
    """Accumulate format/values for a multi-part polygon. Each part is
    prefixed with a byte-order marker (1) and the Polygon type code (3).

    Fix: the original ended with two consecutive `return out` statements;
    the second was unreachable dead code and has been removed.
    """
    # NOTE(review): linearRing() may return False for degenerate rings
    # (< 4 points); that case is not handled here and would raise on
    # unpacking -- confirm upstream guarantees valid rings.
    out = ["I", [0]]
    i = 0
    while i < coordinates.partCount:
        part = coordinates.getPart(i)
        [ptrn, c] = linearRing(part)
        out[0] += "BI"
        out[0] += ptrn
        out[1][0] += 1
        out[1].extend([1, 3])
        out[1].extend(c)
        i += 1
    return out
def makePoint(c):
    """Pack a single point geometry into a little-endian WKB blob (type 1)."""
    fmt, coords = pts(c.getPart(0))
    packed = ["<BI" + fmt, 1, 1]
    packed.extend(coords)
    return Binary(pack(*packed))
def makeMultiPoint(c):
    """Pack a multipoint geometry into a little-endian WKB blob (type 4)."""
    fmt, coords = mp(c)
    packed = ["<BI" + fmt, 1, 4]
    packed.extend(coords)
    return Binary(pack(*packed))
def makeMultiLineString(c):
    """Pack a polyline as WKB: LineString (type 2) when single-part,
    MultiLineString (type 5) otherwise. Returns False for empty geometry."""
    if c.partCount == 1:
        geom_type = 2
        fmt, coords = lineSt(c.getPart(0))
    elif c.partCount > 1:
        geom_type = 5
        fmt, coords = multiLine(c)
    else:
        return False
    packed = ["<BI" + fmt, 1, geom_type]
    packed.extend(coords)
    return Binary(pack(*packed))
def makeMultiPolygon(c):
    """Pack a polygon as WKB: Polygon (type 3) when single-part,
    MultiPolygon (type 6) otherwise. Returns False for empty geometry."""
    if c.partCount == 1:
        geom_type = 3
        fmt, coords = linearRing(c.getPart(0))
    elif c.partCount > 1:
        geom_type = 6
        fmt, coords = multiRing(c)
    else:
        return False
    packed = ["<BI" + fmt, 1, geom_type]
    packed.extend(coords)
    return Binary(pack(*packed))
def getWKBFunc(type, field):
    """Return a row -> WKB-blob converter for the given geometry type.

    Unknown types yield None (matching the original if/elif fall-through).
    The builders are lambdas so the maker functions are resolved lazily,
    exactly as before.
    """
    builders = {
        "point": lambda row: makePoint(row.getValue(field)),
        "multipoint": lambda row: makeMultiPoint(row.getValue(field)),
        "polyline": lambda row: makeMultiLineString(row.getValue(field)),
        "polygon": lambda row: makeMultiPolygon(row.getValue(field)),
    }
    return builders.get(type)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the optional self-referential
    # "related_project" foreign key to the Project model.
    dependencies = [
        ('index', '0004_repository_public'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='related_project',
            # blank=True + null=True: the relation is optional at both the
            # form-validation and database levels.
            field=models.ForeignKey(blank=True, to='index.Project', null=True),
            preserve_default=True,
        ),
    ]
|
if hasattr(sys, "frozen"):
plugin_path = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "PyQt5", "plugins")
Logger.log("i", "Adding QT5 plugin path: %s" % (plugin_path))
QCoreApplication.addLibraryPath(plugin_path)
else:
import site
for dir in site.getsitepackages():
QCoreApplication.addLibraryPath(os.path.join(dir, "PyQt5", "plugins"))
elif sys.platform == "darwin":
plugin_path = os.path.join(Application.getInstallPrefix(), "Resources", "plugins")
if plugin_path:
Logger.log("i", "Adding QT5 plugin path: %s" % (plugin_path))
QCoreApplication.addLibraryPath(plugin_path)
os.environ["QSG_RENDER_LOOP"] = "basic"
super().__init__(sys.argv, **kwargs)
self._plugins_loaded = False #Used to determine when it's safe to use the plug-ins.
self._main_qml = "main.qml"
self._engine = None
self._renderer = None
self._main_window = None
self._shutting_down = False
self._qml_import_paths = []
self._qml_import_paths.append(os.path.join(os.path.dirname(sys.executable), "qml"))
self._qml_import_paths.append(os.path.join(Application.getInstallPrefix(), "Resources", "qml"))
self.setAttribute(Qt.AA_UseDesktopOpenGL)
try:
self._splash = self._createSplashScreen()
except FileNotFoundError:
self._splash = None
else:
self._splash.show()
self.processEvents()
signal.signal(signal.SIGINT, signal.SIG_DFL)
# This is done here as a lot of plugins require a correct gl context. If you want to change the framework,
# these checks need to be done in your <framework>Application.py class __init__().
i18n_catalog = i18nCatalog("uranium")
self.showSplashMessage(i18n_catalog.i18nc("@info:progress", "Loading plugins..."))
self._loadPlugins()
self.parseCommandLine()
Logger.log("i", "Command line arguments: %s", self._parsed_command_line)
self._plugin_registry.checkRequiredPlugins(self.getRequiredPlugins())
self.showSplashMessage(i18n_catalog.i18nc("@info:progress", "Updating configuration..."))
upgraded = UM.VersionUpgradeManager.VersionUpgradeManager.getInstance().upgrade()
if upgraded:
preferences = UM.Preferences.getInstance() #Preferences might have changed. Load them again.
#Note that the language can't be updated, so that will always revert to English.
try:
preferences.readFromFile(Resources.getPath(Resources.Preferences, self._application_name + ".cfg"))
except FileNotFoundError:
pass
self.showSplashMessage(i18n_catalog.i18nc("@info:progress", "Loading preferences..."))
try:
file = Resources.getPath(Resources.Preferences, self.getApplicationName() + ".cfg")
Preferences.getInstance().readFromFile(file)
except FileNotFoundError:
pass
    def run(self):
        # Intentional no-op; presumably concrete applications override this
        # with their event loop -- TODO confirm against subclasses.
        pass
def hideMessage(self, message):
with self._message_lock:
if message in self._visible_messages:
self._visible_messages.remove(message)
self.visibleMessageRemoved.emit(message)
def showMessage(self, message):
with self._message_lock:
if message not in self._visible_messages:
self._visible_messages.append(message)
message.setTimer(QTimer())
self.visibleMessageAdded.emit(message)
    def setMainQml(self, path):
        """Set the QML file loaded by initializeEngine(); takes effect on the next call."""
        self._main_qml = path
    def initializeEngine(self):
        """Create the QML engine, wire up import paths and context, and load main QML."""
        # TODO: Document native/qml import trickery
        Bindings.register()
        self._engine = QQmlApplicationEngine()
        for path in self._qml_import_paths:
            self._engine.addImportPath(path)
        # When not frozen into an executable, also pick up the qml directory
        # next to this module.
        if not hasattr(sys, "frozen"):
            self._engine.addImportPath(os.path.join(os.path.dirname(__file__), "qml"))
        self._engine.rootContext().setContextProperty("QT_VERSION_STR", QT_VERSION_STR)
        self._engine.rootContext().setContextProperty("screenScaleFactor", self._screenScaleFactor())
        # Let subclasses expose their objects before the QML is evaluated.
        self.registerObjects(self._engine)
        self._engine.load(self._main_qml)
        self.engineCreatedSignal.emit()
engineCreatedSignal = Signal()
    def isShuttingDown(self):
        """Return True once windowClosed() has begun shutting the application down."""
        return self._shutting_down
    def registerObjects(self, engine):
        """Hook for subclasses to expose objects to the QML engine; no-op here."""
        pass
    def getRenderer(self):
        """Lazily create and return the shared QtRenderer instance."""
        if not self._renderer:
            self._renderer = QtRenderer()
        return self._renderer
    def addCommandLineOptions(self, parser):
        """Register Qt-specific command line options on the given argparse parser."""
        # NOTE(review): dest contains a hyphen, so the parsed value can only be
        # read via getattr(args, "disable-textures") -- confirm the consumer.
        parser.add_argument("--disable-textures",
                            dest="disable-textures",
                            action="store_true", default=False,
                            help="Disable Qt texture loading as a workaround for certain crashes.")
    # Overridden from QApplication::setApplicationName to call our internal setApplicationName
    def setApplicationName(self, name):
        # Explicit unbound call routes to the Application base class, bypassing
        # QApplication's implementation in the MRO.
        Application.setApplicationName(self, name)
mainWindowChanged = Signal()
    def getMainWindow(self):
        """Return the main window set via setMainWindow(), or None before that."""
        return self._main_window
def setMainWindow(self, window):
if window != self._main_window:
self._main_window = window
self.mainWindowChanged.emit()
    # Handle a function that should be called later.
    def functionEvent(self, event):
        """Queue a function event onto the Qt event loop so it runs on the main thread."""
        e = _QtFunctionEvent(event)
        QCoreApplication.postEvent(self, e)
# Handle Qt events
def event(self, event):
if event.type() == _QtFunctionEvent.QtFunctionEvent:
event._function_event.call()
return True
return super().event(event)
    def windowClosed(self):
        """Shut the application down: persist preferences, signal listeners,
        close the backend, then quit.

        Each step is wrapped in its own broad except so one failing step
        cannot prevent the remaining shutdown work from running.
        """
        Logger.log("d", "Shutting down %s", self.getApplicationName())
        self._shutting_down = True
        try:
            Preferences.getInstance().writeToFile(Resources.getStoragePath(Resources.Preferences, self.getApplicationName() + ".cfg"))
        except Exception as e:
            Logger.log("e", "Exception while saving preferences: %s", repr(e))
        try:
            self.applicationShuttingDown.emit()
        except Exception as e:
            Logger.log("e", "Exception while emitting shutdown signal: %s", repr(e))
        try:
            self.getBackend().close()
        except Exception as e:
            Logger.log("e", "Exception while closing backend: %s", repr(e))
        self.quit()
## Load a Qt translation catalog.
#
# This method will locate, load and install a Qt mes | sage catalog that can be used
# by Qt's translation system, like qsTr() in QML files.
#
# \param file The file name to load, without extension. It will be searched for in
# the i18nLocation Resources directory. If it can not be found a warning
# will be logged but no error will be thrown.
# \param language The language to load translations for. This can be any valid language code
# or 'default' in which case the la | nguage is looked up based on system locale.
# If the specified language can not be found, this method will fall back to
# loading the english translations file.
#
# \note When `language` is `default`, the language to load can be changed with the
# environment variable "LANGUAGE".
def loadQtTranslation(self, file, language = "default"):
#TODO Add support for specifying a language from preferences
path = None
if language == "default":
path = self._getDefaultLanguage(file)
else:
path = Resources.getPath(Resources.i18n, language, "LC_MESSAGES", file + ".qm")
# If all else fails, fall back to english.
if not path:
Logger.log("w", "Could not find any translations matching {0} for file {1}, falling back to english".format(language, file))
tr |
# -*- coding:utf-8 -*-
"""
一些能够帮你提升效率的辅助函数
"""
from __future__ import unicode_literals, division
from copy import deepcopy
from datetime import timedelta
from threading import Thread
import requests
import requests.exceptions
from six.moves import urllib
from .log import logger
from .value import ENV
__all__ = ['get_point', 'cal_gpa', 'cal_term_code', 'term_str2code', 'sort_hosts', 'filter_curriculum']
def get_point(grade_str):
    """Map a grade string to its grade point.

    :param grade_str: the raw grade, either a 0-100 numeric string or a
        level string such as '优' / '良'
    :return: the grade point
    :rtype: float
    :raises ValueError: when the string is neither numeric nor a known level
    :raises AssertionError: when a numeric grade falls outside [0, 100]
    """
    try:
        grade = float(grade_str)
    except ValueError:
        # Level-based grades map through a fixed table.
        levels = {'优': 3.9, '良': 3.0, '中': 2.0, '及格': 1.2,
                  '不及格': 0.0, '免修': 0.0, '未考': 0.0}
        if grade_str in levels:
            return levels[grade_str]
        raise ValueError('{:s} 不是有效的成绩'.format(grade_str))
    assert 0 <= grade <= 100
    # Walk the thresholds from highest to lowest; the first match wins,
    # reproducing the original if/elif ladder exactly.
    for lower_bound, point in ((95, 4.3), (90, 4.0), (85, 3.7), (82, 3.3),
                               (78, 3.0), (75, 2.7), (72, 2.3), (68, 2.0),
                               (66, 1.7), (64, 1.3), (60, 1.0)):
        if grade >= lower_bound:
            return point
    return 0.0
def cal_gpa(grades):
    """Compute (average grade point, credit-weighted GPA) from a grade list.

    The algorithm may not match the school's official one; results are for
    reference only. An empty list raises ZeroDivisionError, as before.

    :param grades: grade dicts as returned by
        :meth:`models.StudentSession.get_my_achievements`
    :return: tuple of (course-average point, GPA), each rounded to 5 places
    """
    total_points = 0
    total_credits = 0
    weighted_points = 0
    for course in grades:
        # A make-up exam score ('补考成绩') overrides the original score.
        point = get_point(course.get('补考成绩') or course['成绩'])
        credit = float(course['学分'])
        total_points += point
        total_credits += credit
        weighted_points += credit * point
    average = total_points / len(grades)
    gpa = weighted_points / total_credits
    return round(average, 5), round(gpa, 5)
def cal_term_code(year, is_first_term=True):
    """Compute the term code for an academic year.

    :param year: starting year of the academic year, e.g. 2012 for the
        2012-2013 academic year
    :param is_first_term: whether this is the first term
    :type is_first_term: bool
    :return: a three-digit term code string such as "022"
    :raises ValueError: for years at or before 2001 (out of range)
    """
    if year <= 2001:
        raise ValueError('出现了超出范围年份: {}'.format(year))
    offset = (year - 2001) * 2
    # The first term of an academic year is one slot before the second.
    return '%03d' % (offset - 1 if is_first_term else offset)
def term_str2co | de(term_str):
"""
将学期字符串转换为对应的学期代码串
:param term_str: 形如 "2012-2013学年第二学期" 的学期字符串
:return: 形如 "022" 的学期代码
"""
result = ENV['TERM_PATTERN'].match(term_str).groups()
year = int(result[0])
return cal_term_code(year, result[1] == '一')
def sort_hosts(hosts, | method='GET', path='/', timeout=(5, 10), **kwargs):
"""
测试各个地址的速度并返回排名, 当出现错误时消耗时间为 INFINITY = 10000000
:param method: 请求方法
:param path: 默认的访问路径
:param hosts: 进行的主机地址列表, 如 `['http://222.195.8.201/']`
:param timeout: 超时时间, 可以是一个浮点数或 形如 ``(连接超时, 读取超时)`` 的元祖
:param kwargs: 其他传递到 ``requests.request`` 的参数
:return: 形如 ``[(访问耗时, 地址)]`` 的排名数据
"""
ranks = []
class HostCheckerThread(Thread):
def __init__(self, host):
super(HostCheckerThread, self).__init__()
self.host = host
def run(self):
INFINITY = 10000000
try:
url = urllib.parse.urljoin(self.host, path)
res = requests.request(method, url, timeout=timeout, **kwargs)
res.raise_for_status()
cost = res.elapsed.total_seconds() * 1000
except Exception as e:
logger.warning('访问出错: %s', e)
cost = INFINITY
# http://stackoverflow.com/questions/6319207/are-lists-thread-safe
ranks.append((cost, self.host))
threads = [HostCheckerThread(u) for u in hosts]
for t in threads:
t.start()
for t in threads:
t.join()
ranks.sort()
return ranks
def filter_curriculum(curriculum, week, weekday=None):
    """Filter the curriculum down to a given week (and optionally a weekday).

    :param curriculum: the full curriculum table
    :param week: the week number to keep (a positive integer)
    :param weekday: optional weekday, 1-7 for Monday through Sunday
    :return: when weekday is omitted, the same shape as the input but with
        only the courses of the given week; otherwise just that day's list
    """
    if weekday:
        days = [deepcopy(curriculum[weekday - 1])]
    else:
        days = deepcopy(curriculum)
    for day in days:
        for slot_idx, slot in enumerate(day):
            if slot is None:
                continue
            # Keep only courses scheduled in the requested week; an empty
            # result collapses back to None.
            kept = list(filter(lambda course: week in course['上课周数'], slot)) or None
            if kept is not None and len(kept) > 1:
                # Normally a time slot holds at most one course; warn on conflicts.
                logger.warning('第 %d 周周 %d 第 %d 节课有冲突: %s',
                               week, weekday or days.index(day) + 1, slot_idx + 1, kept)
            day[slot_idx] = kept
    return days[0] if weekday else days
def curriculum2schedule(curriculum, first_day, compress=False, time_table=None):
    """Convert a curriculum table into a sorted class-time schedule.

    With ``compress=True``, consecutive periods of the same course on the
    same day are merged into a single entry spanning both.

    :param curriculum: the curriculum table (days x time slots x courses)
    :param first_day: Monday of week 1, e.g. ``datetime.datetime(2016, 8, 29)``
    :param compress: merge back-to-back periods of the same course
    :param time_table: an 11 x 2 matrix of ``(start timedelta, end timedelta)``
        rows giving each period's start/end offsets within a day
    :return: list of ``(week, start datetime, end datetime, course)`` tuples
    """
    schedule = []
    # Default period times: 4 morning, 4 afternoon, 3 evening slots.
    time_table = time_table or (
        (timedelta(hours=8), timedelta(hours=8, minutes=50)),
        (timedelta(hours=9), timedelta(hours=9, minutes=50)),
        (timedelta(hours=10, minutes=10), timedelta(hours=11)),
        (timedelta(hours=11, minutes=10), timedelta(hours=12)),
        (timedelta(hours=14), timedelta(hours=14, minutes=50)),
        (timedelta(hours=15), timedelta(hours=15, minutes=50)),
        (timedelta(hours=16), timedelta(hours=16, minutes=50)),
        (timedelta(hours=17), timedelta(hours=17, minutes=50)),
        (timedelta(hours=19), timedelta(hours=19, minutes=50)),
        (timedelta(hours=19, minutes=50), timedelta(hours=20, minutes=40)),
        (timedelta(hours=20, minutes=40), timedelta(hours=21, minutes=30))
    )
    # i = weekday index (0 = Monday), j = period index within the day.
    for i, d in enumerate(curriculum):
        for j, cs in enumerate(d):
            for c in cs or []:
                course = '{name}[{place}]'.format(name=c['课程名称'], place=c['课程地点'])
                for week in c['上课周数']:
                    day = first_day + timedelta(weeks=week - 1, days=i)
                    start, end = time_table[j]
                    item = (week, day + start, day + end, course)
                    schedule.append(item)
    schedule.sort()
    if compress:
        new_schedule = [schedule[0]]
        for i in range(1, len(schedule)):
            sch = schedule[i]
            # Same day and same course as the previous entry: extend it
            # instead of appending a new one.
            if new_schedule[-1][1].date() == sch[1].date() and new_schedule[-1][3] == sch[3]:
                # Replace the last entry with one that keeps its start but
                # adopts this period's end time.
                old_item = new_schedule.pop()
                # week, start, end, course
                new_item = (old_item[0], old_item[1], sch[2], old_item[3])
            else:
                new_item = sch
            new_schedule.append(new_item)
        return new_schedule
    return schedule
|
Appearance"]["Add layers list"]:
controls.append('new ol.control.LayerSwitcher({tipLabel: "Layers"})')
mapbounds = bounds(iface, settings["Scale/Zoom"]["Extent"] == "Canvas extent", layers)
mapextent = "extent: %s," % mapbounds if settings["Scale/Zoom"]["Restrict to extent"] else ""
maxZoom = int(settings["Scale/Zoom"]["Max zoom level"])
minZoom = int(settings["Scale/Zoom"]["Min zoom level"])
onHover = unicode(settings["Appearance"]["Show popups on hover"]).lower()
highlight = unicode(settings["Appearance"]["Highlight features"]).lower()
view = "%s maxZoom: %d, minZoom: %d" % (mapextent, maxZoom, minZoom)
values = {"@CSSADDRESS@": cssAddress,
"@JSADDRESS@": jsAddress,
"@STYLEVARS@": styleVars,
"@GEOJSONVARS@": geojsonVars,
"@BOUNDS@": mapbounds,
"@CONTROLS@": ",".join(controls),
"@POPUPLAYERS@": popupLayers,
"@VIEW@": view,
"@ONHOVER@": onHover,
"@DOHIGHLIGHT@": highlight}
with open(os.path.join(folder, "index.html"), "w") as f:
f.write(replaceInTemplate(settings["Appearance"]["Template"] + ".html", values))
finally:
QApplication.restoreOverrideCursor()
return os.path.join(folder, "index.html")
def writeLayersAndGroups(layers, groups, visible, folder, settings):
    """Emit layers/layers.js: base layer, per-layer vars, group vars,
    initial visibility, and the ordered layersList used by the viewer."""
    baseLayer = baseLayerGroup % baseLayers[settings["Appearance"]["Base layer"]]
    scaleVisibility = settings["Scale/Zoom"]["Use layer scale dependent visibility"]
    layerVars = "\n".join([layerToJavascript(layer, scaleVisibility) for layer in layers])
    groupVars = ""
    groupedLayers = {}
    # One ol.layer.Group per QGIS group; remember which layers were grouped.
    for group, groupLayers in groups.iteritems():
        groupVars += ('''var %s = new ol.layer.Group({
                                layers: [%s],
                                title: "%s"});\n''' %
                      ("group_" + safeName(group), ",".join(["lyr_" + safeName(layer.name()) for layer in groupLayers]),
                       group))
        for layer in groupLayers:
            groupedLayers[layer.id()] = safeName(group)
    mapLayers = []
    if settings["Appearance"]["Base layer"] != "None":
        mapLayers.append("baseLayer")
    usedGroups = []
    for layer in layers:
        mapLayers.append("lyr_" + safeName(layer.name()))
    # Skip mapLayers[0] (the base layer) when pairing with the visibility flags.
    visibility = "\n".join(["%s.setVisible(%s);" % (layer, unicode(v).lower()) for layer, v in zip(mapLayers[1:], visible)])
    # ADD Group
    group_list = ["baseLayer"]
    no_group_list = []
    # Each group appears once, in the position of its first member layer.
    for layer in layers:
        if layer.id() in groupedLayers:
            groupName = groupedLayers[layer.id()]
            if groupName not in usedGroups:
                group_list.append("group_" + safeName(groupName))
                usedGroups.append(groupName)
        else:
            no_group_list.append("lyr_" + safeName(layer.name()))
    layersList = "var layersList = [%s];" % ",".join([layer for layer in (group_list + no_group_list)])
    path = os.path.join(folder, "layers", "layers.js")
    with codecs.open(path, "w", "utf-8") as f:
        f.write(baseLayer + "\n")
        f.write(layerVars + "\n")
        f.write(groupVars + "\n")
        f.write(visibility + "\n")
        f.write(layersList + "\n")
        # f.write(write_group_list)
def replaceInTemplate(template, values):
    """Load a file from the templates directory and substitute every
    placeholder key in `values` with its replacement text."""
    path = os.path.join(os.path.dirname(__file__), "templates", template)
    with open(path) as handle:
        content = "".join(handle.readlines())
    for placeholder, replacement in values.iteritems():
        content = content.replace(placeholder, replacement)
    return content
def bounds(iface, useCanvas, layers):
    """Return the map extent in EPSG:3857 as a "[xmin, ymin, xmax, ymax]" string.

    Either the current canvas extent or the union of all layer extents is
    used, depending on useCanvas. Extents that fail to reproject fall back
    to the full Web-Mercator world extent.
    """
    if useCanvas:
        canvas = iface.mapCanvas()
        canvasCrs = canvas.mapRenderer().destinationCrs()
        transform = QgsCoordinateTransform(canvasCrs, QgsCoordinateReferenceSystem("EPSG:3857"))
        try:
            extent = transform.transform(canvas.extent())
        except QgsCsException:
            # Reprojection failed: use the whole Web-Mercator world.
            extent = QgsRectangle(-20026376.39, -20048966.10, 20026376.39, 20048966.10)
    else:
        extent = None
        for layer in layers:
            transform = QgsCoordinateTransform(layer.crs(), QgsCoordinateReferenceSystem("EPSG:3857"))
            try:
                layerExtent = transform.transform(layer.extent())
            except QgsCsException:
                layerExtent = QgsRectangle(-20026376.39, -20048966.10, 20026376.39, 20048966.10)
            if extent is None:
                extent = layerExtent
            else:
                extent.combineExtentWith(layerExtent)
    return "[%f, %f, %f, %f]" % (extent.xMinimum(), extent.yMinimum(),
                                 extent.xMaximum(), extent.yMaximum())
def layerToJavascript(layer, scaleVisibility):
    """Return the OpenLayers JavaScript snippet declaring one map layer.

    Vector layers become GeoJSON-backed ol.layer.Vector; WMS rasters become
    tiled WMS layers; GDAL rasters become static images exported alongside.
    """
    # TODO: change scale to resolution
    if scaleVisibility and layer.hasScaleBasedVisibility():
        # Convert QGIS scale denominators to OL resolutions (inches->px factor).
        minResolution = "\nminResolution:%s,\n" % unicode(1 / ((1 / layer.minimumScale()) * 39.37 * 90.7))
        maxResolution = "maxResolution:%s,\n" % unicode(1 / ((1 / layer.maximumScale()) * 39.37 * 90.7))
    else:
        minResolution = ""
        maxResolution = ""
    layerName = safeName(layer.name())
    if layer.type() == layer.VectorLayer:
        return ('''var format_%(n)s = new ol.format.GeoJSON();
var features_%(n)s = format_%(n)s.readFeatures(geojson_%(n)s);
var jsonSource_%(n)s = new ol.source.Vector();
jsonSource_%(n)s.addFeatures(features_%(n)s);
var lyr_%(n)s = new ol.layer.Vector({
                source: jsonSource_%(n)s,%(min)s %(max)s
                style: style_%(n)s,
                title: "%(name)s"
            });''' %
                {"name": layer.name(), "n": layerName, "min": minResolution,
                 "max": maxResolution})
    elif layer.type() == layer.RasterLayer:
        if layer.providerType().lower() == "wms":
            # Pull the WMS layer list and endpoint out of the QGIS source URI.
            source = layer.source()
            layers = re.search(r"layers=(.*?)(?:&|$)", source).groups(0)[0]
            url = re.search(r"url=(.*?)(?:&|$)", source).groups(0)[0]
            return '''var lyr_%(n)s = new ol.layer.Tile({
                        source: new ol.source.TileWMS(({
                            url: "%(url)s",
                            params: {"LAYERS": "%(layers)s", "TILED": "true"},
                        })),
                        title: "%(name)s"
                      });''' % {"layers": layers, "url": url, "n": layerName, "name": layer.name()}
        elif layer.providerType().lower() == "gdal":
            # Static image layer: reproject the provider extent to EPSG:3857.
            provider = layer.dataProvider()
            transform = QgsCoordinateTransform(provider.crs(), QgsCoordinateReferenceSystem("EPSG:3857"))
            extent = transform.transform(provider.extent())
            sExtent = "[%f, %f, %f, %f]" % (extent.xMinimum(), extent.yMinimum(),
                                            extent.xMaximum(), extent.yMaximum())
            return '''var lyr_%(n)s = new ol.layer.Image({
                            opacity: 1,
                            title: "%(name)s",
                            source: new ol.source.ImageStatic({
                               url: "./layers/%(n)s.jpg",
                                projection: 'EPSG:3857',
                                alwaysInRange: true,
                                imageSize: [%(col)d, %(row)d],
                                imageExtent: %(extent)s
                            })
                        });''' % {"n": layerName, "extent": sExtent, "col": provider.xSize(),
                                  "name": layer.name(), "row": provider.ySize()}
def exportStyles(layers, folder):
stylesFolder = os.path.join(folder, "styles")
QDir().mkpath(stylesFolder)
for layer in layers:
if layer.type() != layer.VectorLayer:
continue
labelsEnabled = unicode(layer.customProperty("labeling/enabled")).lower() == "true"
if (labelsEnabled):
labelField = layer.customProperty("labeling/fieldName")
if labelField != "":
labelText = 'feature.get("%s")' % labelField.replace('"', '\\"')
else:
labelText = '""'
|
import unittest
import modular_core.libfundamental as lfu
from modular_core.libsimcomponents import ensemble_manager as mng
import os, sys, pdb
#log = open(os.path.join(os.getcwd(), 'test_ensembl | e.log'), 'w')
#sys.stdout = log
import lattice as mod
class dummyTestCase(unittest.TestCase):
    """Smoke tests for the lattice module via modular_core's ensemble manager."""

    # mcfg exercised by test_can_run_mcfg; resolved relative to the CWD
    # at import time.
    simple_mcfg = os.path.join(os.getcwd(),
        'lattice_dep_mcfgs',
        'lattice_example.mcfg')
    # Shared manager/ensemble, built once when the class is defined.
    mn = mng()
    en = mn.add_ensemble(module = mod.main.module_name)

    def pause(self, *args, **kwargs):
        """Drop into pdb on the real stdout, then restore the prior stream.

        Fixes a NameError: the old code restored a module-level ``log``
        whose definition is commented out above.
        """
        previous_stdout = sys.stdout
        sys.stdout = sys.__stdout__
        pdb.set_trace()
        sys.stdout = previous_stdout

    def test_can_make_ensemble(self):
        """module successfully imported?"""
        self.assertFalse(mod.main == None)
        mod_name = mod.main.module_name
        ensem = self.mn.add_ensemble(module = mod_name)
        self.assertFalse(ensem is None)

    def test_can_run_mcfg(self):
        """an mcfg runs and produces output?"""
        ran = self.en.run_mcfg(self.simple_mcfg)
        out = self.en.produce_output()
        self.assertTrue(ran)
        self.assertTrue(out)
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
'0': 'Off',
'1': 'On',
'2': 'Data Extracted'
},
'state': {
'0': 'Stopped - Not at Zero Position',
'1': 'Tracking (PEC disabled)',
'2': 'Slewing',
'3': 'Guiding',
'4': 'Meridian Flipping',
'5': 'Tracking (PEC enabled)',
'6': 'Parked',
'7': 'Stopped - Zero Position'
},
'tracking': {
'0': 'Sidereal',
'1': 'Lunar',
'2': 'Solar',
'3': 'King',
'4': 'Custom'
},
'movement_speed': {
'1': '1x sidereal',
'2': '2x sidereal',
'3': '8x sidereal',
'4': '16x sidereal',
'5': '64x sidereal',
'6': '128x sidereal',
'7': '256x sidereal',
'8': '512x sidereal',
'9': 'Max sidereal',
},
'time_source': {
'1': 'RS-232',
'2': 'Hand Controller',
'3': 'GPS'
},
'hemisphere': {
'0': 'Southern',
'1': 'Northern'
}
}
self.logger.info('Mount created')
################################################################################################
# Properties
################################################################################################
@property
def is_home(self):
    """ bool: Mount home status. """
    state = self.status.get('state', '')
    self._is_home = 'Stopped - Zero Position' in state
    return self._is_home
@property
def is_tracking(self):
    """ bool: Mount tracking status. """
    state = self.status.get('state', '')
    self._is_tracking = 'Tracking' in state
    return self._is_tracking
@property
def is_slewing(self):
    """ bool: Mount slewing status. """
    state = self.status.get('state', '')
    self._is_slewing = 'Slewing' in state
    return self._is_slewing
################################################################################################
# Public Methods
################################################################################################
def initialize(self, set_rates=True, unpark=False, *arg, **kwargs):
    """ Initialize the connection with the mount and setup for location.
    iOptron mounts are initialized by sending the following two commands
    to the mount:
    * Version
    * MountInfo
    If the mount is successfully initialized, the `_setup_location_for_mount` method
    is also called.
    Args:
        set_rates (bool, optional): Whether to call `_set_initial_rates`
            after a successful init. Defaults to True.
        unpark (bool, optional): Accepted for API compatibility; not
            used by this method.
    Returns:
        bool: Returns the value from `self.is_initialized`.
    Raises:
        error.MountNotFound: If the version/mount_info handshake does not
            match the expected responses from the command definitions.
    """
    if not self.is_connected:
        self.logger.info(f'Connecting to mount {__name__}')
        self.connect()
    if self.is_connected and not self.is_initialized:
        self.logger.info(f'Initializing {__name__} mount')
        # We trick the mount into thinking it's initialized while we
        # initialize otherwise the `query` method will test
        # to see if initialized and be put into loop.
        self._is_initialized = True
        actual_version = self.query('version')
        actual_mount_info = self.query('mount_info')
        expected_version = self.commands.get('version').get('response')
        expected_mount_info = self.commands.get('mount_info').get('response')
        # Clear the flag again until the handshake is confirmed below.
        self._is_initialized = False
        # Test our init procedure for iOptron
        if actual_version != expected_version or actual_mount_info != expected_mount_info:
            self.logger.debug(f'{actual_version} != {expected_version}')
            self.logger.debug(f'{actual_mount_info} != {expected_mount_info}')
            raise error.MountNotFound('Problem initializing mount')
        else:
            self._is_initialized = True
            self._setup_location_for_mount()
            if set_rates:
                self._set_initial_rates()
    self.logger.info(f'Mount initialized: {self.is_initialized}')
    return self.is_initialized
def park(self,
         ra_direction='west',
         ra_seconds=11.,
         dec_direction='south',
         dec_seconds=15.,
         *args, **kwargs):
    """Slews to the park position and parks the mount.
    This will first move the mount to the home position, then move the RA axis
    in the direction specified at 0.9x sidereal rate (the fastest) for the number
    of seconds requested. Then move the Dec axis in a similar manner. This should
    be adjusted for the particular parking position desired.
    Note:
        When mount is parked no movement commands will be accepted.
    Args:
        ra_direction (str, optional): The direction to move the RA axis from
            the home position. Defaults to 'west' for northern hemisphere.
        ra_seconds (float, optional): The number of seconds at fastest move
            speed to move the RA axis from the home position.
        dec_direction (str, optional): The direction to move the Dec axis
            from the home position. Defaults to 'south' for northern hemisphere.
        dec_seconds (float, optional): The number of seconds at the fastest
            move speed to move the Dec axis from the home position.
    Returns:
        bool: indicating success
    """
    if self.is_parked:
        self.logger.info('Mount is parked')
        return self._is_parked
    if self.slew_to_home(blocking=True):
        # The mount is currently not parking in correct position so we manually move it there.
        self.query('set_button_moving_rate', 9)
        self.move_direction(direction=ra_direction, seconds=ra_seconds)
        # Poll until any residual RA slewing reported by the mount stops.
        while self.is_slewing:
            self.logger.debug('Slewing RA axis to park position...')
            time.sleep(3)
        self.move_direction(direction=dec_direction, seconds=dec_seconds)
        # Same wait for the Dec axis.
        while self.is_slewing:
            self.logger.debug('Slewing Dec axis to park position...')
            time.sleep(3)
        # NOTE(review): parked flag is only set when slew_to_home succeeds;
        # on failure the previous value is returned unchanged.
        self._is_parked = True
    self.logger.debug(f'Mount parked: {self.is_parked}')
    return self._is_parked
################################################################################################
# Private Methods
################################################################################################
def _set_initial_rates(self):
    """Set tracking back to the default rate and guide rates to 0.90x."""
    # Make sure we start at sidereal
    self.set_tracking_rate()
    self.logger.debug('Setting manual moving rate to max')
    self.query('set_button_moving_rate', 9)
    self.logger.debug(f'Mount guide rate: {self.query("get_guide_rate")}')
    self.query('set_guide_rate', '9090')
    guide_rate = self.query('get_guide_rate')
    # The response packs RA and Dec guide rates as two 2-digit
    # percentages, e.g. '9090' -> 0.90 / 0.90.
    self.ra_guide_rate = int(guide_rate[0:2]) / 100
    self.dec_guide_rate = int(guide_rate[2:]) / 100
    self.logger.debug(f'Mount guide rate: {self.ra_guide_rate} {self.dec_guide_rate}')
def _setup_location_for_mount(self):
"""
Sets the mount up to the current location. Mount must be initialized first.
This uses mount.location (an astropy.coords.EarthLocation) to set
most of the params and the rest is read from a config file. Users
should not call this directly.
Includes:
* Latitude set_long
* Longitude set_lat
* Daylight Savings disable_daylight_savings
* Universal Time Offset set_gmt_offset
* Current Date set_local_date
* Current Time set_local_time
"""
assert self.is_initialized, self.logger.warnin | g('Mount has not been initialized')
| assert self.location is not None, self.logger.warning(
'Please set a location before attempting setup')
|
selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import UnexpectedAlertPresentException, ElementNotVisibleException
from TmallSingleCrawler.utils.TMallUtils import get_captcha_image, get_favorite_count, get_comment_count, \
get_comment_images
from TmallSingleCrawler.custom_items.TMallGoodsItem import TMallGoodsItem
import math
import time
import re
import os
class TmallSpider(scrapy.Spider):
    """Crawl Tmall search listings, logging in first via a Chrome browser."""
    # Let 302 responses reach our callbacks instead of scrapy following
    # them; redirects are handled manually (they point at captcha pages).
    handle_httpstatus_list = [302]
    # Shared selenium Chrome instance, created in login().
    brower = None
    # Cookie string extracted from the browser session after login.
    cookies = ""
    name = 'tmall'
    # Search-result pages for four percent-encoded queries -- presumably
    # clothing/shoe categories; TODO confirm the encodings.
    start_urls = [
        'https://list.tmall.com/search_product.htm?q=%C5%AE%D7%B0&type=p&vmarket=&spm=875.7931836%2FB.a2227oh.d100&from=mallfp..pc_1_searchbutton',
        'https://list.tmall.com/search_product.htm?q=%C5%AE%D0%AC&type=p&spm=a220m.1000858.a2227oh.d100&from=.list.pc_1_searchbutton',
        'https://list.tmall.com/search_product.htm?q=%C4%D0%D7%B0&type=p&spm=a220m.1000858.a2227oh.d100&from=.list.pc_1_searchbutton',
        'https://list.tmall.com/search_product.htm?q=%C4%D0%D0%AC&type=p&spm=a220m.1000858.a2227oh.d100&from=.list.pc_1_searchbutton']
def __init__(self):
    """Wire up the spider_closed handler so the browser gets cleaned up."""
    super(TmallSpider, self).__init__()
    # Quit the Chrome instance when scrapy signals the spider is done.
    dispatcher.connect(self.spider_closed, signals.spider_closed)
# Log in first; crawling only starts after a successful login.
def start_requests(self):
    """Log in first; only schedule the start URLs on a successful login."""
    if not self.login():
        return []
    for url in self.start_urls:
        yield scrapy.Request(url, cookies=self.cookies, callback=self.parse)
def parse(self, response):
    """Parse a Tmall search-result listing page.

    Crawling (e.g. too fast) can get redirected (HTTP 302) to a captcha
    page; in that case the original URL is retried with the logged-in
    cookies.  Otherwise each product block is loaded into an item and its
    detail page is scheduled.
    """
    if response.status == 302:
        # Retry the original URL with our session cookies instead of
        # following the redirect (which points at a captcha page).
        redirect_url = response.request.url
        yield scrapy.Request(url=redirect_url, cookies=self.cookies, dont_filter=True, callback=self.parse)
    else:
        for item_block in response.css("#J_ItemList .product"):
            try:
                item_loader = ItemLoader(item=TMallGoodsItem(), selector=item_block)
                goods_id = item_block.css("::attr(data-id)").extract_first()
                # Blocks without a data-id are Tmall's injected
                # recommendation widgets, not search results: skip them.
                if goods_id is None:
                    continue
                item_loader.add_value("id", goods_id)
                item_loader.add_css("main_image", ".productImg-wrap img::attr(src)")
                item_loader.add_css("thumb_image", ".proThumb-img img::attr(data-ks-lazyload)")
                item_loader.add_css("price", ".productPrice em::attr(title)")
                item_loader.add_css("title", ".productTitle a::attr(title)")
                item_loader.add_css("shop", ".productShop a::text")
                item_loader.add_css("moon_sell", ".productStatus span:nth-child(1) em::text")
                item_loader.add_css("comment_number", ".productStatus span:nth-child(2) a::text")
                goods_item = item_loader.load_item()
                detail_url = "https:" + item_block.css(".productTitle a::attr(href)").extract_first()
                yield scrapy.Request(url=detail_url, callback=self.parse_detail, meta={"item": goods_item})
            except Exception:
                # Was a bare `except:` which would also swallow
                # GeneratorExit/KeyboardInterrupt; skip only on real errors.
                continue
        # Follow the "next page" link.
        next_page = "https://list.tmall.com/search_product.htm" + response.css(
            ".ui-page-next::attr(href)").extract_first("")
        yield scrapy.Request(url=next_page, callback=self.parse)
def parse_detail(self, response):
    """Parse a product detail page and enrich the listing item.

    Same redirect handling as ``parse``: a 302 means a captcha page, so
    the original URL is retried with the login cookies.
    """
    if response.status == 302:
        # if response.headers.has_key("location"):
        #     location = str(response.headers['Location'], encoding="utf-8")
        #     redirect_url = ""
        #
        #     if location.find("sec.taobao") > 0:
        #         self.parse_code(url=location, origin_url=response.url)
        #         redirect_url = response.url
        #     else:
        #         redirect_url = location
        redirect_url = response.request.url
        yield scrapy.Request(url=redirect_url, cookies=self.cookies, dont_filter=True,
                             callback=self.parse_detail, meta=response.meta)
    else:
        # Recover the item scheduled by parse() and keep filling it in.
        goods_item = response.meta['item']
        item_loader = ItemLoader(item=goods_item, response=response)
        item_loader.add_css("params", "#J_AttrUL li::text")
        # Favorite count, comment count and comment images live behind
        # separate JSONP endpoints keyed by itemId/spuId/sellerId, which
        # are scraped out of the page source here.
        sellerId = re.findall(r"sellerId=(\d+)", response.text)[0]
        spuId = re.findall(r"spuId=(\d+)", response.text)[0]
        favorite_count_url = "https:" + re.findall(r"apiBeans\":\"(.*?)\"", response.text)[0] + "&callback=jsonp259"
        comment_count_url = "https://dsr-rate.tmall.com/list_dsr_info.htm?itemId={itemId}&spuId={spuId}&sellerId={sellerId}&callback=jsonp206" \
                            "".format(itemId=goods_item['id'][0], spuId=spuId, sellerId=sellerId)
        # NOTE: '&currentPage=1' restored -- the previous literal contained
        # the mojibake '¤tPage' from '&curren' being read as the
        # HTML entity for the currency sign.
        comment_images_url = "https://rate.tmall.com/list_detail_rate.htm?itemId={itemId}&spuId={spuId}&sellerId={sellerId}&order=3&currentPage=1&append=0&content=1&tagId=&posi=&picture=1&ua=238UW5TcyMNYQwiAiwZTXFIdUh1SHJOe0BuOG4%3D%7CUm5Ockp3TntCekN2QnlBfSs%3D%7CU2xMHDJ7G2AHYg8hAS8XIw0tA18%2BWDRTLVd5L3k%3D%7CVGhXd1llXWBZbFVtVGFVblZqXWBCfUZzSHxAfUd5TXVBekZ4Rn9RBw%3D%3D%7CVWldfS0SMg4yDy8bOxVgS2wLcCBCIAQqfCo%3D%7CVmhIGCUFOBgkHSAaOgE6DjUVKRApFDQAPQAgHCUcIQE0DzJkMg%3D%3D%7CV25OHjAePgc%2BAyMfIh4jAzwDPwtdCw%3D%3D%7CWGFBET8RMQg2CysXKhYrCzIMMApcCg%3D%3D%7CWWFBET8RMWFYZlt7R3pGey0NMBA%2BEDAJNww2YDY%3D%7CWmNeY0N%2BXmFBfUR4WGZeZER9XWFcfEhoVG44&isg=AuDgX6vXrZIrfRECc7u5mTzxse5yQcXX4sPaQlrw4Pu6VYB_APmUQ7Ylm8qv&itemPropertyId=&itemPropertyIndex=&userPropertyId=&userPropertyIndex=&rateQuery=&location=&needFold=0&callback=jsonp981" \
                            "".format(itemId=goods_item['id'][0], spuId=spuId, sellerId=sellerId)
        favorite_count = get_favorite_count(favorite_count_url)
        comment_count = get_comment_count(comment_count_url)
        comment_images = get_comment_images(comment_images_url)
        item_loader.add_value("favorite_count", favorite_count)
        item_loader.add_value("comment_count", comment_count)
        item_loader.add_value("comment_images", comment_images)
        goods_item = item_loader.load_item()
        yield goods_item
def spider_closed(self, spider):
    """Quit the shared Chrome instance when the spider shuts down."""
    self.log("closing spider")
    self.brower.quit()
# Log in through a real Chrome browser (selenium).
def login(self):
    """Log in to Taobao/Tmall through a real Chrome browser.

    Returns True when cookies could be extracted, False otherwise.

    NOTE(review): the chromedriver path and the credential strings below
    are hard-coded placeholders and must be replaced before running.
    """
    self.brower = webdriver.Chrome(
        executable_path="C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe")
    # self.brower.implicitly_wait(30)
    self.brower.get(
        "https://login.taobao.com/member/login.jhtml?tpl_redirect_url=https%3A%2F%2Fwww.tmall.com%2F&style=miniall&full_redirect=true")
    self.brower.find_element_by_id("TPL_username_1").send_keys("天猫用户名")
    self.brower.find_element_by_id("TPL_password_1").send_keys("密码")
    # Slide the drag-verification bar if one is shown.
    self.drag_bar()
    # Check whether login can proceed.
    self.check_success()
    # Enter the phone safety code if one is requested.
    self.type_safe_code()
    # Extract the cookies from Chrome for use by scrapy requests.
    self.cookies = self.extract_cookies()
    if self.cookies == "":
        return False
    return True
def drag_bar(self):
time.sleep(2)
locator = (By.ID, "nc_1_wrapper")
# 看有没有滑动条
# 有的话就滑一下
# 没有的话就提交登陆
try: |
import numpy as np
from ._layout import Layout
from ._multivector import MultiVector
class ConformalLayout(Layout):
    r"""
    A layout for a conformal algebra, which adds extra constants and helpers.
    Typically these should be constructed via :func:`clifford.conformalize`.
    .. versionadded:: 1.2.0
    Attributes
    ----------
    ep : MultiVector
        The first added basis element, :math:`e_{+}`, usually with :math:`e_{+}^2 = +1`
    en : MultiVector
        The second added basis element, :math:`e_{-}`, usually with :math:`e_{-}^2 = -1`
    eo : MultiVector
        The null basis vector at the origin, :math:`e_o = 0.5(e_{-} - e_{+})`
    einf : MultiVector
        The null vector at infinity, :math:`e_\infty = e_{-} + e_{+}`
    E0 : MultiVector
        The minkowski subspace bivector, :math:`e_\infty \wedge e_o`
    I_base : MultiVector
        The pseudoscalar of the base ga, in cga layout
    """
    def __init__(self, *args, layout=None, **kwargs):
        super().__init__(*args, **kwargs)
        self._base_layout = layout
        ep, en = self.basis_vectors_lst[-2:]
        # setup null basis, and minkowski subspace bivector
        eo = .5 ^ (en - ep)
        einf = en + ep
        E0 = einf ^ eo
        I_base = self.pseudoScalar*E0
        # helper properties
        self.ep = ep
        self.en = en
        self.eo = eo
        self.einf = einf
        self.E0 = E0
        self.I_base = I_base

    @classmethod
    def _from_base_layout(cls, layout, added_sig=(1, -1), **kwargs) -> 'ConformalLayout':
        """ helper to implement :func:`clifford.conformalize`

        ``added_sig`` defaults to an immutable tuple (was a mutable list,
        which is a shared-default hazard); any sequence is accepted.
        """
        sig_c = list(layout.sig) + list(added_sig)
        return cls(
            sig_c,
            ids=layout._basis_vector_ids.augmented_with(len(added_sig)),
            layout=layout, **kwargs)

    # some convenience functions
    def up(self, x: MultiVector) -> MultiVector:
        """ up-project a vector from GA to CGA """
        try:
            if x.layout == self._base_layout:
                # vector is in original space, map it into conformal space
                old_val = x.value
                new_val = np.zeros(self.gaDims)
                new_val[:len(old_val)] = old_val
                x = self.MultiVector(value=new_val)
        except AttributeError:
            # if x is a scalar it doesn't have a layout, but the
            # arithmetic below still works
            pass
        # then up-project into a null vector
        return x + (.5 ^ ((x**2)*self.einf)) + self.eo

    def homo(self, x: MultiVector) -> MultiVector:
        """ homogenize a CGA vector """
        return x/(-x | self.einf)[()]

    def down(self, x: MultiVector) -> MultiVector:
        """ down-project a vector from CGA to GA """
        # homogenize, then project onto the base space via (x ^ E0) * E0
        x_down = (self.homo(x) ^ self.E0)*self.E0
        return x_down
|
"""deCONZ climate platform tests."""
from unittest.mock import Mock, patch
import asynctest
from homeassistant import config_entries
from homeassistant.components import deconz
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
import homeassistant.components.climate as climate
from tests.common import mock_coro
# Two fake deCONZ sensors: "1" is a thermostat that should become a
# climate entity; "2" is a presence sensor the climate platform ignores.
SENSOR = {
    "1": {
        "id": "Climate 1 id",
        "name": "Climate 1 name",
        "type": "ZHAThermostat",
        # Temperature values are centi-degrees (2260 -> 22.6) -- presumed
        # deCONZ convention; matches the set_temperature test below.
        "state": {"on": True, "temperature": 2260},
        "config": {"battery": 100, "heatsetpoint": 2200, "mode": "auto",
                   "offset": 10, "reachable": True, "valve": 30},
        "uniqueid": "00:00:00:00:00:00:00:00-00"
    },
    "2": {
        "id": "Sensor 2 id",
        "name": "Sensor 2 name",
        "type": "ZHAPresence",
        "state": {"presence": False},
        "config": {}
    }
}
# Template config-entry data for the mocked gateway; the CLIP flag is
# adjusted per test through setup_gateway's allow_clip_sensor argument.
ENTRY_CONFIG = {
    deconz.const.CONF_ALLOW_CLIP_SENSOR: True,
    deconz.const.CONF_ALLOW_DECONZ_GROUPS: True,
    deconz.config_flow.CONF_API_KEY: "ABCDEF",
    deconz.config_flow.CONF_BRIDGEID: "0123456789",
    deconz.config_flow.CONF_HOST: "1.2.3.4",
    deconz.config_flow.CONF_PORT: 80
}
async def setup_gateway(hass, data, allow_clip_sensor=True):
    """Load the deCONZ sensor platform.

    Args:
        hass: Home Assistant test instance.
        data: Payload returned by the mocked ``async_get_state``.
        allow_clip_sensor: Whether CLIP sensors are allowed for this test.
    """
    from pydeconz import DeconzSession
    response = Mock(
        status=200, json=asynctest.CoroutineMock(),
        text=asynctest.CoroutineMock())
    response.content_type = 'application/json'
    session = Mock(
        put=asynctest.CoroutineMock(
            return_value=response
        )
    )
    # Copy the template instead of mutating module-level ENTRY_CONFIG,
    # so one test's CLIP flag cannot leak into the next test.
    entry_config = dict(ENTRY_CONFIG)
    entry_config[deconz.const.CONF_ALLOW_CLIP_SENSOR] = allow_clip_sensor
    config_entry = config_entries.ConfigEntry(
        1, deconz.DOMAIN, 'Mock Title', entry_config, 'test',
        config_entries.CONN_CLASS_LOCAL_PUSH)
    gateway = deconz.DeconzGateway(hass, config_entry)
    gateway.api = DeconzSession(hass.loop, session, **config_entry.data)
    gateway.api.config = Mock()
    hass.data[deconz.DOMAIN] = gateway
    with patch('pydeconz.DeconzSession.async_get_state',
               return_value=mock_coro(data)):
        await gateway.api.async_load_parameters()
    await hass.config_entries.async_forward_entry_setup(
        config_entry, 'climate')
    # To flush out the service call to update the group
    await hass.async_block_till_done()
async def test_platform_manually_configured(hass):
    """Test that we do not discover anything or try to set up a gateway."""
    setup_ok = await async_setup_component(hass, climate.DOMAIN, {
        'climate': {
            'platform': deconz.DOMAIN
        }
    })
    assert setup_ok is True
    assert deconz.DOMAIN not in hass.data
async def test_no_sensors(hass):
    """Test that no sensors in deconz results in no climate entities."""
    await setup_gateway(hass, {})
    gateway = hass.data[deconz.DOMAIN]
    assert not gateway.deconz_ids
    assert not hass.states.async_all()
async def test_climate_devices(hass):
    """Test successful creation of sensor entities."""
    await setup_gateway(hass, {"sensors": SENSOR})
    # Only the thermostat becomes a climate entity; the presence sensor
    # must not be picked up by this platform.
    assert "climate.climate_1_name" in hass.data[deconz.DOMAIN].deconz_ids
    assert "sensor.sensor_2_name" not in hass.data[deconz.DOMAIN].deconz_ids
    assert len(hass.states.async_all()) == 1
    hass.data[deconz.DOMAIN].api.sensors['1'].async_update(
        {'state': {'on': False}})
    # turn_on maps to putting the thermostat back into "auto" mode.
    await hass.services.async_call(
        'climate', 'turn_on', {'entity_id': 'climate.climate_1_name'},
        blocking=True
    )
    hass.data[deconz.DOMAIN].api.session.put.assert_called_with(
        'http://1.2.3.4:80/api/ABCDEF/sensors/1/config',
        data='{"mode": "auto"}'
    )
    await hass.services.async_call(
        'climate', 'turn_off', {'entity_id': 'climate.climate_1_name'},
        blocking=True
    )
    hass.data[deconz.DOMAIN].api.session.put.assert_called_with(
        'http://1.2.3.4:80/api/ABCDEF/sensors/1/config',
        data='{"mode": "off"}'
    )
    # Setpoints are sent in centi-degrees: 20 -> 2000.0.
    await hass.services.async_call(
        'climate', 'set_temperature',
        {'entity_id': 'climate.climate_1_name', 'temperature': 20},
        blocking=True
    )
    hass.data[deconz.DOMAIN].api.session.put.assert_called_with(
        'http://1.2.3.4:80/api/ABCDEF/sensors/1/config',
        data='{"heatsetpoint": 2000.0}'
    )
    assert len(hass.data[deconz.DOMAIN].api.session.put.mock_calls) == 3
async def test_verify_state_update(hass):
    """Test that state update properly."""
    await setup_gateway(hass, {"sensors": SENSOR})
    assert "climate.climate_1_name" in hass.data[deconz.DOMAIN].deconz_ids
    assert hass.states.get('climate.climate_1_name').state == 'on'
    # Push a deCONZ websocket event that switches the thermostat off.
    hass.data[deconz.DOMAIN].api.async_event_handler({
        "t": "event",
        "e": "changed",
        "r": "sensors",
        "id": "1",
        "config": {"on": False},
    })
    await hass.async_block_till_done()
    assert len(hass.states.async_all()) == 1
    assert hass.states.get('climate.climate_1_name').state == 'off'
async def test_add_new_climate_device(hass):
    """Test successful creation of climate entities."""
    await setup_gateway(hass, {})
    new_sensor = Mock()
    new_sensor.name = 'name'
    new_sensor.type = 'ZHAThermostat'
    new_sensor.register_async_callback = Mock()
    # Announce the sensor the same way the gateway does.
    async_dispatcher_send(hass, 'deconz_new_sensor', [new_sensor])
    await hass.async_block_till_done()
    assert "climate.name" in hass.data[deconz.DOMAIN].deconz_ids
async def test_do_not_allow_clipsensor(hass):
    """Test that clip sensors can be ignored."""
    await setup_gateway(hass, {}, allow_clip_sensor=False)
    clip_sensor = Mock()
    clip_sensor.name = 'name'
    clip_sensor.type = 'CLIPThermostat'
    clip_sensor.register_async_callback = Mock()
    # With CLIP sensors disallowed the announcement must be ignored.
    async_dispatcher_send(hass, 'deconz_new_sensor', [clip_sensor])
    await hass.async_block_till_done()
    assert len(hass.data[deconz.DOMAIN].deconz_ids) == 0
async def test_unload_sensor(hass):
    """Test that it works to unload sensor entities."""
    await setup_gateway(hass, {"sensors": SENSOR})
    gateway = hass.data[deconz.DOMAIN]
    await gateway.async_reset()
    assert len(hass.states.async_all()) == 0
|
:,2] = -0.5*k*sumxy
K[:,:,3] = theta[2]*np.eye(d1,d2)
return K
else:
return k + measnoise*theta[2]*np.eye(d1,d2)
def kernel(data1, data2, theta, wantderiv=True, measnoise=1.):
    """Squared exponential plus periodic covariance between two datasets.

    theta (after squeezing): [amplitude, SE lengthscale, periodic
    frequency, periodic lengthscale, noise std].  With wantderiv=True the
    returned array K has shape (d1, d2, len(theta)+1): K[:,:,0] is the
    covariance (plus noise) and K[:,:,i] the derivative w.r.t. theta[i-1].
    """
    theta = np.squeeze(theta)
    # Squared exponential and periodic
    # Distinguish 1-D from 2-D input by ndim: the old test
    # `np.shape(data1)[0] == len(data1)` is always true for 2-D arrays,
    # which made the multi-feature branch unreachable.
    if np.ndim(data1) == 1:
        d1 = np.shape(data1)[0]
        n = 1
    else:
        (d1, n) = np.shape(data1)
    d2 = np.shape(data2)[0]
    sumxy = np.zeros((d1, d2))
    for d in range(n):
        D1 = np.transpose([data1[:, d]]) * np.ones((d1, d2))
        D2 = [data2[:, d]] * np.ones((d1, d2))
        sumxy += (D1 - D2)
    k = theta[0]**2 * np.exp(-sumxy**2/(2.0*theta[1]**2)) + np.exp(-2.*np.sin(theta[2]*np.pi*(sumxy))**2/theta[3]**2)
    if wantderiv:
        K = np.zeros((d1, d2, len(theta)+1))
        # K[:,:,0] is the original covariance matrix (with noise)
        K[:, :, 0] = k + measnoise*theta[4]**2*np.eye(d1, d2)
        K[:, :, 1] = 2.0 * k / theta[0]
        K[:, :, 2] = k*sumxy**2/(theta[1]**3)
        K[:, :, 3] = -4.0/(theta[3]**2)*np.pi*sumxy*np.sin(theta[2]*np.pi*sumxy)*np.cos(theta[2]*np.pi*sumxy)*np.exp(-2.*np.sin(theta[2]*np.pi*(sumxy))**2/theta[3]**2)
        K[:, :, 4] = 4.0*np.sin(theta[2]*np.pi*sumxy)**2/(theta[3]**3)*np.exp(-2.*np.sin(theta[2]*np.pi*(sumxy))**2)
        K[:, :, 5] = 2.0*theta[4]*np.eye(d1, d2)
        return K
    else:
        # Noise hyperparameter is theta[4]; the old code used theta[3]
        # here, inconsistent with K[:,:,0] and K[:,:,5] above.
        return k + measnoise*theta[4]**2*np.eye(d1, d2)
def predict(xstar, data, k, t, theta, L=None, beta=None):
    """GP predictive mean and covariance at the test inputs xstar.

    If L (Cholesky factor of k) and beta are not supplied they are
    computed from k and t; pass both to reuse a previous factorization.
    Returns the tuple (f, V).
    """
    # `L == None` on an ndarray is an elementwise comparison and makes
    # the `if` raise; identity with None is the correct test.
    if L is None:
        L = np.linalg.cholesky(k)
        beta = np.linalg.solve(L.transpose(), np.linalg.solve(L, t))
    kstar = kernel2(data, xstar, theta, wantderiv=False, measnoise=0)
    f = np.dot(kstar.transpose(), beta)
    v = np.linalg.solve(L, kstar)
    V = kernel2(xstar, xstar, theta, wantderiv=False, measnoise=0) - np.dot(v.transpose(), v)
    return (f, V)
def logPosterior(theta, args):
    """Negative log marginal likelihood of the GP for hyperparameters theta."""
    X, targets = args
    K = kernel2(X, X, theta, wantderiv=False)
    chol = np.linalg.cholesky(K)
    alpha = np.linalg.solve(chol.transpose(), np.linalg.solve(chol, targets))
    lml = (-0.5 * np.dot(targets.transpose(), alpha)
           - np.sum(np.log(np.diag(chol)))
           - np.shape(X)[0] / 2. * np.log(2 * np.pi))
    return -lml
def gradLogPosterior(theta, args):
    """Gradient of the negative log marginal likelihood w.r.t. theta.

    Uses the identity d(logp)/dθ_i = ½ tᵀK⁻¹ (dK/dθ_i) K⁻¹ t
    − ½ tr(K⁻¹ dK/dθ_i), with K[:,:,i] = dK/dθ[i-1] from kernel2.
    """
    data, t = args
    theta = np.squeeze(theta)
    nparams = len(theta)
    K = kernel2(data, data, theta, wantderiv=True)
    L = np.linalg.cholesky(np.squeeze(K[:, :, 0]))
    invk = np.linalg.solve(L.transpose(), np.linalg.solve(L, np.eye(np.shape(data)[0])))
    dlogpdtheta = np.zeros(nparams)
    # The old code reused `d` both as the parameter count and as the
    # loop index; use a distinct index variable.
    for i in range(1, nparams + 1):
        dlogpdtheta[i-1] = 0.5*np.dot(t.transpose(), np.dot(invk, np.dot(np.squeeze(K[:, :, i]), np.dot(invk, t)))) - 0.5*np.trace(np.dot(invk, np.squeeze(K[:, :, i])))
    return -dlogpdtheta
def testopt():
    """Optimise GP hyperparameters on a tiny 1-D dataset and plot the fit.

    NOTE: Python 2 print statements; this file predates Python 3.
    """
    theta = np.array([0.5,0.25,0.1]) # GP4
    x = np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
    t = 0.55*np.array([[-2., 0., 1., 2., -1.]]).transpose()
    args = (x,t)
    print theta, -logPosterior(theta,args)
    # Minimise the negative log marginal likelihood by conjugate gradients.
    newTheta = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4,maxiter=50,disp=1)
    print newTheta, -logPosterior(newTheta,args)
    #theta = newTheta
    xstar = np.reshape(np.linspace(-5,5,100),(100,1))
    k = kernel2(x,x,theta,wantderiv=False)
    # Cross- and test-covariances, evaluated one test point at a time.
    kstar = [kernel2(x,xs*np.ones((1,1)),theta,wantderiv=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel2(xs*np.ones((1,1)),xs*np.ones((1,1)),theta,wantderiv=False) for xs in xstar]
    kstarstar = np.squeeze(kstarstar)
    #kstarstar = kernel2(xstar,xstar,theta,wantderiv=False)
    # Invert the training covariance via its Cholesky factor.
    L = np.linalg.cholesky(k)
    invk = np.linalg.solve(L.transpose(),np.linalg.solve(L,np.eye(np.shape(x)[0])))
    #invL = np.linalg.inv(L)
    #invk = np.dot(invL.T,invL)
    mean = np.dot(kstar,np.dot(invk,t))
    #print np.shape(kstarstar), np.shape(kstar), np.shape(invk)
    var = kstarstar - np.diag(np.dot(kstar,np.dot(invk,kstar.T)))
    #print np.shape(var)
    #var = kstarstar - np.dot(kstar.transpose(),np.dot(invk,kstar))
    var = np.reshape(var,(100,1))
    #print mean
    # Plot the predictive mean with a 2-sigma band plus the training points.
    pl.figure()
    pl.plot(xstar,mean,'-k')
    #pl.plot(xstar,mean+2*np.sqrt(var),'x-')
    #pl.plot(xstar,mean-2*np.sqrt(var),'x-')
    #print np.shape(xstar), np.shape(mean), np.shape(var)
    pl.fill_between(np.squeeze(xstar),np.squeeze(mean-2*np.sqrt(var)),np.squeeze(mean+2*np.sqrt(var)),color='0.75')
    pl.plot(x,t,'ko')
    pl.axis('tight')
    pl.xlabel('x')
    pl.ylabel('f(x)')
def showpost():
    """Plot the GP posterior for one fixed hyperparameter setting.

    NOTE: Python 2 print statement at the end.
    """
    #theta = np.array([0.5,1.,0.0]) # GP1
    #theta = np.array([0.5,1.,0.2]) # GP2
    #theta = np.array([1.0,1.,0.0]) # GP3
    theta = np.array([0.5,0.5,0.0]) # GP4
    x = np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
    t = 0.55*np.array([[-2., 0., 1., 2., -1.]]).transpose()
    xstar = np.reshape(np.linspace(-5,5,100),(100,1))
    k = kernel2(x,x,theta,wantderiv=False)
    # Cross- and test-covariances, one test point at a time.
    kstar = [kernel2(x,xs*np.ones((1,1)),theta,wantderiv=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    kstarstar = [kernel2(xs*np.ones((1,1)),xs*np.ones((1,1)),theta,wantderiv=False) for xs in xstar]
    kstarstar = np.squeeze(kstarstar)
    #kstarstar = kernel(xstar,xstar,theta,wantderiv=False)
    #invk = np.linalg.inv(k)
    # Invert the training covariance via its Cholesky factor.
    L = np.linalg.cholesky(k)
    invk = np.linalg.solve(L.transpose(),np.linalg.solve(L,np.eye(np.shape(x)[0])))
    mean = np.dot(kstar,np.dot(invk,t))
    var = kstarstar - np.diag(np.dot(kstar,np.dot(invk,kstar.T)))
    var = np.reshape(var,(100,1))
    # Predictive mean with a 2-sigma band plus the training points.
    pl.figure()
    pl.plot(xstar,mean,'-k')
    #pl.plot(xstar,mean+2*np.sqrt(var),'x-')
    #pl.plot(xstar,mean-2*np.sqrt(var),'x-')
    #print np.shape(xstar), np.shape(mean), np.shape(var)
    pl.fill_between(np.squeeze(xstar),np.squeeze(mean-2*np.sqrt(var)),np.squeeze(mean+2*np.sqrt(var)),color='0.75')
    pl.plot(x,t,'ko')
    pl.axis('tight')
    pl.xlabel('x')
    pl.ylabel('f(x)')
    print np.shape(mean), np.shape(var)
def showlength(theta,scale):
    """Plot the GP posterior for the given hyperparameters and input scale.

    NOTE: Python 2 print statements.
    """
    x = scale*np.array([[-3.5, -2.5, -.5, .4, 2.25]]).transpose()
    t = 0.55*np.array([[-2., 0, 1., 2., -1.]]).transpose()
    xstar = np.reshape(np.linspace(-5,5,100),(100,1))
    k = kernel2(x,x,theta,wantderiv=False)
    print k
    #print k
    # Cross- and test-covariances evaluated noise-free, per test point.
    kstar = [kernel2(x,xs*np.ones((1,1)),theta,wantderiv=False,measnoise=False) for xs in xstar]
    kstar = np.squeeze(kstar)
    #print kstar
    kstarstar = [kernel2(xs*np.ones((1,1)),xs*np.ones((1,1)),theta,wantderiv=False,measnoise=False) for xs in xstar]
    kstarstar = np.squeeze(kstarstar)
    #kstarstar = kernel2(xstar,xstar,theta,wantderiv=False)
    # Invert the training covariance via its Cholesky factor.
    L = np.linalg.cholesky(k)
    #invL = np.linalg.inv(L)
    #invk = np.dot(invL.T,invL)
    invk = np.linalg.solve(L.transpose(),np.linalg.solve(L,np.eye(np.shape(x)[0])))
    #print np.shape(kstar), np.shape(invk), np.shape(t), np.shape(kstarstar), np.shape(np.dot(kstar,np.dot(invk,kstar.T)))
    mean = np.dot(kstar,np.dot(invk,t))
    var = kstarstar - np.diag(np.dot(kstar,np.dot(invk,kstar.T)))
    var = np.reshape(var,(100,1))
    # Predictive mean with a 2-sigma band plus the training points.
    pl.ion()
    pl.figure()
    pl.plot(xstar,mean,'-k')
    #pl.plot(xstar,mean+2*np.sqrt(var),'x-')
    #pl.plot(xstar,mean-2*np.sqrt(var),'x-')
    #print np.shape(xstar), np.shape(mean), np.shape(var)
    pl.fill_between(np.squeeze(xstar),np.squeeze(mean-2*np.sqrt(var)),np.squeeze(mean+2*np.sqrt(var)),color='0.75')
    pl.plot(x,t,'ko')
    pl.axis('tight')
    pl.xlabel('x')
    pl.ylabel('f(x)')
def runlength():
    """Plot fits for two lengthscales at two input scales (4 figures)."""
    thetas = (np.array([0.5, 1., 0.0]), np.array([0.5, 0.5, 0.0]))
    # Same call order as before: both thetas at scale 1, then at scale .5.
    for scale in (1., .5):
        for th in thetas:
            showlength(th, scale)
def runhp():
    """Plot fits for a sweep of hyperparameter settings (8 figures)."""
    # (amplitude, lengthscale, noise) triples, in the original call order.
    hyperparams = [
        (0.5, 1.0, 0.0),
        (0.5, 1.0, 0.2),
        (0.5, 1.0, 0.4),
        (0.25, 1.0, 0.0),
        (1.0, 1.0, 0.0),
        (0.5, 0.5, 0.0),
        (0.5, 2.0, 0.0),
        (0.5, 1.0, 0.6),
    ]
    for th in hyperparams:
        showlength(np.array(th), 1.)
def test():
data = np.loadtxt("data.txt")
X = data[:,0:-1] # everything except the last column
y = data[:,-1] # just the last column
args = (X,y)
#theta = np.array([ 1.7657065779589087, -1.3841332550882446, -10.162222605402242])
#theta = np.array([ 1.7999382115210827, -14.001391904643032 , -5.577578503745549])
theta = np.zeros(3)
theta[0] = np.random.normal(0,5)
theta[1] = np.random.normal(0,5)
theta[2] = np.random.normal(0,5)
print theta
print np.exp(theta)
print logPosterior(theta,args)
print gradLogPosterior(theta,args)
print so.check_grad(logPosterior, gradLogPosterior, theta, args)
newTheta = so.fmin_ |
#!/usr/bin/env python
from setuptools import setup, find_packages
import chagallpy
# Package metadata for chagallpy; the version is read from the package
# itself so it is defined in exactly one place.
setup(
    name='chagallpy',
    version=chagallpy.__version__,
    packages=find_packages(),
    license='MIT',
    description='CHArming GALLery in PYthon',
    long_description_content_type="text/markdown",
    long_description=open('README.md').read(),
    author='Jan Pipek',
    author_email='jan.pipek@gmail.com',
    url='https://github.com/janpipek/chagallpy',
    install_requires=['wowp', 'pillow', "jinja2", "pyyaml", "click"],
    python_requires="~=3.6",
    # Expose the `chagall` command-line entry point.
    entry_points={
        'console_scripts': [
            'chagall = chagallpy:generate'
        ]
    },
    include_package_data=True,
    package_data={
        'resources': ['*.*'],
        'templates': ['*.html']
    },
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# DTE Chile OpenERP 7
# Copyright (C) 2016 Cesar Lopez Aguillon Mall Connection
# <http://www.mallconnection.org>.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the | implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public | License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import model
import report
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
"""Read and write configuration settings
"""
from milc import cli
def print_config(section, key):
    """Print a single config setting to stdout.
    """
    value = cli.config[section][key]
    cli.echo('%s.%s{fg_cyan}={fg_reset}%s', section, key, value)
def show_config():
    """Print the current configuration to stdout.
    """
    for section_name in cli.config:
        section = cli.config[section_name]
        for key in section:
            print_config(section_name, key)
def parse_config_token(config_token):
    """Split a user-supplied configuration-token into its components.

    A token has the form ``section[.option][=value]``.  Returns a
    ``(section, option, value)`` tuple; missing parts are None.  On a
    malformed token (a value with no section.option key) an error is
    logged and ``(None, None, None)`` is returned.
    """
    section = option = value = None
    if '=' in config_token and '.' not in config_token:
        cli.log.error('Invalid configuration token, the key must be of the form <section>.<option>: %s', config_token)
        return section, option, value
    # Separate the key (<section>.<option>) from the value.
    # Split on the FIRST '=' only, so values may themselves contain '='
    # (the previous unbounded split raised ValueError on e.g. "a.b=c=d").
    if '=' in config_token:
        key, value = config_token.split('=', 1)
    else:
        key = config_token
    # Extract the section and option from the key
    if '.' in key:
        section, option = key.split('.', 1)
    else:
        section = key
    return section, option, value
def set_config(section, option, value):
    """Set (or delete, when value is the string 'None') a key in the running config."""
    read_only = cli.args.read_only
    message = '%s.%s{fg_cyan}:{fg_reset} %s {fg_cyan}->{fg_reset} %s'
    if read_only:
        message += ' {fg_red}(change not written)'
    # Always announce the transition, even when it will not be persisted.
    cli.echo(message, section, option, cli.config[section][option], value)
    if read_only:
        return
    if value == 'None':
        del cli.config[section][option]
    else:
        cli.config[section][option] = value
@cli.argument('-ro', '--read-only', arg_only=True, action='store_true', help='Operate in read-only mode.')
@cli.argument('configs', nargs='*', arg_only=True, help='Configuration options to read or write.')
@cli.subcommand("Read and write configuration settings.")
def config(cli):
    """Read and write config settings.
    This script iterates over the config_tokens supplied as argument. Each config_token has the following form:
    section[.key][=value]
    If only a section (EG 'compile') is supplied all keys for that section will be displayed.
    If section.key is supplied the value for that single key will be displayed.
    If section.key=value is supplied the value for that single key will be set.
    If section.key=None is supplied the key will be deleted.
    No validation is done to ensure that the supplied section.key is actually used by qmk scripts.
    """
    if not cli.args.configs:
        return show_config()
    # Walk every supplied token, remembering whether anything was written.
    config_dirty = False
    for raw_argument in cli.args.configs:
        # A quoted argument may carry several space-separated tokens.
        for config_token in raw_argument.split(' '):
            section, option, value = parse_config_token(config_token)
            # Reject keys with more than one period.
            if option and '.' in option:
                cli.log.error('Config keys may not have more than one period! "%s" is not valid.', config_token)
                return False
            if section and option and value:
                # Write a configuration option.
                set_config(section, option, value)
                if not cli.args.read_only:
                    config_dirty = True
            elif section and option:
                # Display a single key.
                print_config(section, option)
            elif section:
                # Display an entire section.
                for key in cli.config[section]:
                    print_config(section, key)
    # Persist only when something actually changed.
    if config_dirty:
        cli.save_config()
    return True
|
from django.contrib import admin
from . import models
class OrderItemInline(admin.TabularInline):
    # Inline editor for an Order's line items; products are selected via a
    # raw-id popup to avoid rendering the whole product list.
    fields = ('product', 'quantity')
    raw_id_fields = ('product',)
    model = models.OrderItem
    extra = 1  # one blank row for adding a new item
class ProductAdmin(admin.ModelAdmin):
    # Admin for catalogue products; searchable by name/description and
    # filterable on the discontinued flag.
    list_display = ('name', 'price', 'description', 'stock', 'discontinued')
    search_fields = ('name', 'description')
    list_filter = ('discontinued',)
    fields = ('name', 'description', 'price', 'stock', 'discontinued', 'image')
class ReviewAdmin(admin.ModelAdmin):
    # Admin for product reviews, browsable by publication date.
    list_display = ('user', 'product', 'title', 'body', 'rating', 'pub_date')
    search_fields = ('title', 'body')
    list_filter = ('pub_date', 'rating')
    date_hierarchy = 'pub_date'
    fields = ('user', 'product', 'title', 'body', 'rating')
    # raw-id widgets keep the user/product pickers lightweight
    raw_id_fields = ('user', 'product')
class OrderAdmin(admin.ModelAdmin):
    # Admin for orders with inline line-item editing.
    # NOTE(review): 'user', 'total' and 'purchase_date' are displayed but not
    # editable (absent from `fields`) -- presumably set programmatically; confirm.
    list_display = ('user', 'street', 'city', 'state', 'total', 'purchase_date')
    search_fields = ('street', 'city', 'state')
    list_filter = ('purchase_date',)
    date_hierarchy = 'purchase_date'
    fields = ('street', 'city', 'state', 'zip_code')
    inlines = (OrderItemInline,)
# Wire each model to its customised admin class.
admin.site.register(models.Product, ProductAdmin)
admin.site.register(models.Review, ReviewAdmin)
admin.site.register(models.Order, OrderAdmin)
|
# -*- coding: utf-8 -*-
VERSION = (0, 1, 6, 'final')
# Released versions drop the trailing "final" tag; anything else (e.g. a
# pre-release label) is kept as the last version component.
if VERSION[-1] == "final":  # pragma: no cover
    __version__ = '.'.join(str(part) for part in VERSION[:-1])
else:  # pragma: no cover
    __version__ = '.'.join(str(part) for part in VERSION)
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from qiita_core.qiita_settings import r_client
from qiita_pet.test.tornado_test_base import TestHandlerBase
class OauthTestingBase(TestHandlerBase):
    """Handler-test base that pre-registers a short-lived OAuth bearer token."""

    def setUp(self):
        # Fixed token value; subclasses attach it via self.header.
        self.token = 'TESTINGOAUTHSTUFF'
        self.header = {'Authorization': 'Bearer ' + self.token}
        # Store the token metadata in redis the way the oauth layer expects.
        r_client.hset(self.token, 'timestamp', '12/12/12 12:12:00')
        r_client.hset(self.token, 'grant_type', 'client')
        # Expire quickly so stale tokens do not leak between test runs.
        r_client.expire(self.token, 20)
        super(OauthTestingBase, self).setUp()
|
#!/usr/bin/env python
'''
fit best estimate of magnetometer offsets, trying to take into account motor interference
'''
import sys, time, os, math
from argparse import ArgumentParser
# Command-line interface: one or more log files plus fit tuning knobs.
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--condition",dest="condition", default=None, help="select packets by condition")
parser.add_argument("--noise", type=float, default=0, help="noise to add")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
# pymavlink is imported after argument parsing so --help stays fast.
from pymavlink import mavutil
from pymavlink.rotmat import Vector3
def noise():
    '''return a random unit vector scaled by the --noise argument'''
    from random import gauss
    direction = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
    direction.normalize()
    return direction * args.noise
def select_data(data):
    '''thin out clustered samples: keep at most two points per 20-unit cube'''
    kept = []
    seen = {}
    for sample in data:
        (mag, motor) = sample
        bucket = "%u:%u:%u" % (mag.x/20, mag.y/20, mag.z/20)
        seen[bucket] = seen.get(bucket, 0) + 1
        if seen[bucket] < 3:
            kept.append(sample)
    print(len(data), len(kept))
    return kept
def radius(d, offsets, motor_ofs):
    '''magnitude of a sample after applying offsets and motor correction'''
    (mag, throttle) = d
    corrected = mag + offsets + throttle * motor_ofs
    return corrected.length()
def radius_cmp(a, b, offsets, motor_ofs):
    '''cmp-style comparison of two data points by corrected radius'''
    ra = radius(a, offsets, motor_ofs)
    rb = radius(b, offsets, motor_ofs)
    if ra > rb:
        return 1
    elif ra < rb:
        return -1
    return 0
def sphere_error(p, data):
    '''residual vector for the least-squares sphere fit.

    p packs [ofs_x, ofs_y, ofs_z, motor_x, motor_y, motor_z, field_radius].
    Returns one residual per data point: fitted radius minus corrected radius.

    FIX: dropped the unused "from scipy import sqrt" -- sqrt was never used
    here and modern scipy no longer re-exports it, so the import itself
    raised ImportError at call time.
    '''
    x, y, z, mx, my, mz, r = p
    ofs = Vector3(x, y, z)
    motor_ofs = Vector3(mx, my, mz)
    return [r - radius(d, ofs, motor_ofs) for d in data]
def fit_data(data):
    '''least-squares fit of mag offsets, motor offsets and field strength.

    Returns (offsets, motor_ofs, field_strength).
    Raises RuntimeError if the optimizer fails to converge.
    '''
    from scipy import optimize
    # 7 unknowns: 3 mag offsets, 3 motor-interference offsets, field radius.
    p0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    # args must be a tuple; the previous "args=(data)" passed the bare list
    # and only worked because leastsq wraps non-tuple args itself.
    p1, ier = optimize.leastsq(sphere_error, p0[:], args=(data,))
    if ier not in (1, 2, 3, 4):
        raise RuntimeError("Unable to find solution")
    return (Vector3(p1[0], p1[1], p1[2]), Vector3(p1[3], p1[4], p1[5]), p1[6])
def magfit(logfile):
    '''find best magnetometer offset fit to a log file.

    FIX: the body previously read the global "filename" instead of its own
    parameter; it only worked because the caller's loop variable happened to
    be named filename.  Also made the sort, slicing and final print portable
    across Python 2 and 3.
    '''
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile, notimestamps=args.notimestamps)
    data = []
    offsets = Vector3(0, 0, 0)
    motor_ofs = Vector3(0, 0, 0)
    motor = 0.0
    # now gather all the data
    while True:
        m = mlog.recv_match(condition=args.condition)
        if m is None:
            break
        if m.get_type() == "PARAM_VALUE" and m.param_id == "RC3_MIN":
            rc3_min = float(m.param_value)
        if m.get_type() == "SENSOR_OFFSETS":
            # update current offsets
            offsets = Vector3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z)
        if m.get_type() == "SERVO_OUTPUT_RAW":
            # normalise mean motor PWM into a 0..1 throttle estimate
            motor_pwm = m.servo1_raw + m.servo2_raw + m.servo3_raw + m.servo4_raw
            motor_pwm *= 0.25
            rc3_min = mlog.param('RC3_MIN', 1100)
            rc3_max = mlog.param('RC3_MAX', 1900)
            motor = (motor_pwm - rc3_min) / (rc3_max - rc3_min)
            if motor > 1.0:
                motor = 1.0
            if motor < 0.0:
                motor = 0.0
        if m.get_type() == "RAW_IMU":
            mag = Vector3(m.xmag, m.ymag, m.zmag)
            # add data point after subtracting the current offsets
            data.append((mag - offsets + noise(), motor))
    print("Extracted %u data points" % len(data))
    print("Current offsets: %s" % offsets)
    data = select_data(data)
    # do an initial fit with all data
    (offsets, motor_ofs, field_strength) = fit_data(data)
    for count in range(3):
        # sort the data by corrected radius; a key function is equivalent to
        # the old cmp-based sort and works on both Python 2 and 3
        data.sort(key=lambda d: radius(d, offsets, motor_ofs))
        print("Fit %u : %s %s field_strength=%6.1f to %6.1f" % (
            count, offsets, motor_ofs,
            radius(data[0], offsets, motor_ofs), radius(data[-1], offsets, motor_ofs)))
        # discard outliers, keep the middle 3/4 (// keeps the slice
        # indices integral on Python 3)
        data = data[len(data)//8:-len(data)//8]
        # fit again
        (offsets, motor_ofs, field_strength) = fit_data(data)
    print("Final : %s %s field_strength=%6.1f to %6.1f" % (
        offsets, motor_ofs,
        radius(data[0], offsets, motor_ofs), radius(data[-1], offsets, motor_ofs)))
    # print a ready-made mavgraph command line for visual verification
    print("mavgraph.py '%s' 'mag_field(RAW_IMU)' 'mag_field_motors(RAW_IMU,SENSOR_OFFSETS,(%f,%f,%f),SERVO_OUTPUT_RAW,(%f,%f,%f))'" % (
        logfile,
        offsets.x, offsets.y, offsets.z,
        motor_ofs.x, motor_ofs.y, motor_ofs.z))
total = 0.0  # NOTE(review): never read afterwards; appears vestigial
# Fit each supplied log independently.
for filename in args.logs:
    magfit(filename)
|
import libtcodpy as libtcod
from menu import Menu
from menu_manager import MenuStatus
from menu_game import MenuGame
from ui.frame_main_menu import FrameMainMenu
class MenuMain(Menu):
    """Top-level menu: 'a' starts a new game, 'b' exits the program."""
    def __init__(self, width, height):
        Menu.__init__(self, width, height)
        # UI frame that renders the main-menu contents.
        self.menu_frame = FrameMainMenu(width, height)
    def update(self, delta):
        # Advance the frame animation, then poll libtcod for a keypress.
        self.menu_frame.update(delta)
        key = libtcod.console_check_for_keypress(True) #libtcod.console_check_for_keypress
        if key.c == ord("a"):
            # Switch to the in-game menu at the same dimensions.
            return MenuGame(self.width, self.height)
        if key.c == ord("b"):
            return MenuStatus.Exit
        # No recognised key: stay on this menu.
        return MenuStatus.Okay
    def draw(self):
        #print("drawing MenuMain")
        self.menu_frame.draw()
from django.contrib import admin
from django.co | ntrib.admin import ModelAdmin
from paywall.models import PaymentEntry
@admin.register(P | aymentEntry)
class PaymentEntryAdmin(ModelAdmin):
pass
|
nd averageFlag == True:
count = 0
for curr_Key in right_data:
if count > 14: break
try:
average_right_data[curr_Key] /= initial_Gap_Interval
except:
pass
count += 1
averageFlag = False
elif iteration_Count >= initial_Gap_Interval and averageFlag == False:
count = 0
for curr_Key in right_data:
if count > 14: break
try:
right_data[curr_Key] /= average_right_data[curr_Key]
except:
pass
count += 1
if recognitionFlag != 1:
for eachID in data_repository_right["id"]:
fingerCount = 0 #Finger Recognised count
for max_x, max_y, max_z, min_x, min_y, min_z, start_angle_x, start_angle_y, start_angle_z, right_x, right_y, right_z, right_angle_x, right_angle_y, right_angle_z in zip(list(range(0,5)), list(range(5, 10)), list(range(10, 15)), list(range(15, 20)), list(range(20, 25)), list(range(25, 30)), list(range(30, 35)), list(range(35, 40)), list(range(40, 45)), list(range(0, 5)), list(range(5, 10)),list(range(10, 15)),list(range(15, 20)),list(range(20, 25)),list(range(25, 30))):
if (right_data[right_x] > data_repository_right[str(max_x)][eachID] - acc_tolerance)\
and (right_data[right_x] < data_repository_right[str(max_x)][eachID] + acc_tolerance)\
and (right_data[right_y] > data_repository_right[str(max_y)][eachID] - acc_tolerance)\
and (right_data[right_y] < data_repository_right[str(max_y)][eachID] + acc_tolerance)\
and (right_data[right_z] > data_repository_right[str(max_z)][eachID] - acc_tolerance)\
and (right_data[right_z] < data_repository_right[str(max_z)][eachID] + acc_tolerance)\
and (right_data[right_angle_x] < (data_repository_right[str(start_angle_x)][eachID] + angle_tolerance))\
and (right_data[right_angle_x] > (data_repository_right[str(start_angle_x)][eachID] - angle_tolerance))\
and (right_data[right_angle_y] < (data_repository_right[str(start_angle_y)][eachID] + angle_tolerance))\
and (right_data[right_angle_y] > (data_repository_right[str(start_angle_y)][eachID] - angle_tolerance))\
and (right_data[right_angle_z] < (data_repository_right[str(start_angle_z)][eachID] + angle_tolerance))\
and (right_data[right_angle_z] > (data_repository_right[str(start_angle_z)][eachID] - angle_tolerance)):
fingerCount += 1
if fingerCount == 3:
print("Initial condition true")
else:
print("not matched", "\t", fingerCount)
#print(data_repository_right, end="\n\n")
#print(right_data, end="\n\n")
# ----------------RECOGNITION----------------------------
i=0
j=0
pos=0
match = False
while(i<len(data_repository_right.get(0))):
while(j+15<60):
#If current data of Thumb (angles and accln) is greater than min and less than max value
if(right_data.get(j) < data_repository_right.get(j)[i]) and (right_data.get(j) > data_repository_right.get(j+15)[i]):
pos = i
match = True
else:
match = False
j = j+5
if (j==15):
j=30
i+=1
if match:
shortcut = data_repository_right.get("shortcuts")[pos]
#Implement Shortcut
if recognitionFlag == 1 and iteration_Count > initial_Gap_Interval:
if recognitionCount > 5:
print(data_repository_right)
print("Ok Recognized")
recognitionFlag = 0
try:
with open('DataRepositoryRight.json', 'w') as outfile:
json.dump(data_repository_right, outfile)
except:
print("Could not write DataRepositoryRight.json")
#return
else: print("Repeat", recognitionCount)
curr_time = time.time()
for x_values, y_values, z_values in zip(list(range(5)), list(range(5, 10)),list(range(10, 15))):
#only x, y, z acceleration values of each finger
if math.fabs(right_data[x_values]) > movement_Sensitivity_x and math.fabs(right_data[y_values]) > movement_Sensitivity_y and math.fabs(right_data[z_values]) > movement_Sensitivity_z:
if recognitionMode == False:
print("Recognition period ON", "True")
start_time = curr_time
store_gesture(False, "right",name="Dummy", shortcuts="dummy", curr_id= curr_id)
recognitionMode = True
elif recognitionMode == True and recognitionGapCount > recognition_Gap_Interval:
recognitionMode = False
time_period = curr_time - start_time
store_gesture(True, "right", time=time_period , curr_id=curr_id)
print("Recognition period OFF", "False")
recognitionCount += 1
recognitionGapCount = 0
break
#----------------------------------------END----------------
pre_right_data = copy.deepcopy(right_data)
pre_left_data = copy.deepcopy(left_data)
iteration_Count += 1
if recognitionMode == True:
recognitionGapCount += 1
def initialize_data_repository_right():
    """Seed the right-hand gesture repository with a single empty slot."""
    global data_repository_right
    # Metadata columns get placeholder values ...
    for field, blank in (("id", 0), ("name", " "), ("shortcuts", " "), ("time_period", 0)):
        data_repository_right[field].append(blank)
    # ... and all 60 numeric columns (keyed "0".."59") start at zero.
    for column in range(60):
        data_repository_right[str(column)].append(0)
def store_gesture(recognitionModeEnd, hand="right", time= 0, name="Dummy", shortcuts="dummy", curr_id = 0):
    # Persist the current right_data snapshot into the in-memory gesture
    # repository at slot curr_id.
    # NOTE(review): the `time` parameter shadows the time module and is not
    # used in the visible branch; recognitionModeEnd=True currently stores
    # nothing for "right" -- confirm that is intended.
    if hand == "right":
        if recognitionModeEnd == False:
            data_repository_right["id"][curr_id] = curr_id
            data_repository_right["name"][curr_id] = name
            data_repository_right["shortcuts"][curr_id] = shortcuts
            # Columns 0-14: latest acceleration values (labelled "Max").
            for i in list(range(15)): # Max Acceleration
                # val = get_data_from_Data_Repository(str(i), curr_id)
                # if val < right_data[i]:
                data_repository_right[str(i)][curr_id] = right_data[i]
            # Columns 15-29: copies of the same values (labelled "Min").
            for i, j in zip(list(range(15,30)), list(range(15))): #Min Acceleration
                # val = get_data_from_Data_Repository(str(i), curr_id)
                # if val > right_data[j] or val == 0:
                data_repository_right[str(i)][curr_id] = right_data[j]
            # Columns 30-44: starting angles taken from right_data[15:30].
            for i, j in zip(list(range(30, 45)), list(range(15, 30))): #Start Index
                # val = get_data_from_Data_Repository(str(i),curr_id)
                #if val == 0:
                #    data_repository_right[str(i)][curr_id] = right_data[j]
                #else:
                data_repository_right[str(i)][curr_id] = right_data[j] #Average
#---
import logging
# noinspection PyUnresolvedReferences
import feature #noqa
logging.basicConf | ig(level=logging.DEBUG)
class Rollout(object):
    # Library version marker.
    __version__ = '0.3.5'
    def __init__(self, feature_storage=None, user_storage=None, undefined_feature_access=False):
        """
        Manage feature flags for groups, users, or on a percentage basis. Use your own
        user models and persistence with replaceable modules.
        :param feature_storage: Object to manage storage of feature definitions
        :type feature_storage: pyrollout.storage.FeatureStorageManager
        :param user_storage: Object to manage storage of users
        :type user_storage: pyrollout.storage.UserStorageManager
        :param undefined_feature_access: Should undefined features be allowed (True)
            or denied (False) access? Default: False (denied).
        :type undefined_feature_access: bool
        """
        # Fall back to in-memory storage when no backend is supplied.
        if feature_storage is None:
            from storage.memory import MemoryFeatureStorage
            self.feature_storage = MemoryFeatureStorage()
        else:
            self.feature_storage = feature_storage
        if user_storage is None:
            from storage.memory import MemoryUserStorage
            self.user_storage = MemoryUserStorage()
        else:
            self.user_storage = user_storage
        self.default_undefined_feature = undefined_feature_access
    def add_feature(self, feature):
        """
        Add a feature to be handled by this instance
        :param feature: New feature to add
        :type feature: pyrollout.feature.Feature
        """
        # NOTE(review): this parameter shadows the module-level "feature" import.
        self.feature_storage.set_feature_config(feature.name, feature_data=feature)
    def can(self, user, feature_name):
        """
        Check whether user has access to the given feature.
        :param user: User object to check, must be compatible with user storage manager in use
        :type user: dict or object
        :param feature_name: Name of feature to check against
        :type feature_name: basestring
        :return: True if user has access, False otherwise
        :rtype: bool
        """
        feature = self.feature_storage.get_feature_config(feature_name)
        if feature is None:
            # Unknown feature: apply the configured default policy.
            return self.default_undefined_feature
        return feature.can(self.user_storage, user)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds an optional free-form CSS-classes field to the Button widget.
    dependencies = [
        ('widgetbox', '0010_html'),
    ]
    operations = [
        migrations.AddField(
            model_name='button',
            name='extra_css_classes',
            field=models.CharField(max_length=200, blank=True),
            preserve_default=True,
        ),
    ]
|
#!/usr/bin/env python
impor | t os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wellspring.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| |
import sys
sys.path.append("../../")
from unittest.mock import patch, MagicMock
# Stub out hardware-only modules so the driver imports on a dev machine.
MockRPi = MagicMock()
MockSpidev = MagicMock()
modules = {
    "RPi": MockRPi,
    "RPi.GPIO": MockRPi.GPIO,
    "spidev": MockSpidev
}
# NOTE(review): patcher.start() is never stopped; tolerable for a test module.
patcher = patch.dict("sys.modules", modules)
patcher.start()
from gfxlcd.driver.ssd1306.spi import SPI
from gfxlcd.driver.ssd1306.ssd1306 import SSD1306
class TestNJU6450(object):
    # NOTE(review): class name says NJU6450 but it exercises SSD1306 --
    # likely a copy-paste from a sibling test; confirm the intended name.
    def test_initialize(self):
        # Smoke test: constructing the SPI-backed driver must not raise.
        SSD1306(128, 64, SPI())
|
#! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2010-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring
"""
Produce the k-mer abundance distribution for the given file.
% python scripts/abundance-dist.py [ -z -s ] <htname> <data> <histout>
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import csv
import khmer
import argparse
import textwrap
import os
from khmer import __version__
from khmer.kfile import check_input_files
from khmer.khmer_args import (info, sanitize_help, ComboFormatter,
_VersionStdErrAction)
def get_parser():
    """Build and return the argparse parser for abundance-dist.py."""
    epilog = """\
    Example::
        load-into-countgraph.py -x 1e7 -N 2 -k 17 counts \\
            tests/test-data/test-abund-read-2.fa
        abundance-dist.py counts tests/test-data/test-abund-read-2.fa test-dist
    """
    parser = argparse.ArgumentParser(
        description="Calculate abundance distribution of the k-mers in "
                    "the sequence file using a pre-made k-mer countgraph.",
        formatter_class=ComboFormatter, epilog=textwrap.dedent(epilog))
    parser.add_argument('input_count_graph_filename', help='The name of the'
                        ' input k-mer countgraph file.')
    parser.add_argument('input_sequence_filename', help='The name of the input'
                        ' FAST[AQ] sequence file.')
    parser.add_argument('output_histogram_filename', help='The columns are: '
                        '(1) k-mer abundance, (2) k-mer count, (3) cumulative '
                        'count, (4) fraction of total distinct k-mers.')
    # -z: suppress zero-count bins in the histogram output.
    parser.add_argument('-z', '--no-zero', dest='output_zero', default=True,
                        action='store_false',
                        help='Do not output zero-count bins')
    parser.add_argument('-s', '--squash', dest='squash_output', default=False,
                        action='store_true',
                        help='Overwrite existing output_histogram_filename')
    # -b: disable counting beyond the 8-bit (255) saturation point.
    parser.add_argument('-b', '--no-bigcount', dest='bigcount', default=True,
                        action='store_false',
                        help='Do not count k-mers past 255')
    parser.add_argument('--version', action=_VersionStdErrAction,
                        version='khmer {v}'.format(v=__version__))
    parser.add_argument('-f', '--force', default=False, action='store_true',
                        help='Continue even if specified input files '
                        'do not exist or are empty.')
    return parser
def main():
    """Compute and write the k-mer abundance histogram as CSV."""
    info('abundance-dist.py', ['counting'])
    args = sanitize_help(get_parser()).parse_args()
    infiles = [args.input_count_graph_filename,
               args.input_sequence_filename]
    for infile in infiles:
        check_input_files(infile, False)
    print('Counting graph from', args.input_count_graph_filename,
          file=sys.stderr)
    countgraph = khmer.load_countgraph(
        args.input_count_graph_filename)
    # Warn when the user asked for bigcount reporting but the graph was
    # built without it: counts will saturate at 255.
    if not countgraph.get_use_bigcount() and args.bigcount:
        print("WARNING: The loaded graph has bigcount DISABLED while bigcount"
              " reporting is ENABLED--counts higher than 255 will not be "
              "reported.",
              file=sys.stderr)
    countgraph.set_use_bigcount(args.bigcount)
    kmer_size = countgraph.ksize()
    hashsizes = countgraph.hashsizes()
    # Nodegraph used to track which k-mers have been seen already.
    tracking = khmer._Nodegraph(  # pylint: disable=protected-access
        kmer_size, hashsizes)
    print('K:', kmer_size, file=sys.stderr)
    print('outputting to', args.output_histogram_filename, file=sys.stderr)
    # Refuse to overwrite an existing file unless --squash was given
    # (stdout targets are always allowed).
    if args.output_histogram_filename in ('-', '/dev/stdout'):
        pass
    elif os.path.exists(args.output_histogram_filename):
        if not args.squash_output:
            print('ERROR: %s exists; not squashing.' %
                  args.output_histogram_filename,
                  file=sys.stderr)
            sys.exit(1)
        print('** squashing existing file %s' %
              args.output_histogram_filename, file=sys.stderr)
    print('preparing hist...', file=sys.stderr)
    abundances = countgraph.abundance_distribution(
        args.input_sequence_filename, tracking)
    total = sum(abundances)
    if 0 == total:
        print("ERROR: abundance distribution is uniformly zero; "
              "nothing to report.", file=sys.stderr)
        print("\tPlease verify that the input files are valid.",
              file=sys.stderr)
        sys.exit(1)
    if args.output_histogram_filename in ('-', '/dev/stdout'):
        countgraph_fp = sys.stdout
    else:
        countgraph_fp = open(args.output_histogram_filename, 'w')
    countgraph_fp_csv = csv.writer(countgraph_fp)
    # write headers:
    countgraph_fp_csv.writerow(['abundance', 'count', 'cumulative',
                                'cumulative_fraction'])
    sofar = 0
    # `_` is the abundance value (the enumerate index) and is written to the
    # CSV; `i` is the count of distinct k-mers at that abundance.
    for _, i in enumerate(abundances):
        if i == 0 and not args.output_zero:
            continue
        sofar += i
        frac = sofar / float(total)
        countgraph_fp_csv.writerow([_, i, sofar, round(frac, 3)])
        # Stop once the cumulative count covers every k-mer.
        if sofar == total:
            break
# Script entry point.
if __name__ == '__main__':
    main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
# Generated by Django 3.0.7 on 2020-08-03 08:42
from django.db import migrations, models
import weblate.utils.validators
class Migration(migrations.Migration):
    # Adds a per-project "language_aliases" mapping field, validated by a
    # project-specific validator.
    dependencies = [
        ("trans", "0093_auto_20200730_1432"),
    ]
    operations = [
        migrations.AddField(
            model_name="project",
            name="language_aliases",
            field=models.CharField(
                default="",
                blank=True,
                help_text="Comma-separated list of language code mappings, for example: en_GB:en,en_US:en",
                max_length=200,
                validators=[weblate.utils.validators.validate_language_aliases],
                verbose_name="Language aliases",
            ),
        ),
    ]
|
from django.conf import settings
from django.test import TestCase, TransactionTestCase
from django.test.simple import DjangoTestSuiteRunner
from django.test.runner import reorder_suite
from django.utils.importlib import import_module
from django.utils.unittest.loader import defaultTestLoader
class DiscoveryDjangoTestSuiteRunner(DjangoTestSuiteRunner):
    """Runner that falls back to unittest discovery under TEST_DISCOVERY_ROOT."""
    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        suite = None
        discovery_root = settings.TEST_DISCOVERY_ROOT
        if test_labels:
            suite = defaultTestLoader.loadTestsFromNames(test_labels)
            # if single named module has no tests, do discovery within it
            if not suite.countTestCases() and len(test_labels) == 1:
                suite = None
                discovery_root = import_module(test_labels[0]).__path__[0]
        if suite is None:
            suite = defaultTestLoader.discover(
                discovery_root,
                top_level_dir=settings.BASE_PATH,
            )
        if extra_tests:
            for test in extra_tests:
                suite.addTest(test)
        # Group TestCase subclasses ahead of TransactionTestCase subclasses.
        return reorder_suite(suite, (
            TestCase,
            TransactionTestCase,
        ))
|
# ----------------------------------------------------------------------
# author : Martin Ruchti
# contact : martin.ruchti@tum.de
# ----------------------------------------------------------------------
from __future__ import print_function, absolute_import, division
from numpy import *
from .variables import *
'''
this scheme is an implementation of the newton raphson scheme
the scheme is created as a modified version of 'static_scheme.py' and follows the implementation of the
residual based newton raphson strategy in KratosMultiphysics
'''
class NewtonRaphsonStrategy:
    """Residual-based Newton-Raphson solution strategy.

    Drives repeated build-and-solve passes until the (node-scaled) L2 norm of
    the solution increment dx drops below an absolute tolerance, or its ratio
    to the first increment drops below a relative tolerance.
    """
    def __init__(self, model_part, scheme, builder_and_solver, max_iteration, epsAbs, ratioRel = 1e-10):
        # epsAbs: absolute convergence tolerance on the scaled ||dx||.
        # ratioRel: relative tolerance w.r.t. the first iteration's norm.
        self.model_part = model_part
        self.scheme = scheme
        self.builder_and_solver = builder_and_solver
        #self.adjoint_builder_and_solver = adjointbuilder_and_solv
        self.max_iteration_number = max_iteration
        self.epsilon = epsAbs
        self.relativeRatio = ratioRel
        #allocate matrices
        self.A = zeros((0, 0))
        self.b = zeros((0))
        self.dx = zeros((0))
        #file
        self.file = 0
    def Initialize(self):
        # find list of dofs
        self.builder_and_solver.SetupDofSet()
        # allocate memory for the system
        self.A, self.x, self.b = self.builder_and_solver.SetupSystem(
            self.A, self.dx, self.b)
    # def Solve(self): Removed to allow for recording of number of iterations if desired
    def Solve(self,RecordIteration = 0):
        # Run the Newton-Raphson loop; when RecordIteration==1 the number of
        # iterations performed is returned.
        print("=================================================================")
        print("start solving process...")
        self.Initialize()
        # do prediction once
        self.scheme.Predict()
        # initialize parameters for NR - strategy
        iteration_number = 1
        is_converged = False
        error_L2_norm = 0.0
        init_L2_norm = 0.0
        ratio = 1.0
        dragLift = [0,0]
        # first solve
        self.A, self.dx, self.b = self.builder_and_solver.BuildAndSolve(self.A, self.x, self.b)
        # check for convergence
        error_L2_norm = 0.0
        for i in range(0,len(self.dx)):
            error_L2_norm += (self.dx[i])**2
        #scale error with number of nodes
        error_L2_norm = sqrt(error_L2_norm)/len(self.dx)
        # the first increment's norm is the reference for the relative check
        init_L2_norm = error_L2_norm
        if( error_L2_norm <= self.epsilon ):
            is_converged = True
            print("coverged after step: ",iteration_number)
            print("error is: ",error_L2_norm)
            #self.builder_and_solver.ComputeReactions(self.A, self.x, self.b, dragLift)
        else:
            print("not converged, error is: ",error_L2_norm)
            print("ratio is: ", 1)
            print("-----------------------------------------------------------------")
        # call scheme to do update
        self.scheme.Update(self.builder_and_solver.dofset, self.dx)
        #iterate if not converged
        while(not is_converged and iteration_number < self.max_iteration_number):
            # do build and solve
            self.A, self.dx, self.b = self.builder_and_solver.BuildAndSolve(self.A, self.x, self.b)
            # call scheme to do update
            self.scheme.Update(self.builder_and_solver.dofset, self.dx)
            #check for convergence
            error_L2_norm = 0.0
            for i in range(0,len(self.dx)):
                error_L2_norm += (self.dx[i])**2
            #scale error with number of nodes
            error_L2_norm = sqrt(error_L2_norm)/len(self.dx)
            #compute relative error
            ratio = error_L2_norm / init_L2_norm
            if( error_L2_norm <= self.epsilon or ratio <= self.relativeRatio ):
                is_converged = True
            else:
                print("not converged, error is: ",error_L2_norm)
                print("ratio is: ",ratio)
                print("-----------------------------------------------------------------")
            iteration_number += 1
        # New lines to return iteration number if set in input
        if(RecordIteration==1):
            return iteration_number
        # if(iteration_number == self.max_iteration_number):
        #     print("*********maximum iterations reached*********")
        #     print("error is: ",error_L2_norm)
        #     print("ratio is: ",ratio)
        #     self.builder_and_solver.ComputeReactions(self.A, self.x, self.b, dragLift)
        #     print("solving process done!")
        #     print("=================================================================")
        # elif(iteration_number > 1):
        #     print("coverged after step: ",iteration_number)
        #     print("error is: ",error_L2_norm)
        #     print("ratio is: ",ratio)
        #     self.builder_and_solver.ComputeReactions(self.A, self.x, self.b, dragLift)
        #     print("solving process done!")
        #     print("=================================================================")
        #
        # if(self.file != 0):
        #     self.WriteDragforceToFile(dragLift)
    def SpyMatrix(self):
        # Visualise the sparsity pattern of the system matrix.
        try:
            import matplotlib.pylab as pl
            pl.spy(self.A)
            pl.show()
        except:
            raise Exception(
                "error in function Spy. Probably matplotlib not installed")
    def WriteDragforceToFile(self, dragLift):
        # Currently disabled: the original time/drag/lift output is kept for
        # reference in the comments below.
        #time = self.model_part.ProcessInfo[TIME]
        #output 20 digits
        #output = str(time) + " " + str.format("{0:.20f}", dragLift[0]) + " " + str.format("{0:.20f}", dragLift[1]) + "\n"
        #self.WriteToFile(output)
        return
    #file operations
    def OpenFile(self, filename):
        self.file = open(filename, "w")
    def WriteToFile(self, data):
        self.file.write(data)
        self.file.flush()
    def CloseFile(self):
        self.file.close()
jango.forms.models import inlineformset_factory
from django.db import transaction
from django.db.models import F
from django.forms.util import ErrorList
from geonode.tasks.deletion import delete_layer
from geonode.services.models import Service
from geonode.layers.forms import LayerForm, LayerUploadForm, NewLayerUploadForm, LayerAttributeForm
from geonode.base.forms import CategoryForm
from geonode.layers.models import Layer, Attribute, UploadSession
from geonode.base.enumerations import CHARSETS
from geonode.base.models import TopicCategory
from geonode.utils import default_map_config
from geonode.utils import GXPLayer
from geonode.utils import GXPMap
from geonode.layers.utils import file_upload, is_raster, is_vector
from geonode.utils import resolve_object, llbbox_to_mercator
from geonode.people.forms import ProfileForm, PocForm
from geonode.security.views import _perms_info_json
from geonode.documents.models import get_related_documents
from geonode.utils import build_social_links
from geonode.geoserver.helpers import cascading_delete, gs_catalog
from geonode.geoserver.helpers import ogc_server_settings
if 'geonode.geoserver' in settings.INSTALLED_APPS:
from geonode.geoserver.helpers import _render_thumbnail
CONTEXT_LOG_FILE = ogc_server_settings.LOG_FILE
logger = logging.getLogger("geonode.layers.views")

DEFAULT_SEARCH_BATCH_SIZE = 10
MAX_SEARCH_BATCH_SIZE = 25

# User-facing, translatable messages.  Typo fixes below: "contact an
# administrator" (was "and administrator") and "original" (was "orginal").
GENERIC_UPLOAD_ERROR = _("There was an error while attempting to upload your data. \
Please try again, or contact an administrator if the problem continues.")

METADATA_UPLOADED_PRESERVE_ERROR = _("Note: this layer's original metadata was \
populated and preserved by importing a metadata XML file. This metadata cannot be edited.")

_PERMISSION_MSG_DELETE = _("You are not permitted to delete this layer")
_PERMISSION_MSG_GENERIC = _('You do not have permissions for this layer.')
_PERMISSION_MSG_MODIFY = _("You are not permitted to modify this layer")
_PERMISSION_MSG_METADATA = _(
    "You are not permitted to modify this layer's metadata")
_PERMISSION_MSG_VIEW = _("You are not permitted to view this layer")
def log_snippet(log_file):
    """Return the tail (up to 10024 characters) of *log_file*.

    If the path does not point to a regular file, a short placeholder
    message naming the missing path is returned instead.
    """
    if not os.path.isfile(log_file):
        return "No log file at %s" % log_file
    with open(log_file, "r") as handle:
        handle.seek(0, 2)  # jump to end of stream
        size = handle.tell()
        # Rewind at most 10024 characters from the end, never before start.
        start = size - 10024
        if start < 0:
            start = 0
        handle.seek(start, 0)
        return handle.read()
def _resolve_layer(request, typename, permission='base.view_resourcebase',
                   msg=_PERMISSION_MSG_GENERIC, **kwargs):
    """
    Resolve the layer by the provided typename (which may include service name) and check the optional permission.
    """
    service_typename = typename.split(":", 1)

    # Fixes: (1) guard against a typename with no ":" — previously
    # service_typename[1] raised IndexError whenever a Service happened to
    # match the whole name; (2) evaluate the Service queryset once instead
    # of exists() + filter() + index.
    if len(service_typename) > 1:
        service = Service.objects.filter(name=service_typename[0]).first()
        if service is not None:
            # Cascaded services (method "C") keep the full prefixed
            # typename locally; others strip the service prefix.
            return resolve_object(request,
                                  Layer,
                                  {'typename': service_typename[1]
                                   if service.method != "C" else typename},
                                  permission=permission,
                                  permission_msg=msg,
                                  **kwargs)
    return resolve_object(request,
                          Layer,
                          {'typename': typename},
                          permission=permission,
                          permission_msg=msg,
                          **kwargs)
# Basic Layer Views #
@login_required
def layer_upload(request, template='upload/layer_upload.html'):
    """Render the upload page (GET) or ingest an uploaded layer (POST).

    GET returns the upload template populated with available mosaics and
    charsets.  POST validates a NewLayerUploadForm, saves the uploaded
    files via file_upload() and answers with a JSON status document
    (HTTP 200 on success, 400 on any failure).
    """
    if request.method == 'GET':
        mosaics = Layer.objects.filter(is_mosaic=True).order_by('name')
        ctx = {
            'mosaics': mosaics,
            'charsets': CHARSETS,
            'is_layer': True,
        }
        return render_to_response(template, RequestContext(request, ctx))
    elif request.method == 'POST':
        form = NewLayerUploadForm(request.POST, request.FILES)
        tempdir = None
        errormsgs = []
        out = {'success': False}
        if form.is_valid():
            title = form.cleaned_data["layer_title"]
            # Replace dots in filename - GeoServer REST API upload bug
            # and avoid any other invalid characters.
            # Use the title if possible, otherwise default to the filename
            if title is not None and len(title) > 0:
                name_base = title
            else:
                name_base, __ = os.path.splitext(
                    form.cleaned_data["base_file"].name)
            name = slugify(name_base.replace(".", "_"))
            try:
                # Moved this inside the try/except block because it can raise
                # exceptions when unicode characters are present.
                # This should be followed up in upstream Django.
                tempdir, base_file = form.write_files()
                saved_layer = file_upload(
                    base_file,
                    name=name,
                    user=request.user,
                    overwrite=False,
                    charset=form.cleaned_data["charset"],
                    abstract=form.cleaned_data["abstract"],
                    title=form.cleaned_data["layer_title"],
                    metadata_uploaded_preserve=form.cleaned_data["metadata_uploaded_preserve"]
                )
            except Exception as e:
                exception_type, error, tb = sys.exc_info()
                logger.exception(e)
                out['success'] = False
                out['errors'] = str(error)
                # Assign the error message to the latest UploadSession from
                # that user.
                latest_uploads = UploadSession.objects.filter(
                    user=request.user).order_by('-date')
                if latest_uploads.count() > 0:
                    # Attach error details and a tail of the server log to
                    # the session so the failure can be inspected later.
                    upload_session = latest_uploads[0]
                    upload_session.error = str(error)
                    upload_session.traceback = traceback.format_exc(tb)
                    upload_session.context = log_snippet(CONTEXT_LOG_FILE)
                    upload_session.save()
                    out['traceback'] = upload_session.traceback
                    out['context'] = upload_session.context
                    out['upload_session'] = upload_session.id
            else:
                out['success'] = True
                if hasattr(saved_layer, 'info'):
                    out['info'] = saved_layer.info
                out['url'] = reverse(
                    'layer_detail', args=[
                        saved_layer.service_typename])
                # Mark the upload session processed and apply any
                # permissions the uploader specified in the form.
                upload_session = saved_layer.upload_session
                upload_session.processed = True
                upload_session.save()
                permissions = form.cleaned_data["permissions"]
                if permissions is not None and len(permissions.keys()) > 0:
                    saved_layer.set_permissions(permissions)
            finally:
                # Always remove the temporary upload directory.
                if tempdir is not None:
                    shutil.rmtree(tempdir)
        else:
            for e in form.errors.values():
                errormsgs.extend([escape(v) for v in e])
            out['errors'] = form.errors
            out['errormsgs'] = errormsgs
        if out['success']:
            status_code = 200
        else:
            status_code = 400
        return HttpResponse(
            json.dumps(out),
            content_type='application/json',
            status=status_code)
def layer_detail(request, layername, template='layers/layer_detail.html'):
layer = _resolve_layer(
request,
layername,
'base.view_resourcebase',
_PERMISSION_MSG_VIEW)
# assert False, str(layer_bbox)
config = layer.attribute_config()
# Add required parameters for GXP lazy-loading
layer_bbox = layer.bbox
bbox = [float(coord) for coord in list(layer_bbox[0:4])]
config["srs"] = getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:900913')
config["bbox"] = bbox if config["srs"] != 'EPSG:900913' \
else llbbox_to_mercator([float(coord) for coord in bbox])
config["title"] |
elf.taps)).flatten())
self.mtimecurve_i_stems.setData([],[])
self.mtimecurve_i.setData([],[])
# Configure plots.
if self.mtoverlay:
self.mplots['mTIME'].setMouseEnabled(x=True, y=True)
else:
self.mplots['mTIME'].setMouseEnabled(x=False, y=False)
self.mplots['mTIME'].showAxis('right', False)
# Set plot limits and reset axis zoom.
self.plot_auto_limit(self.plots['TIME'], xMin=0, xMax=ntaps)
self.plot_auto_limit(self.mplots['mTIME'], xMin=0, xMax=ntaps)
    def update_step_curves(self):
        """Recompute and redraw the step-response curves.

        For IIR filters the response is computed from (b, a) and plotted
        over a fixed 50 samples; for FIR filters it is computed from the
        taps.  Complex-valued responses are drawn as separate real and
        imaginary stem/envelope curves; real-valued responses clear the
        imaginary curves.  When ``self.mtstep`` is set the same data is
        mirrored onto the multi-tool 'mTIME' plot.
        """
        ntaps = len(self.taps)
        if((ntaps < 1) and (not self.iir)):
            return
        # Set Data.
        if self.iir:
            stepres = self.step_response(self.b,self.a)
            ntaps = 50
        else:
            stepres = self.step_response(self.taps)
        if(type(stepres[0]) == np.complex128):
            self.steprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
                                            np.dstack((np.zeros(stepres.real.shape[0], dtype=int),
                                                       stepres.real)).flatten())
            self.steprescurve.setData(np.arange(ntaps), stepres.real)
            self.steprescurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
                                              np.dstack((np.zeros(stepres.imag.shape[0], dtype=int),
                                                         stepres.imag)).flatten())
            self.steprescurve_i.setData(np.arange(ntaps), stepres.imag)
        else:
            self.steprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
                                            np.dstack((np.zeros(stepres.shape[0], dtype=int),
                                                       stepres)).flatten())
            self.steprescurve.setData(np.arange(ntaps), stepres)
            self.steprescurve_i_stems.setData([],[])
            self.steprescurve_i.setData([],[])
        if self.mtstep:
            # Mirror the step response onto the multi-tool time plot.
            if(type(stepres[0]) == np.complex128):
                self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
                                              np.dstack((np.zeros(stepres.real.shape[0], dtype=int),
                                                         stepres.real)).flatten())
                self.mtimecurve.setData(np.arange(ntaps), stepres.real)
                self.mtimecurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
                                                np.dstack((np.zeros(stepres.imag.shape[0], dtype=int),
                                                           stepres.imag)).flatten())
                self.mtimecurve_i.setData(np.arange(ntaps), stepres.imag)
            else:
                self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
                                              np.dstack((np.zeros(stepres.shape[0], dtype=int),
                                                         stepres)).flatten())
                self.mtimecurve.setData(np.arange(ntaps), stepres)
                self.mtimecurve_i_stems.setData([],[])
                self.mtimecurve_i.setData([],[])
        # Configure plots.
        if self.mtoverlay:
            self.mplots['mTIME'].setMouseEnabled(x=True, y=True)
        else:
            self.mplots['mTIME'].setMouseEnabled(x=False, y=False)
            self.mplots['mTIME'].showAxis('right', False)
        # Set plot limits and reset axis zoom.
        self.plot_auto_limit(self.plots['STEPRES'], xMin=0, xMax=ntaps)
        self.plot_auto_limit(self.mplots['mTIME'], xMin=0, xMax=ntaps)
def update_imp_curves(self):
ntaps = len(self.taps)
if((ntaps < 1) and (not self.iir)):
return
# Set Data.
if self.iir:
impres = self.impulse_response(self.b, self.a)
ntaps = 50
else:
impres = self.impulse_response(self.taps)
if(type(impres[0]) == np.complex128):
self.imprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.real.shape[0], dtype=int),
impres.real)).flatten())
self.imprescurve.setData(np.arange(ntaps), impres.real)
self.imprescurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.imag.shape[0], dtype=int),
impres.imag)).flatten())
| self.imprescurve_i.setData(np.arange(ntaps), impres.imag)
else:
| self.imprescurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.shape[0], dtype=int),
impres)).flatten())
if self.mtimpulse:
if(type(impres[0]) == np.complex128):
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.real.shape[0], dtype=int),
impres.real)).flatten())
self.mtimecurve.setData(np.arange(ntaps), impres.real)
self.mtimecurve_i_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.imag.shape[0], dtype=int),
impres.imag)).flatten())
self.mtimecurve_i.setData(np.arange(ntaps), impres.imag)
else:
self.mtimecurve_stems.setData(np.repeat(np.arange(ntaps), 2),
np.dstack((np.zeros(impres.shape[0], dtype=int),
impres)).flatten())
self.mtimecurve.setData(np.arange(ntaps), impres)
self.mtimecurve_i_stems.setData([],[])
self.mtimecurve_i.setData([],[])
# Configure plots.
if self.mtoverlay:
self.mplots['mTIME'].setMouseEnabled(x=True, y=True)
else:
self.mplots['mTIME'].setMouseEnabled(x=False, y=False)
self.mplots['mTIME'].showAxis('right', False)
# Set plot limits and reset axis zoom.
self.plot_auto_limit(self.plots['IMPRES'], xMin=0, xMax=ntaps)
self.plot_auto_limit(self.mplots['mTIME'], xMin=0, xMax=ntaps)
def plot_secondary(self):
if (self.mfoverlay):
if self.last_mfreq_plot == "freq":
self.mfmagresponse = True
self.update_freq_curves(True)
elif self.last_mfreq_plot == "phase":
self.mfphaseresponse = True
self.update_phase_curves(True)
elif self.last_mfreq_plot == "group":
self.mfgroupdelay = True
self.update_group_curves(True)
elif self.last_mfreq_plot == "pdelay":
self.mfphasedelay = True
self.update_pdelay_curves(True)
self.mplots['mFREQ'].showAxis('right', True)
else:
self.mplots['mFREQ'].setMouseEnabled(x=False, y=False)
self.mplots['mFREQ'].showAxis('right', False)
self.mfreqcurve2.setData([],[])
def update_freq_curves(self, secondary=False):
npts = len(self.fftdB)
if(npts < 1):
return
# Set Data.
if self.iir:
self.freqcurve.setData(self.freq[:npts-1], self.fftdB[:npts-1])
else:
self.freqcurve.setData(self.freq[:int(npts//2)], self.fftdB[:int(npts//2)])
if self.mfmagresponse:
curve = self.mfreqcurve
if secondary:
curve = self.mfreqcurve2
if self.iir:
curve.setData(self.freq[:npts-1], self.fftdB[:npts-1])
else:
curve.setData(self.freq[:int(npts//2)], self.fftdB[:int(npts//2)])
# Set axes to new scales.
# Set |
from django.conf.urls.defaults import *
from corehq import AccountingAdminInterfaceDispatcher
from cor | ehq.apps.accounting.views import *
# URL routes for the accounting admin UI.  String view names are resolved
# relative to the 'corehq.apps.accounting.views' prefix (old-style Django);
# class-based views are wired up directly via as_view().
urlpatterns = patterns('corehq.apps.accounting.views',
    url(r'^$', 'accounting_default', name='accounting_default'),
    url(r'^accounts/(\d+)/$', ManageBillingAccountView.as_view(), name=ManageBillingAccountView.urlname),
    url(r'^accounts/new/$', NewBillingAccountView.as_view(), name=NewBillingAccountView.urlname),
    url(r'^subscriptions/(\d+)/$', EditSubscriptionView.as_view(), name=EditSubscriptionView.urlname),
    url(r'^accounts/new_subscription/$', NewSubscriptionViewNoDefaultDomain.as_view(),
        name=NewSubscriptionViewNoDefaultDomain.urlname),
    url(r'^accounts/new_subscription/(\d+)/$', NewSubscriptionView.as_view(), name=NewSubscriptionView.urlname),
    url(r'^software_plans/new/$', NewSoftwarePlanView.as_view(), name=NewSoftwarePlanView.urlname),
    url(r'^software_plans/(\d+)/$', EditSoftwarePlanView.as_view(), name=EditSoftwarePlanView.urlname),
    # Admin report interfaces are dispatched dynamically by the dispatcher.
    url(AccountingAdminInterfaceDispatcher.pattern(), AccountingAdminInterfaceDispatcher.as_view(),
        name=AccountingAdminInterfaceDispatcher.name()),
    url(r'^pricing_table/(?P<product>[\w-]+)/(?P<locale>[\w-]+)/$', 'pricing_table_json', name='pricing_table_json'),
)
|
(name):
out = ''
word_start = True
for c in name:
if c == '_':
out += '-'
word_start = True
elif word_start:
out += c.upper()
word_start = False
else:
out += c.lower()
return out
def _req_to_item(req):
    """Serialize a Django request into a JSON-safe dict for inbox storage.

    Captures method, path, query string, a filtered/renamed header list,
    the body (utf-8 text under 'body', else base64 under 'body-bin') and
    the client IP (honouring X-Forwarded-For).  Relies on the module-level
    ``orig_headers`` flag and the ``_ignore_header`` /
    ``_convert_header_name`` helpers defined elsewhere in this file.
    """
    item = dict()
    item['method'] = req.method
    item['path'] = req.path
    query = req.META.get('QUERY_STRING')
    if query:
        item['query'] = query
    raw_headers = list()
    # CONTENT_LENGTH / CONTENT_TYPE are not HTTP_-prefixed in WSGI, so they
    # are collected explicitly before the HTTP_* scan below.
    content_length = req.META.get('CONTENT_LENGTH')
    if content_length:
        raw_headers.append(('CONTENT_LENGTH', content_length))
    content_type = req.META.get('CONTENT_TYPE')
    if content_type:
        raw_headers.append(('CONTENT_TYPE', content_type))
    for k, v in req.META.iteritems():
        if k.startswith('HTTP_'):
            raw_headers.append((k[5:], v))
    # undjangoify the header names
    headers = list()
    for h in raw_headers:
        headers.append((_convert_header_name(h[0]), h[1]))
    if orig_headers:
        # if this option is set, then we assume the exact headers are magic prefixed
        tmp = list()
        for h in headers:
            if h[0].lower().startswith('eb9bf0f5-'):
                tmp.append((h[0][9:], h[1]))
        headers = tmp
    else:
        # otherwise, use the blacklist to clean things up
        tmp = list()
        for h in headers:
            if not _ignore_header(h[0]):
                tmp.append(h)
        headers = tmp
    item['headers'] = headers
    if len(req.body) > 0:
        try:
            # if the body is valid utf-8, then store as text
            item['body'] = req.body.decode('utf-8')
        except:
            # else, store as binary
            item['body-bin'] = b64encode(req.body)
    # Client IP: first hop of X-Forwarded-For when present, else the
    # socket peer address.
    forwardedfor = req.META.get('HTTP_X_FORWARDED_FOR')
    if forwardedfor:
        ip_address = forwardedfor.split(',')[0].strip()
    else:
        ip_address = req.META['REMOTE_ADDR']
    item['ip_address'] = ip_address
    return item
def _convert_item(item, responded=False):
out = copy.deepcopy(item)
created = datetime.datetime.fromtimestamp(item['created']).isoformat()
if len(created) > 0 and created[-1] != 'Z':
created += 'Z'
out['created'] = created
if responded:
out['state'] = 'responded'
else:
out['state'] = 'response-pending'
return out
def root(req):
    # The service exposes nothing at its root; always answer 404.
    return HttpResponseNotFound('Not Found\n')
def create(req):
    """Create a new inbox (POST only).

    Optional POST fields: ``id`` (max 64 chars), ``ttl`` (seconds,
    default 3600) and ``response_mode`` (auto / wait-verify / wait,
    default auto).  Returns a JSON description of the created inbox,
    409 if it already exists, 400 on bad input and 503 if storage fails.
    """
    if req.method == 'POST':
        host = req.META.get('HTTP_HOST')
        if not host:
            return HttpResponseBadRequest('Bad Request: No \'Host\' header\n')
        inbox_id = req.POST.get('id')
        if inbox_id is not None and len(inbox_id) > 64:
            return HttpResponseBadRequest('Bad Request: Id length must not exceed 64\n')
        ttl = req.POST.get('ttl')
        if ttl is not None:
            # Robustness fix: a non-numeric ttl previously raised an
            # unhandled ValueError (HTTP 500); report a client error.
            try:
                ttl = int(ttl)
            except ValueError:
                return HttpResponseBadRequest('Bad Request: ttl must be an integer\n')
        if ttl is None:
            ttl = 3600
        response_mode = req.POST.get('response_mode')
        if not response_mode:
            response_mode = 'auto'
        if response_mode not in ('auto', 'wait-verify', 'wait'):
            return HttpResponseBadRequest('Bad Request: response_mode must be "auto", "wait-verify", or "wait"\n')
        try:
            inbox_id = db.inbox_create(inbox_id, ttl, response_mode)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectExists:
            return HttpResponse('Conflict: Inbox already exists\n', status=409)
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        out = dict()
        out['id'] = inbox_id
        out['base_url'] = 'http://' + host + '/i/' + inbox_id + '/'
        out['ttl'] = ttl
        out['response_mode'] = response_mode
        return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
    else:
        return HttpResponseNotAllowed(['POST'])
def inbox(req, inbox_id):
    """Fetch (GET) or delete (DELETE) an inbox.

    GET returns the inbox's id, base_url, ttl and response_mode as JSON.
    DELETE removes the inbox and pushes a 404 to any long-polling
    clients listening on its channel.
    """
    if req.method == 'GET':
        host = req.META.get('HTTP_HOST')
        if not host:
            return HttpResponseBadRequest('Bad Request: No \'Host\' header\n')
        try:
            inbox = db.inbox_get(inbox_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        out = dict()
        out['id'] = inbox_id
        out['base_url'] = 'http://' + host + '/i/' + inbox_id + '/'
        out['ttl'] = inbox['ttl']
        # Older records may lack response_mode; default it to 'auto'.
        response_mode = inbox.get('response_mode')
        if not response_mode:
            response_mode = 'auto'
        out['response_mode'] = response_mode
        return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
    elif req.method == 'DELETE':
        try:
            db.inbox_delete(inbox_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        # we'll push a 404 to any long polls because we're that cool
        publish(grip_prefix + 'inbox-%s' % inbox_id, HttpResponseFormat(code=404, headers={'Content-Type': 'text/html'}, body='Not Found\n'))
        return HttpResponse('Deleted\n')
    else:
        return HttpResponseNotAllowed(['GET', 'DELETE'])
def refresh(req, inbox_id):
    """Extend an inbox's lifetime (POST only).

    Optional POST field ``ttl`` overrides the stored time-to-live.
    Returns 400 on bad input, 404 for an unknown inbox and 503 if
    storage fails.
    """
    if req.method == 'POST':
        ttl = req.POST.get('ttl')
        if ttl is not None:
            # Robustness fix: a non-numeric ttl previously raised an
            # unhandled ValueError (HTTP 500); report a client error.
            try:
                ttl = int(ttl)
            except ValueError:
                return HttpResponseBadRequest('Bad Request: ttl must be an integer\n')
        try:
            db.inbox_refresh(inbox_id, ttl)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        return HttpResponse('Refreshed\n')
    else:
        return HttpResponseNotAllowed(['POST'])
def respond(req, inbox_id, item_id):
    """Deliver a manual response for a waiting request item (POST only).

    The JSON body may carry ``code`` (default 200), ``reason``,
    ``headers`` and either ``body`` (text) or ``body-bin`` (base64).
    The assembled response is published on the item's wait channel and
    the item is removed from the pending set.
    """
    if req.method == 'POST':
        try:
            content = json.loads(req.body)
        except:
            return HttpResponseBadRequest('Bad Request: Body must be valid JSON\n')
        try:
            code = content.get('code')
            if code is not None:
                code = int(code)
            else:
                code = 200
            reason = content.get('reason')
            headers = content.get('headers')
            # body-bin (base64) takes precedence over the text body.
            if 'body-bin' in content:
                body = b64decode(content['body-bin'])
            elif 'body' in content:
                body = content['body']
            else:
                body = ''
        except:
            return HttpResponseBadRequest('Bad Request: Bad format of response\n')
        try:
            db.request_remove_pending(inbox_id, item_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        publish(grip_prefix + 'wait-%s-%s' % (inbox_id, item_id), HttpResponseFormat(code=code, reason=reason, headers=headers, body=body), id='1', prev_id='0')
        return HttpResponse('Ok\n')
    else:
        return HttpResponseNotAllowed(['POST'])
def hit(req, inbox_id):
if len(req.grip.last) > 0:
for channel, last_id in req.grip.last.iteritems():
break
set_hold_longpoll(req, Channel(channel, last_id))
return HttpResponse('Service Unavailable\n', status=503, content_type='text/html')
try:
inbox = db.inbox_get(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
response_mode = inbox.get('response_mode')
if not response_mode:
response_mode = 'auto'
# pubsubhubbub verify request?
hub_challenge = req.GET.get('hub.challenge')
if response_mode == 'wait' or (response_mode == 'wait-verify' and hub_challenge):
respond_now = False
else:
respond_now = True
item = _req_to_item(req)
if hub_challenge:
item['type'] = 'hub-verify'
else:
item['type'] = 'normal'
try:
item_id, prev_id, item_created = db.inbox_append_item(inbox_id, item)
db.inbox_clear_expired_items(inbox_id)
except redis_ops.InvalidId:
return HttpResponseBadRequest('Bad Request: Invalid id\n')
except redis_ops.ObjectDoesNotExist:
return HttpResponseNotFound('Not Found\n')
except:
return HttpResponse('Service Unavailable\n', status=503)
item['id'] = item_id
item['created'] = item_created
item = _convert_item(item, respond_now)
hr_headers = dict()
hr_headers['Content-Type'] = 'application/json'
hr = dict()
hr['last_cursor'] = item_id
hr['items'] = [item]
hr_body = json.dumps(hr) + '\n'
hs_body = json.dumps(item) + '\n'
formats = list()
formats.append(HttpResponseFormat(headers=hr_headers, body=hr_body))
formats.append(HttpStreamFormat(hs_body))
|
put_shapes[0]
# Explicitly unroll the recurrence instead of using scan
cell_out, hid_out = unroll_scan(
fn=step_fun,
sequences=sequences,
outputs_info=[cell_init, hid_init],
go_backwards=self.backwards,
non_sequences=non_seqs,
n_steps=input_shape[1])
else:
# Scan op iterates over first dimension of input and repeatedly
# applies the step function
cell_out, hid_out = theano.scan(
fn=step_fun,
sequences=sequences,
outputs_info=[cell_init, hid_init],
go_backwards=self.backwards,
truncate_gradient=self.gradient_steps,
non_sequences=non_seqs,
strict=True)[0]
return hid_out, cell_out
class GRUStateReuseMixin(StateReuseMixin):
    """Mixin wiring a GRU recurrent layer into the state-reuse machinery."""
    @property
    def state_names(self) -> tuple:
        # NOTE(review): annotated as ``tuple`` but returns a bare string.
        # Confirm whether ``('hidden',)`` was intended — iterating the
        # string would yield individual characters.
        return 'hidden'
    @args_from_opt(2)
    def _make_recurrent_layer(self, l_prev, state_layers, n_hid_unit,
                              grad_clip=5, unroll=False):
        # Build a GRU whose initial hidden state is fed from the carried
        # state layer so the hidden state persists across chunked calls.
        hid_lay = state_layers[0]
        raw_lay = self.GRULayer(l_prev, n_hid_unit,
                                hid_init=hid_lay,
                                grad_clipping=grad_clip,
                                name='GRU_raw',
                                unroll_scan=unroll)
        return raw_lay
class GRULayer(L.layers.GRULayer):
"""
Copy of standard lasagne.layers.LSTMLayer
with overwritten get_output_for such that it returns cell state
"""
def get_output_for(self, inputs, **kwargs):
"""
Compute this layer's output function given a symbolic input variable
Parameters
----------
inputs : list of theano.TensorType
`inputs[0]` should always be the symbolic input variable. When
this layer has a mask input (i.e. was instantiated with
`mask_input != None`, indicating that the lengths of sequences in
each batch vary), `inputs` should have length 2, where `inputs[1]`
is the `mask`. The `mask` should be supplied as a Theano variable
denoting whether each time step in each sequence in the batch is
part of the sequence or not. `mask` should be a matrix of shape
``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
(length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
of sequence i)``. When the hidden state of this layer is to be
pre-filled (i.e. was set to a :class:`Layer` instance) `inputs`
should have length at least 2, and `inputs[-1]` is the hidden state
to prefill with.
Returns
-------
layer_output : theano.TensorType
Symbolic output variable.
"""
unroll_scan = L.utils.unroll_scan
Layer = L.layers.Layer
# Retrieve the layer input
input = inputs[0]
# Retrieve the mask when it is supplied
mask = None
hid_init = None
if self.mask_incoming_index > 0:
mask = inputs[self.mask_incoming_index]
if s | elf.hid_init_incoming_index > 0:
hid_init = inputs[self.hid_init_incoming_index]
# Treat all dimensions after the second as flattened feature dimensions
if input.ndim > 3:
input = T.flatten(input, 3)
# Because scan iterates over the first dimension we dimshuffle to
# (n_time_steps, n_batch, n_feature | s)
input = input.dimshuffle(1, 0, 2)
seq_len, num_batch, _ = input.shape
# Stack input weight matrices into a (num_inputs, 3*num_units)
# matrix, which speeds up computation
W_in_stacked = T.concatenate(
[self.W_in_to_resetgate, self.W_in_to_updategate,
self.W_in_to_hidden_update], axis=1)
# Same for hidden weight matrices
W_hid_stacked = T.concatenate(
[self.W_hid_to_resetgate, self.W_hid_to_updategate,
self.W_hid_to_hidden_update], axis=1)
# Stack gate biases into a (3*num_units) vector
b_stacked = T.concatenate(
[self.b_resetgate, self.b_updategate,
self.b_hidden_update], axis=0)
if self.precompute_input:
# precompute_input inputs*W. W_in is (n_features, 3*num_units).
# input is then (n_batch, n_time_steps, 3*num_units).
input = T.dot(input, W_in_stacked) + b_stacked
# At each call to scan, input_n will be (n_time_steps, 3*num_units).
# We define a slicing function that extract the input to each GRU gate
def slice_w(x, n):
return x[:, n * self.num_units:(n + 1) * self.num_units]
# Create single recurrent computation step function
# input__n is the n'th vector of the input
def step(input_n, hid_previous, *args):
# Compute W_{hr} h_{t - 1}, W_{hu} h_{t - 1}, and W_{hc} h_{t - 1}
hid_input = T.dot(hid_previous, W_hid_stacked)
if self.grad_clipping:
input_n = theano.gradient.grad_clip(
input_n, -self.grad_clipping, self.grad_clipping)
hid_input = theano.gradient.grad_clip(
hid_input, -self.grad_clipping, self.grad_clipping)
if not self.precompute_input:
# Compute W_{xr}x_t + b_r, W_{xu}x_t + b_u, and W_{xc}x_t + b_c
input_n = T.dot(input_n, W_in_stacked) + b_stacked
# Reset and update gates
resetgate = slice_w(hid_input, 0) + slice_w(input_n, 0)
updategate = slice_w(hid_input, 1) + slice_w(input_n, 1)
resetgate = self.nonlinearity_resetgate(resetgate)
updategate = self.nonlinearity_updategate(updategate)
# Compute W_{xc}x_t + r_t \odot (W_{hc} h_{t - 1})
hidden_update_in = slice_w(input_n, 2)
hidden_update_hid = slice_w(hid_input, 2)
hidden_update = hidden_update_in + resetgate * hidden_update_hid
if self.grad_clipping:
hidden_update = theano.gradient.grad_clip(
hidden_update, -self.grad_clipping, self.grad_clipping)
hidden_update = self.nonlinearity_hid(hidden_update)
# Compute (1 - u_t)h_{t - 1} + u_t c_t
hid = (
1 - updategate) * hid_previous + updategate * hidden_update
return hid
def step_masked(input_n, mask_n, hid_previous, *args):
hid = step(input_n, hid_previous, *args)
# Skip over any input with mask 0 by copying the previous
# hidden state; proceed normally for any input with mask 1.
hid = T.switch(mask_n, hid, hid_previous)
return hid
if mask is not None:
# mask is given as (batch_size, seq_len). Because scan iterates
# over first dimension, we dimshuffle to (seq_len, batch_size) and
# add a broadcastable dimension
mask = mask.dimshuffle(1, 0, 'x')
sequences = [input, mask]
step_fun = step_masked
else:
sequences = [input]
step_fun = step
if not isinstance(self.hid_init, Layer):
# Dot against a 1s vector to repeat to shape (num_batch, num_units)
hid_init = T.dot(T.ones((num_batch, 1)), self.hid_init)
# The hidden-to-hidden weight matrix is always used in step
non_seqs = [W_hid_stac |
__author__ = 'davedash'
|
class WheeljackException(Exception):
    """Base class for all Wheeljack-specific errors."""
    pass
class RepoNotFoundException(WheeljackException):
    """Raised when an operation references a Repo that is not defined."""
    pass
class RepoAlreadyInstalledException(WheeljackException):
    """Raised when attempting to install a repo that is already installed."""
    pass
class ReposConfigException(WheeljackException):
    """Raised when the repos configuration contains an error."""
    pass
class WheeljackCodeDirectoryMissing(WheeljackException):
    """Raised when the base code directory is missing."""
    pass
class GitNotRepoException(WheeljackException):
    """Raised when a Git operation targets a directory that is not a Git repo."""
    pass
class GitNoOriginRemoteException(WheeljackException):
    """Raised when a Git repository has no 'origin' remote configured."""
    pass
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet load on startup.
Verify that a fujicoind node can maintain list of wallets loading on startup
"""
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_equal,
)
class WalletStartupTest(FujicoinTestFramework):
    """Verify that wallet load_on_startup flags persist across restarts."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def setup_nodes(self):
        self.add_nodes(self.num_nodes)
        self.start_nodes()
    def run_test(self):
        self.log.info('Should start without any wallets')
        assert_equal(self.nodes[0].listwallets(), [])
        assert_equal(self.nodes[0].listwalletdir(), {'wallets': []})
        self.log.info('New default wallet should load by default when there are no other wallets')
        self.nodes[0].createwallet(wallet_name='', load_on_startup=False)
        self.restart_node(0)
        assert_equal(self.nodes[0].listwallets(), [''])
        self.log.info('Test load on startup behavior')
        # Create five wallets; only w0 and w2 are flagged to load on startup.
        self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
        self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
        self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
        self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
        self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
        # Unloading w0 clears its flag; w4 is reloaded with the flag set.
        self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
        self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
        self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
        assert_equal(set(self.nodes[0].listwallets()), set(('', 'w1', 'w2', 'w3', 'w4')))
        self.restart_node(0)
        # Only the default wallet plus flagged w2/w4 should come back.
        assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
        self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
        self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
        self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
        self.nodes[0].loadwallet(filename='')
        self.restart_node(0)
        # After the final restart only w2 and w3 remain flagged to load.
        assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
if __name__ == '__main__':
    WalletStartupTest().main()
|
from sqlalchemy import Column, ForeignKey, Integer, String, Text, DateTime, Table
from sqlalchemy.orm import relationship, backref
from models import DecBase
from models.document import Document
from models.keyword import Keyword
from jsonschema import *
from json_schemas import *
from models.collection_version import CollectionVersion
from timestamp.timestampfile import TimestampFile
import time
import json
# Define foreign keys required f | or joining defined tables together
# Many-to-many link: which keywords describe which collection.
collection_keywords = Table('collection_keywords', DecBase.metadata,
                            Column('keyword_id', Integer, ForeignKey('keyword.id')),
                            Column('collection_address', String, ForeignKey('collection.address'))
                            )
# Many-to-many link: which documents belong to which collection.
collection_docs = Table('collection_docs', DecBase.metadata,
                        Column('document_address', String, ForeignKey('document.hash')),
                        Column('collection_address_docs', String, ForeignKey('collection.address'))
                        )
# Many-to-many link: historical version root hashes for each collection.
hash_association = Table('collection_hashes', DecBase.metadata,
                         Column('hash', String, ForeignKey('collection_version.root_hash')),
                         Column('collection_address', String, ForeignKey('collection.address'))
                         )
class Collection(DecBase):
""" A Collection is the fundamental unit of organization in the FreeJournal network.
A Collection is a uniquely identifiable set of documents. Each collection is associated
with and signed by a BitMessage broadcast channel address. Each collection contains
a list of documents, a Bitcoin address for ranking, and a version. Messages on the network
called DocIndex messages share the state of a collection at a given version.
This class stores the latest version of each collection the FreeJournal node decides to mirror.
It also stores old timestamps and Merkle trees for bookkeeping purposes (@todo).
Attributes:
title: Title of collection (as in message spec)
description: Collection description (as in message spec)
address: Bitmessage address uniquely ID'ing collection (as in message spec)
btc: Bitcoin address for rating documents (as in message spec)
keywords: Keywords as list of Keyword class for searching (as in message spec)
documents: List of document classes included in the collection (as in message spec)
latest_broadcast_date: The date that this collection was last seen broadcasted in the Main Channel
creation_date: Earliest known timestamp of collection, or if none earliest approximation of creation date of
current version of collection
oldest_date: Earliest known timestamp of collection, or if none earliest approximation of creation date of
any version of collection
latest_btc_tx: Latest Bitcoin transaction timestamping merkle belonging to this collection
oldest_btc_tx: Oldest Bitcoin transaction timestamping merkle belonging to this collection
accesses: Number of times this collection is accessed by a user of this node (for cache pruning)
votes: Latest vote count from the Bitcoin network, used to rank collection
votes_last_checked: Latest poll of Bitcoin network for collection votes, to coordinate internal repolling
"""
__tablename__ = 'collection'
title = Column(Text, nullable=False)
description = Column(String)
address = Column(String, primary_key=True)
btc = Column(String)
keywords = relationship("Keyword", secondary=collection_keywords, backref='collection')
documents = relationship("Document", secondary=collection_docs, backref='collection')
latest_broadcast_date = Column(DateTime, nullable=False)
creation_date = Column(DateTime, nullable=False)
oldest_date = Column(DateTime, nullable=False)
latest_btc_tx = Column(String)
oldest_btc_tx = Column(String)
accesses = Column(Integer, nullable=False, default=0)
votes = Column(Integer, nullable=False, default=0)
votes_last_checked = Column(DateTime)
version_list = relationship(CollectionVersion, backref="collection", lazy='dynamic', secondary=hash_association)
def to_json(self):
    """
    Encode this Collection as a JSON string so it can be sent through the
    Bitmessage network.

    All date fields are serialized with the same human-readable format
    ("%A, %d. %B %Y %I:%M%p"); the result is validated against
    ``coll_schema`` before being returned.

    :return: the JSON representation of this Collection, or None if it
        fails schema validation
    """
    # Hoisted: the same format string was previously repeated for every
    # date field, which invited the formats drifting apart.
    date_format = "%A, %d. %B %Y %I:%M%p"

    json_docs = [{"address": doc.collection_address,
                  "description": doc.description,
                  "title": doc.title,
                  "hash": doc.hash,
                  "filename": doc.filename,
                  "accesses": doc.accesses}
                 for doc in self.documents]
    json_keywords = [{"id": key.id, "name": key.name} for key in self.keywords]

    json_representation = {"type_id": 1,
                           "title": self.title,
                           "description": self.description,
                           "keywords": json_keywords,
                           "address": self.address,
                           "documents": json_docs,
                           "btc": self.btc,
                           "latest_broadcast_date": self.latest_broadcast_date.strftime(date_format),
                           "creation_date": self.creation_date.strftime(date_format),
                           "oldest_date": self.oldest_date.strftime(date_format),
                           "latest_btc_tx": self.latest_btc_tx,
                           "oldest_btc_tx": self.oldest_btc_tx,
                           "accesses": self.accesses,
                           "votes": self.votes,
                           "votes_last_checked": self.votes_last_checked.strftime(date_format)}
    try:
        validate(json_representation, coll_schema)
        return json.dumps(json_representation, sort_keys=True)
    except ValidationError:
        # Invalid collections are signalled to the caller with None
        # rather than raising (exception variable was unused before).
        return None
def get_latest_version(self):
    """Return the highest known version number of this collection, or 0 if none exist."""
    newest = self.version_list.order_by(CollectionVersion.collection_version.desc()).first()
    return newest.collection_version if newest is not None else 0
def get_latest_collection_version(self):
    """Return the newest CollectionVersion row for this collection (None if no versions)."""
    return self.version_list.order_by(
        CollectionVersion.collection_version.desc()).first()
|
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64
import electrum_vtc as electrum
from electrum_vtc.plugins import BasePlugin, hook
from electrum_vtc.i18n import _
class LabelsPlugin(BasePlugin):
    """Keep wallet labels in sync with a remote label server.

    External IDs and labels are AES-encrypted with a password/IV derived
    from the wallet fingerprint before leaving the machine, so the server
    only ever sees ciphertext.

    NOTE(review): this module uses Python 2 idioms (``dict.iteritems`` in
    push_thread, ``str.encode('hex')`` in start_wallet) and will not run
    unmodified on Python 3 — confirm the target interpreter before porting.
    """

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.target_host = 'labels.bauerj.eu'
        # wallet -> (aes password, aes iv, server-side wallet id)
        self.wallets = {}

    def encode(self, wallet, msg):
        """Encrypt *msg* with *wallet*'s key material and return it base64-encoded."""
        password, iv, wallet_id = self.wallets[wallet]
        encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
                                                         msg.encode('utf8'))
        return base64.b64encode(encrypted)

    def decode(self, wallet, message):
        """Base64-decode and decrypt *message* with *wallet*'s key material."""
        password, iv, wallet_id = self.wallets[wallet]
        decoded = base64.b64decode(message)
        decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
        return decrypted.decode('utf8')

    def get_nonce(self, wallet):
        # nonce is the nonce to be used with the next change
        nonce = wallet.storage.get('wallet_nonce')
        if nonce is None:
            nonce = 1
            self.set_nonce(wallet, nonce)
        return nonce

    def set_nonce(self, wallet, nonce):
        """Persist the next-change nonce in the wallet's storage."""
        self.print_error("set", wallet.basename(), "nonce to", nonce)
        wallet.storage.put("wallet_nonce", nonce)

    @hook
    def set_label(self, wallet, item, label):
        """Hook: push a single label change to the server (fire-and-forget thread)."""
        if not wallet in self.wallets:
            return
        nonce = self.get_nonce(wallet)
        wallet_id = self.wallets[wallet][2]
        bundle = {"walletId": wallet_id,
                  "walletNonce": nonce,
                  "externalId": self.encode(wallet, item),
                  "encryptedLabel": self.encode(wallet, label)}
        t = threading.Thread(target=self.do_request,
                             args=["POST", "/label", False, bundle])
        t.setDaemon(True)
        t.start()
        # Caller will write the wallet
        self.set_nonce(wallet, nonce + 1)

    def do_request(self, method, url = "/labels", is_batch=False, data=None):
        """Issue an HTTPS request against the label server and return parsed JSON.

        Raises BaseException on a non-200 status or a server-reported error.
        """
        url = 'https://' + self.target_host + url
        kwargs = {'headers': {}}
        if method == 'GET' and data:
            kwargs['params'] = data
        elif method == 'POST' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['Content-Type'] = 'application/json'
        response = requests.request(method, url, **kwargs)
        if response.status_code != 200:
            raise BaseException(response.status_code, response.text)
        response = response.json()
        if "error" in response:
            raise BaseException(response["error"])
        return response

    def push_thread(self, wallet):
        """Upload every label of *wallet* to the server in one batch request."""
        wallet_id = self.wallets[wallet][2]
        bundle = {"labels": [],
                  "walletId": wallet_id,
                  "walletNonce": self.get_nonce(wallet)}
        for key, value in wallet.labels.iteritems():
            try:
                encoded_key = self.encode(wallet, key)
                encoded_value = self.encode(wallet, value)
            except:
                # Best-effort: skip labels that cannot be encrypted/encoded.
                self.print_error('cannot encode', repr(key), repr(value))
                continue
            bundle["labels"].append({'encryptedLabel': encoded_value,
                                     'externalId': encoded_key})
        self.do_request("POST", "/labels", True, bundle)

    def pull_thread(self, wallet, force):
        """Fetch labels newer than our nonce and merge them into *wallet*.

        With force=True everything is fetched (nonce 1) and local labels
        are overwritten; otherwise only unset local labels are filled in.
        """
        wallet_id = self.wallets[wallet][2]
        nonce = 1 if force else self.get_nonce(wallet) - 1
        self.print_error("asking for labels since nonce", nonce)
        try:
            response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
            if response["labels"] is None:
                self.print_error('no new labels')
                return
            result = {}
            for label in response["labels"]:
                try:
                    key = self.decode(wallet, label["externalId"])
                    value = self.decode(wallet, label["encryptedLabel"])
                except:
                    # Undecryptable entries (e.g. from another wallet) are skipped.
                    continue
                try:
                    json.dumps(key)
                    json.dumps(value)
                except:
                    self.print_error('error: no json', key)
                    continue
                result[key] = value

            for key, value in result.items():
                if force or not wallet.labels.get(key):
                    wallet.labels[key] = value

            # BUG FIX: previously len(response) counted the keys of the
            # response dict, not the number of labels that were received.
            self.print_error("received %d labels" % len(response["labels"]))
            # do not write to disk because we're in a daemon thread
            wallet.storage.put('labels', wallet.labels)
            self.set_nonce(wallet, response["nonce"] + 1)
            # NOTE(review): on_pulled is not defined in this class —
            # presumably provided by a subclass/GUI hook; confirm.
            self.on_pulled(wallet)
        except Exception as e:
            traceback.print_exc(file=sys.stderr)
            self.print_error("could not retrieve labels")

    def start_wallet(self, wallet):
        """Derive per-wallet key material and kick off an initial background pull."""
        nonce = self.get_nonce(wallet)
        self.print_error("wallet", wallet.basename(), "nonce is", nonce)
        mpk = wallet.get_fingerprint()
        if not mpk:
            return
        password = hashlib.sha1(mpk).digest().encode('hex')[:32]
        iv = hashlib.sha256(password).digest()[:16]
        wallet_id = hashlib.sha256(mpk).digest().encode('hex')
        self.wallets[wallet] = (password, iv, wallet_id)
        # If there is an auth token we can try to actually start syncing
        t = threading.Thread(target=self.pull_thread, args=(wallet, False))
        t.setDaemon(True)
        t.start()

    def stop_wallet(self, wallet):
        """Forget key material for *wallet*; future syncs for it stop."""
        self.wallets.pop(wallet, None)
|
"""
Integration tests for mac_timezone
If using Parallels, make sure Time sync is turned off. Otherwise, Parallels will
keep changing your date/time settings while the tests are running. To turn off
Time sync, do the following:
- Go to actions -> configure
- Select options at the top and 'More Options' on the left
- Set time to 'Do not sync'
"""
import datetime
import pytest
from tests.support.case import ModuleCas | e
from tests.support.unit import skipIf
@pytest.mark.flaky(max_runs=4)
@pytest.mark.skip_unless_on_darwin
@pytest.mark.skip_if_binaries_missing("systemsetup")
@pytest.mark.skip_if_not_root
class MacTimezoneModuleTest(ModuleCase):
    """
    Validate the mac_timezone module
    """

    # Defaults, overwritten in setUp with the machine's real settings so
    # tearDown can restore them after each test.
    USE_NETWORK_TIME = False
    TIME_SERVER = "time.apple.com"
    TIME_ZONE = ""
    CURRENT_DATE = ""
    CURRENT_TIME = ""

    def setUp(self):
        """
        Get current settings
        """
        self.USE_NETWORK_TIME = self.run_function("timezone.get_using_network_time")
        self.TIME_SERVER = self.run_function("timezone.get_time_server")
        self.TIME_ZONE = self.run_function("timezone.get_zone")
        self.CURRENT_DATE = self.run_function("timezone.get_date")
        self.CURRENT_TIME = self.run_function("timezone.get_time")

        # Pin a known state for the tests.
        self.run_function("timezone.set_using_network_time", [False])
        self.run_function("timezone.set_zone", ["America/Denver"])

    def tearDown(self):
        """
        Reset to original settings
        """
        self.run_function("timezone.set_time_server", [self.TIME_SERVER])
        self.run_function("timezone.set_using_network_time", [self.USE_NETWORK_TIME])
        self.run_function("timezone.set_zone", [self.TIME_ZONE])
        if not self.USE_NETWORK_TIME:
            # Only restore a manual clock when network time is off.
            self.run_function("timezone.set_date", [self.CURRENT_DATE])
            self.run_function("timezone.set_time", [self.CURRENT_TIME])

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_get_set_date(self):
        """
        Test timezone.get_date
        Test timezone.set_date
        """
        # Correct Functionality
        self.assertTrue(self.run_function("timezone.set_date", ["2/20/2011"]))
        self.assertEqual(self.run_function("timezone.get_date"), "2/20/2011")

        # Test bad date format
        self.assertEqual(
            self.run_function("timezone.set_date", ["13/12/2014"]),
            "ERROR executing 'timezone.set_date': Invalid Date/Time Format: 13/12/2014",
        )

    @pytest.mark.slow_test
    def test_get_time(self):
        """
        Test timezone.get_time
        """
        text_time = self.run_function("timezone.get_time")
        self.assertNotEqual(text_time, "Invalid Timestamp")
        obj_date = datetime.datetime.strptime(text_time, "%H:%M:%S")
        self.assertIsInstance(obj_date, datetime.date)

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_set_time(self):
        """
        Test timezone.set_time
        """
        # Correct Functionality
        self.assertTrue(self.run_function("timezone.set_time", ["3:14"]))

        # Test bad time format
        self.assertEqual(
            self.run_function("timezone.set_time", ["3:71"]),
            "ERROR executing 'timezone.set_time': Invalid Date/Time Format: 3:71",
        )

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_get_set_zone(self):
        """
        Test timezone.get_zone
        Test timezone.set_zone
        """
        # Correct Functionality
        self.assertTrue(self.run_function("timezone.set_zone", ["Pacific/Wake"]))
        self.assertEqual(self.run_function("timezone.get_zone"), "Pacific/Wake")

        # Test bad time zone
        self.assertEqual(
            self.run_function("timezone.set_zone", ["spongebob"]),
            "ERROR executing 'timezone.set_zone': Invalid Timezone: spongebob",
        )

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_get_offset(self):
        """
        Test timezone.get_offset
        """
        self.assertTrue(self.run_function("timezone.set_zone", ["Pacific/Wake"]))
        self.assertIsInstance(self.run_function("timezone.get_offset"), (str,))
        self.assertEqual(self.run_function("timezone.get_offset"), "+1200")

        self.assertTrue(self.run_function("timezone.set_zone", ["America/Los_Angeles"]))
        self.assertIsInstance(self.run_function("timezone.get_offset"), (str,))
        self.assertEqual(self.run_function("timezone.get_offset"), "-0700")

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_get_set_zonecode(self):
        """
        Test timezone.get_zonecode
        Test timezone.set_zonecode
        """
        self.assertTrue(self.run_function("timezone.set_zone", ["America/Los_Angeles"]))
        self.assertIsInstance(self.run_function("timezone.get_zonecode"), (str,))
        self.assertEqual(self.run_function("timezone.get_zonecode"), "PDT")

        self.assertTrue(self.run_function("timezone.set_zone", ["Pacific/Wake"]))
        self.assertIsInstance(self.run_function("timezone.get_zonecode"), (str,))
        self.assertEqual(self.run_function("timezone.get_zonecode"), "WAKT")

    @pytest.mark.slow_test
    def test_list_zones(self):
        """
        Test timezone.list_zones
        """
        # BUG FIX: the fetched list was previously ignored and
        # timezone.list_zones was re-run for every assertion.
        zones = self.run_function("timezone.list_zones")
        self.assertIsInstance(zones, list)
        self.assertIn("America/Denver", zones)
        self.assertIn("America/Los_Angeles", zones)

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_zone_compare(self):
        """
        Test timezone.zone_compare
        """
        self.assertTrue(self.run_function("timezone.set_zone", ["America/Denver"]))
        self.assertTrue(self.run_function("timezone.zone_compare", ["America/Denver"]))
        self.assertFalse(self.run_function("timezone.zone_compare", ["Pacific/Wake"]))

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_get_set_using_network_time(self):
        """
        Test timezone.get_using_network_time
        Test timezone.set_using_network_time
        """
        self.assertTrue(self.run_function("timezone.set_using_network_time", [True]))
        self.assertTrue(self.run_function("timezone.get_using_network_time"))

        self.assertTrue(self.run_function("timezone.set_using_network_time", [False]))
        self.assertFalse(self.run_function("timezone.get_using_network_time"))

    @skipIf(
        True,
        "Skip until we can figure out why modifying the system clock causes ZMQ errors",
    )
    @pytest.mark.destructive_test
    def test_get_set_time_server(self):
        """
        Test timezone.get_time_server
        Test timezone.set_time_server
        """
        self.assertTrue(
            self.run_function("timezone.set_time_server", ["spongebob.com"])
        )
        self.assertEqual(self.run_function("timezone.get_time_server"), "spongebob.com")
# -*- coding: utf-8 -*-
# Scrapy settings for appstore project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'appstore'

SPIDER_MODULES = ['appstore.spiders']
NEWSPIDER_MODULE = 'appstore.spiders'

# Route scraped items through the store pipeline (priority 300).
ITEM_PIPELINES = {
    'appstore.pipelines.AppstorePipeline': 300,
}

# Seconds to wait between consecutive requests to the same site,
# to avoid hammering the app store. (PEP 8 spacing fixed.)
DOWNLOAD_DELAY = 5

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'appstore (+http://www.yourdomain.com)'
|
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
    # Test if max features is set properly for floats and str.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape

    X_train = X[:2000]
    y_train = y[:2000]

    # (estimator, expected value of max_features_ after fitting)
    cases = [
        (GradientBoostingClassifier(n_estimators=1, max_features='auto'),
         int(np.sqrt(n_features))),
        (GradientBoostingRegressor(n_estimators=1, max_features='auto'),
         n_features),
        (GradientBoostingRegressor(n_estimators=1, max_features=0.3),
         int(n_features * 0.3)),
        (GradientBoostingRegressor(n_estimators=1, max_features='sqrt'),
         int(np.sqrt(n_features))),
        (GradientBoostingRegressor(n_estimators=1, max_features='log2'),
         int(np.log2(n_features))),
        (GradientBoostingRegressor(n_estimators=1,
                                   max_features=0.01 / X.shape[1]),
         1),
    ]
    for model, expected in cases:
        model.fit(X_train, y_train)
        assert_equal(model.max_features_, expected)
def test_staged_predict():
    # The staged decision function must eventually match the final prediction.
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()

    # Calling staged_predict before fitting must raise ValueError.
    assert_raises(ValueError, lambda data: np.fromiter(
        clf.staged_predict(data), dtype=np.float64), X_test)

    clf.fit(X_train, y_train)
    final_pred = clf.predict(X_test)

    # Every stage has the right shape; the last stage equals ``predict``.
    for stage_pred in clf.staged_predict(X_test):
        assert_equal(stage_pred.shape, final_pred.shape)
    assert_array_equal(final_pred, stage_pred)
def test_staged_predict_proba():
    # Staged probability predictions must eventually match the final ones.
    X, y = datasets.make_hastie_10_2(n_samples=1200,
                                     random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)

    # Calling staged_predict_proba before fitting must raise NotFittedError.
    assert_raises(NotFittedError, lambda data: np.fromiter(
        clf.staged_predict_proba(data), dtype=np.float64), X_test)

    clf.fit(X_train, y_train)

    # Last staged prediction equals ``predict``.
    for stage_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, stage_pred.shape)
    assert_array_equal(clf.predict(X_test), stage_pred)

    # Last staged probabilities equal ``predict_proba``.
    for stage_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], stage_proba.shape[0])
        assert_equal(2, stage_proba.shape[1])
    assert_array_equal(clf.predict_proba(X_test), stage_proba)
def test_staged_functions_defensive():
    # Test that staged_* functions make defensive copies of their output,
    # so mutating one yielded array does not corrupt the others.
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(10, 3))
    # don't predict zeros
    # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the documented equivalent.
    y = (4 * X[:, 0]).astype(int) + 1
    for estimator in [GradientBoostingRegressor(),
                      GradientBoostingClassifier()]:
        estimator.fit(X, y)
        for func in ['predict', 'decision_function', 'predict_proba']:
            staged_func = getattr(estimator, "staged_" + func, None)
            if staged_func is None:
                # regressor has no staged_predict_proba
                continue
            with warnings.catch_warnings(record=True):
                staged_result = list(staged_func(X))
            # Zero out one stage; earlier stages must be unaffected.
            staged_result[1][:] = 0
            assert_true(np.all(staged_result[0] != 0))
def test_serialization():
    # A pickled-and-restored model must predict identically.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    model.fit(X, y)

    assert_array_equal(model.predict(T), true_result)
    assert_equal(100, len(model.estimators_))

    try:
        import cPickle as pickle  # Python 2 fast path
    except ImportError:
        import pickle

    payload = pickle.dumps(model, protocol=pickle.HIGHEST_PROTOCOL)
    model = None
    model = pickle.loads(payload)
    assert_array_equal(model.predict(T), true_result)
    assert_equal(100, len(model.estimators_))
def test_degenerate_targets():
    # Fitting must still work when every target is identical.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    # The classifier should refuse a single-class problem.
    assert_raises(ValueError, clf.fit, X, np.ones(len(X)))

    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, np.ones(len(X)))
    reg.predict([rng.rand(2)])
    assert_array_equal(np.ones((1,), dtype=np.float64),
                       reg.predict([rng.rand(2)]))
def test_quantile_loss():
    # Quantile loss with alpha=0.5 should behave like least absolute deviation.
    quantile_model = GradientBoostingRegressor(n_estimators=100, loss='quantile',
                                               max_depth=4, alpha=0.5,
                                               random_state=7)
    quantile_model.fit(boston.data, boston.target)
    y_quantile = quantile_model.predict(boston.data)

    lad_model = GradientBoostingRegressor(n_estimators=100, loss='lad',
                                          max_depth=4, random_state=7)
    lad_model.fit(boston.data, boston.target)
    y_lad = lad_model.predict(boston.data)

    assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
    # Non-integer (string) class labels must be supported.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    str_labels = tosequence(map(str, y))

    model.fit(X, str_labels)
    assert_array_equal(model.predict(T), tosequence(map(str, true_result)))

    assert_equal(100, len(model.estimators_))
def test_float_class_labels():
    # Float class labels must be supported.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    float_labels = np.asarray(y, dtype=np.float32)

    model.fit(X, float_labels)
    assert_array_equal(model.predict(T),
                       np.asarray(true_result, dtype=np.float32))

    assert_equal(100, len(model.estimators_))
def test_shape_y():
    # A column-vector y should warn (DataConversionWarning) but still fit.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)

    y_col = np.asarray(y, dtype=np.int32)[:, np.newaxis]

    # This will raise a DataConversionWarning that we want to
    # "always" raise, elsewhere the warnings gets ignored in the
    # later tests, and the tests that check for this warning fail
    assert_warns(DataConversionWarning, model.fit, X, y_col)
    assert_array_equal(model.predict(T), true_result)
    assert_equal(100, len(model.estimators_))
def test_mem_layout():
    # Fitting must give identical results for different memory layouts
    # of X (Fortran/C order) and y (C/Fortran order, int32).
    y32 = np.asarray(y, dtype=np.int32)
    layouts = [
        (np.asfortranarray(X), y),
        (np.ascontiguousarray(X), y),
        (X, np.ascontiguousarray(y32)),
        (X, np.asfortranarray(y32)),
    ]
    for X_variant, y_variant in layouts:
        model = GradientBoostingClassifier(n_estimators=100, random_state=1)
        model.fit(X_variant, y_variant)
        assert_array_equal(model.predict(T), true_result)
        assert_equal(100, len(model.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.