#!/usr/bin/python
import sys, os
import tornado.ioloop
import tornado.web
import logging
import logging.handlers
import re
from urllib import unquote
import config
from vehiclenet import *
reload(sys)
sys.setdefaultencoding('utf8')
def deamon(chdir = False):
try:
if os.fork() > 0:
os._exit(0)
except OSError, e:
print 'fork #1 failed: %d (%s)' % (e.errno, e.strerror)
os._exit(1)
def init():
WeatherHandler.cache()
class DefaultHandler(tornado.web.RequestHandler):
def get(self):
self.write('VehicleNet Say Hello!')
class LogHandler(tornado.web.RequestHandler):
def get(self):
log_filename = 'logs/logging'
if not os.path.exists(log_filename):
self.write('The log file is empty.')
return
log_file = None
log_file_lines = None
try:
log_file = open(log_filename, 'r')
if log_file is None:
raise Exception('log_file is None')
log_file_lines = log_file.readlines()
if log_file_lines is None:
raise Exception('log_file_lines is None')
except Exception, e:
logger = logging.getLogger('web')
logger.error('Failed to read the log file (logs/logging), error: %s' % e)
finally:
if log_file is not None:
log_file.close()
if log_file_lines is None:
self.write('Failed to read the log file.')
return
line_limit = 500
for _ in log_file_lines[::-1]:
line_limit -= 1
if line_limit > 0:
self.write(unquote(_) + '<BR/>')
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
}
routes = [
(r"/", DefaultHandler),
(r"/carlink/weather/findWeather.htm", WeatherHandler),
(r"/carlink/music/findMusic.htm", MusicSearchHandler),
(r"/carlink//music/findMusic.htm", MusicSearchHandler),
(r"/carlink/music/findMusicTop.htm", MusicTopHandler),
(r"/carlink/music/findMusicLrc.htm", LrcSearchHandler),
(r"/carlink/news/findNews.htm", NewsHandler),
]
if config.Mode == 'DEBUG':
routes.append((r"/log", LogHandler))
application = tornado.web.Application(routes, **settings)
if __name__ == "__main__":
if '-d' in sys.argv:
deamon()
logdir = 'logs'
if not os.path.exists(logdir):
os.makedirs(logdir)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler = logging.handlers.TimedRotatingFileHandler(
'%s/logging' % logdir, 'M', 20, 360)
handler.suffix = '%Y%m%d%H%M%S.log'
handler.extMatch = re.compile(r'^\d{4}\d{2}\d{2}\d{2}\d{2}\d{2}')
handler.setFormatter(formatter)
logger = logging.getLogger('web')
logger.addHandler(handler)
if config.Mode == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.ERROR)
init()
application.listen(80)
print 'Server is running, listening on port 80....'
tornado.ioloop.IOLoop.instance().start()
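# Hedged sketch (not part of the original project): the server above only reads
# `config.Mode`, so a minimal config.py such as the following would satisfy the
# import; the 'DEBUG' value is an assumption that enables the /log route and
# debug-level logging above.
#
#     # config.py
#     Mode = 'DEBUG'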
"""映射 集合 ... 高级数据结构类型"""
from string import Template
val_dict = {1: 'a', 2: 'b', 3: 'c'}
print(val_dict)
print(val_dict.keys())
print(val_dict.items())
print(val_dict.values())
factory_dict = dict((['x', 1], ['y', 2]))
print(factory_dict)
ddcit = {}.fromkeys(('x', 'y', 'z'), -24)
ddcit.update(val_dict) # new values overwrite old ones
print(ddcit)
print(ddcit.get("m", "no such key "))
print(ddcit.setdefault('x', "new value "))
print(type(ddcit.keys()))
for key in ddcit.keys():
s = Template("key is ${key} and value is ${value}")
# substitute() needs every placeholder supplied as a keyword argument, otherwise it raises KeyError
print(s.substitute(key=key, value=ddcit[key]))
# the has_key method was removed; see the Python 3 docs https://docs.python.org/3.1/whatsnew/3.0.html#builtins
var_tuple = (1, 'acs')
var_list = [1, 2, 3]
strange_dict = {var_tuple: 11, 1: 'abcd'}
|
# key membership tests
print(1 in strange_dict)
# strange_dict = {var_tuple: 11, 1: 'abcd', var_list: 'acv'}
# syntactically fine, but it raises "unhashable type: 'list'"; every dict operation on it would fail
# because dict keys must be hashable
print(strange_dict[var_tuple])
# print(strange_dict[var_list])
# strange_dict.pop(var_list)
strange_dict.pop(var_tuple)
strange_dict.clear()
del strange_dict
val_dict1 = {1: 'a', '2': "v"}
val_dict2 = {1: 'v'}
# print(val_dict1 > val_dict2)  # ordering comparisons between dicts are no longer supported in Python 3
print(dict([['x', 1], ['z', 2]]))
# note: zip() as a function; map() with a lambda over two sequences is equivalent to zip()
print(type(hash((1, 2, 3))))
print(hash((1, 2, 'a')))
# print(hash(([1, 23, 34], 'a')))
# a set guarantees unique elements: a set in the mathematical sense (no duplicates),
# not just a generic programming collection
print("------set-----")
var_set = set('aasn223wuerhe')
print(type(var_set))
print(var_set)
print("frozensetr ")
var_frozen_set = frozenset('aaddk2u9m3pq40aiwoe27na')
print(var_frozen_set)
print('a' in var_set)
print('2' in var_frozen_set) # True, the digit is treated as a character here
print(2 in var_frozen_set) # False
# CRUD on a mutable set
var_set.update("anddipwq")
print(var_set)
var_set.discard("n")
print(var_set)
var_set.remove("a")
print(var_set)
var_set.pop()
print(var_set)
var_set.clear()
print(var_set)
var_set.add("$")
print(var_set)
var_set1 = set('rtyufghvb')
print(var_set1)
var_set2 = set('qwertyuiop')
print(var_set2)
var_set3 = set('qwertyuiop')
print(var_set3)
var_set4 = var_set1
print(var_set4)
var_set5 = set('qwert')
print(var_set5)
# set operations in the mathematical sense
print(var_set1 == var_set2)
print(var_set1 != var_set2)
print(var_set5 < var_set3)
print(var_set5.issubset(var_set3))
print(var_set1 <= var_set4)
print(var_set1.issuperset(var_set4))
print(var_set1 ^ var_set2) # symmetric difference: elements in A or B but not both (A△B)
print(var_set1.symmetric_difference(var_set2))
print(var_set1.union(var_set5))
print(var_set1 | var_set5)
print(var_set5 & var_set3)
print(var_set5.intersection(var_set3))
print(var_set3 - var_set5)
print(var_set3.difference(var_set5))
# mixed set-type operations: the left operand determines whether the result is mutable
immutable_set = frozenset("ansaskwke")
mutable_set = set("24m9sjwe")
immutable_set_1 = immutable_set | mutable_set
print(type(immutable_set_1))
# print(1 | 2)  # Python 3 actually supports this (it is just bitwise OR on ints)
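# Illustrative addition (not in the original file): the result of a mixed
# set/frozenset operation takes its mutability from the left operand, as the
# comment above notes.
mixed_left_frozen = frozenset("abc") | set("bcd")
mixed_left_mutable = set("abc") | frozenset("bcd")
print(type(mixed_left_frozen))  # <class 'frozenset'>
print(type(mixed_left_mutable))  # <class 'set'>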
# test seasonal.adjust_seasons() options handling
#
# adjust_seasons() handles a variety of optional arguments.
# verify that adjust_seasons() correctly calls fit_trend() for different option combinations.
#
# No noise in this test set.
#
from __future__ import division
import numpy as np
from seasonal import fit_trend, adjust_seasons # pylint:disable=import-error
from seasonal.sequences import sine # pylint:disable=import-error
PERIOD = 25
CYCLES = 4
AMP = 1.0
TREND = AMP / PERIOD
LEVEL = 1000.0
SEASONS = sine(AMP, PERIOD, 1)
DATA = LEVEL + np.arange(PERIOD * CYCLES) * TREND + np.tile(SEASONS, CYCLES)
ZEROS = np.zeros(PERIOD * CYCLES)
def iszero(a):
return np.all(np.isclose(a, ZEROS))
def isseasons(a):
return np.all(np.isclose(a, SEASONS))
def test_auto():
adjusted = adjust_seasons(DATA)
assert adjusted.std() < DATA.std()
def test_trend_line():
adjusted = adjust_seasons(DATA, trend="line")
assert adjusted.std() < DATA.std()
def test_explicit_trend():
trend = fit_trend(DATA, kind="line")
adjusted = adjust_seasons(DATA, trend=trend)
assert adjusted.std() < DATA.std()
def test_trend_period():
adjusted = adjust_seasons(DATA, trend="line", period=PERIOD)
assert adjusted.std() < DATA.std()
def test_trend_seasons():
adjusted = adjust_seasons(DATA, trend="line", seasons=SEASONS)
assert adjusted.std() < DATA.std()
def test_trend_spline():
adjusted = adjust_seasons(DATA, trend="spline")
assert adjusted.std() < DATA.std()
def test_period():
adjusted = adjust_seasons(DATA, period=PERIOD)
assert adjusted.std() < DATA.std()
adjusted = adjust_seasons(DATA, period=PERIOD // 2) # no seasonality
assert adjusted is None
def test_seasons():
adjusted = adjust_seasons(DATA, seasons=SEASONS)
assert adjusted.std() < DATA.std()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="bodyencodedformlcorpus" value="' + b64encode(comment.body.encode('utf-8')) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
bodyEncodedForMLCorpus = str(request.form["bodyencodedformlcorpus"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusCheckin(bodyEncodedForMLCorpus)
if actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSignupAndCheckin(bodyEncodedForMLCorpus)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusRelapse(bodyEncodedForMLCorpus)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusReinstate(bodyEncodedForMLCorpus)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusTooLate(bodyEncodedForMLCorpus)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
@app.route('/updategooglechart.html', methods=["POST"])
def updategooglechart():
print "TODO: Copy display to clipboard"
subprocess.call(['./update-google-chart.py'])
return Response("hello", mimetype='text/html')
def recordMLCorpusCheckin(aString):
with open("../new-ml-corpus-monthly-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSignupAndCheckin(aString):
with open("../new-ml-corpus-monthly-signup-and-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusRelapse(aString):
with open("../new-ml-corpus-monthly-relapse.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusReinstate(aString):
with open("../new-ml-corpus-monthly-reinstate.tx |
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
reverse = 0
r = n
for i in range(32):
bit = r % 2
reverse += bit << (32-i-1)
r = r / 2
return reverse
s = Solution()
r = s.reverseBits(43261596)
print(r)
import unittest
from rsync_usb.ChunkLocation import ChunkLocation
class ChunkLocationTests(unittest.TestCase):
'''Test TargetHashesWriter and TargetHashesReader'''
def testProperties(self):
pos = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos.path, 'dummy')
self.assertEqual(pos.start_pos, 100)
self.assertEqual(pos.data_len, 10)
def testEndPos(self):
pos = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos.start_pos + pos.data_len - 1, pos.end_pos)
self.assertEqual(pos.end_pos, 109)
def testEqual(self):
pos_a = ChunkLocation('dummy', 100, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos_a, pos_b)
# -- Overlapping chunk tests -----------------------------------------------
def assertOverlaping(self, pos_a, pos_b):
msg = "%s should overlap %s but did not"
self.assertTrue(pos_a.overlaps(pos_b), msg % (str(pos_a), str(pos_b)))
self.assertTrue(pos_b.overlaps(pos_a), msg % (str(pos_b), str(pos_a)))
def assertNotOverlaping(self, pos_a, pos_b):
msg = "%s should not overlap %s but does"
self.assertFalse(pos_a.overlaps(pos_b), msg % (str(pos_a), str(pos_b)))
self.assertFalse(pos_b.overlaps(pos_a), msg % (str(pos_b), str(pos_a)))
def testNoOverlapBefore(self):
pos_a = ChunkLocation('dummy', 10, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testNoOverlapAfter(self):
pos_a = ChunkLocation('dummy', 1000, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testNoOverlapDifferentPaths(self):
pos_a = ChunkLocation('dummy_a', 100, 10)
pos_b = ChunkLocation('dummy_b', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testOverlapEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapStartsBefore(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ----|=======|---------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 4, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapStartsBeforeAndEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ----|=========|-------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 4, 11)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInside(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: -----|=========|------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 5, 11)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInsideSameStart(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: ------|========|------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 6, 10)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInsideSameEnd(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: -----|========|-------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 5, 10)
self.assertOverlaping(pos_a, pos_b)
def testOverlapEndsAfter(self):
# 0000000000111111111112
# 0123456789001234567890
# A: -------|=======|------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 7, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapEndsAfterAndEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=========|-----
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 6, 11)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
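# Hedged sketch (an assumption, not the real rsync_usb.ChunkLocation): a minimal
# implementation consistent with what the tests above exercise, i.e. an inclusive
# end_pos and overlap only within the same path.
class ChunkLocationSketch(object):
    def __init__(self, path, start_pos, data_len):
        self.path = path
        self.start_pos = start_pos
        self.data_len = data_len
    @property
    def end_pos(self):
        # inclusive index of the last byte covered by this chunk
        return self.start_pos + self.data_len - 1
    def overlaps(self, other):
        # chunks overlap only when they are in the same file and their
        # [start_pos, end_pos] ranges intersect
        if self.path != other.path:
            return False
        return self.start_pos <= other.end_pos and other.start_pos <= self.end_pos
    def __eq__(self, other):
        return (self.path, self.start_pos, self.data_len) == \
            (other.path, other.start_pos, other.data_len)
    def __str__(self):
        return '%s[%d..%d]' % (self.path, self.start_pos, self.end_pos)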
# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.boolean import boolean
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
def _raise_terms_error(msg=""):
raise AnsibleError(
"subelements lookup expects a list of two or three items, "
+ msg)
terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
# check lookup terms - check number of terms
if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
_raise_terms_error()
# first term should be a list (or dict), second a string holding the subkey
if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
_raise_terms_error("first a dict or a list, second a string pointing to the subkey")
subelements = terms[1].split(".")
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped', False) is not False:
# the registered result was completely skipped
return []
elementlist = []
for key in terms[0].iterkeys():
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
# check for optional flags in third term
flags = {}
if len(terms) == 3:
flags = terms[2]
if not isinstance(flags, dict) and not all([isinstance(key, string_types) and key in FLAGS for key in flags]):
_raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
# build_items
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
if item0.get('skipped', False) is not False:
# this particular item is to be skipped
continue
skip_missing = boolean(flags.get('skip_missing', False))
subvalue = item0
lastsubkey = False
sublist = []
for subkey in subelements:
if subkey == subelements[-1]:
lastsubkey = True
if not subkey in subvalue:
if skip_missing:
continue
else:
raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
if not lastsubkey:
if not isinstance(subvalue[subkey], dict):
if skip_missing:
continue
else:
raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
else:
subvalue = subvalue[subkey]
else: # lastsubkey
if not isinstance(subvalue[subkey], list):
raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
else:
sublist = subvalue.pop(subkey, [])
for item1 in sublist:
ret.append((item0, item1))
return ret
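# Illustrative example (added; not part of the plugin): terms have the shape
# [list_of_dicts, "subkey", optional_flags], and run() pairs each outer item
# with every element of its sub-list. The data below is hypothetical.
#
#     terms = [
#         [{'name': 'alice', 'groups': ['wheel', 'docker']}],
#         'groups',
#         {'skip_missing': True},
#     ]
#     # run(terms, variables) would return something like:
#     # [({'name': 'alice', ...}, 'wheel'), ({'name': 'alice', ...}, 'docker')]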
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Auth Middleware that handles auth for a service
This module can be installed as a filter in front of your service to validate
that requests are coming from a trusted component that has handled
authenticating the call. If a call comes from an untrusted source, it will
redirect it back to be properly authenticated. This is done by sending out a
305 proxy redirect response with the URL for the auth service.
The auth service settings are specified in the INI file (keystone.ini). The ini
file is passed in as the WSGI config file when starting the service. For this
proof of concept, the ini file is in echo/echo/echo.ini.
In the current implementation, a basic auth password is used to verify that the
request is coming from a valid auth component or service
Refer to: http://wiki.openstack.org/openstack-authn
HEADERS
-------
HTTP_ is a standard http header
HTTP_X is an extended http header
> Coming in from initial call
HTTP_X_AUTH_TOKEN : the client token being passed in
HTTP_X_STORAGE_TOKEN: the client token being passed in (legacy Rackspace use)
to support cloud files
> Used for communication between components
www-authenticate : only used if this component is being used remotely
HTTP_AUTHORIZATION : basic auth password used to validate the connection
> What we add to the request for use by the OpenStack service
HTTP_X_AUTHORIZATION: the client identity being passed in
"""
from webob.exc import HTTPUseProxy, HTTPUnauthorized
class RemoteAuth(object):
# app is the downstream WSGI component, usually the OpenStack service
#
# if app is not provided, the assumption is this filter is being run
# from a separate server.
def __init__(self, app, conf):
# app is the next app in WSGI chain - eventually the OpenStack service
self.app = app
self.conf = conf
# where to redirect untrusted requests to
self.proxy_location = conf.get('proxy_location')
# secret that will tell us a request is coming from a trusted auth
# component
self.remote_auth_pass = conf.get('remote_auth_pass')
print 'Starting Remote Auth middleware'
def __call__(self, env, start_response):
# Validate the request is trusted
# Authenticate the Auth component itself.
headers = [('www-authenticate', 'Basic realm="API Auth"')]
if 'HTTP_AUTHORIZATION' not in env:
# Redirect to proxy (auth component) and show that basic auth is
# required
return HTTPUseProxy(location=self.proxy_location,
headers=headers)(env, start_response)
else:
auth_type, encoded_creds = env['HTTP_AUTHORIZATION'].split(None, 1)
if encoded_creds != self.remote_auth_pass:
return HTTPUnauthorized(headers=headers)(env, start_response)
# Make sure that the user has been authenticated by the Auth Service
if 'HTTP_X_AUTHORIZATION' not in env:
return HTTPUnauthorized()(env, start_response)
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return RemoteAuth(app, conf)
return auth_filter
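# Hedged usage sketch (assumption, not from the original project): wiring the
# filter around a trivial WSGI app without paste.deploy. `proxy_location` and
# `remote_auth_pass` are the two conf keys the middleware reads above.
#
#     def echo_app(env, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return ['hello']
#
#     conf = {'proxy_location': 'http://localhost:8080/',
#             'remote_auth_pass': 's3cret'}
#     app = filter_factory(conf)(echo_app)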
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Client module for connecting to and interacting with SmartyStreets API
"""
import json
import numbers
import requests
from .data import Address, AddressCollection
from .exceptions import SmartyStreetsError, ERROR_CODES
def validate_args(f):
"""
Ensures that *args consist of a consistent type
:param f: any client method with *args parameter
:return: function f
"""
def wrapper(self, args):
arg_types = set([type(arg) for arg in args])
if len(arg_types) > 1:
raise TypeError("Mixed input types are not allowed")
elif list(arg_types)[0] not in (dict, str):
raise TypeError("Only dict and str types accepted")
return f(self, args)
return wrapper
def truncate_args(f):
"""
Ensures that *args do not exceed a set limit or are truncated to meet that limit
:param f: any Client method with *args parameter
:return: function f
"""
def wrapper(self, args):
if len(args) > 100:
if self.truncate_addresses:
args = args[:100]
else:
raise ValueError("This exceeds 100 address at a time SmartyStreets limit")
return f(self, args)
return wrapper
def stringify(data):
"""
Ensure all values in the dictionary are strings, except for the value for `candidates` which
should just be an integer.
:param data: a list of addresses in dictionary format
:return: the same list with all values converted to strings, except for the `candidates` count
"""
def serialize(k, v):
if k == "candidates":
return int(v)
if isinstance(v, numbers.Number):
if k == "zipcode":
# If values are presented as integers then leading digits may be cut off,
# and these are significant for the zipcode. Add them back.
return str(v).zfill(5)
return str(v)
return v
return [
{
k: serialize(k, v) for k, v in json_dict.items()
}
for json_dict in data
]
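# Illustrative example (added, not in the original module): stringify() keeps the
# `candidates` count as an int, zero-pads numeric zipcodes, and stringifies other
# numbers while leaving existing strings untouched.
#
#     >>> stringify([{'street': 100, 'zipcode': 2134, 'candidates': '5'}])
#     [{'street': '100', 'zipcode': '02134', 'candidates': 5}]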
class Client(object):
"""
Client class for interacting with the SmartyStreets API
"""
BASE_URL = "https://api.smartystreets.com/"
def __init__(self, auth_id, auth_token, standardize=False, invalid=False, logging=True,
accept_keypair=False, truncate_addresses=False, timeout=None):
"""
Constructs the client
:param auth_id: authentication ID from SmartyStreets
:param auth_token: authentication token
:param standardize: boolean include addresses that match zip+4 in addition to DPV confirmed
addresses
:param invalid: boolean to include address candidates that may not be deliverable
:param logging: boolean to allow SmartyStreets to log requests
:param accept_keypair: boolean to toggle default keypair behavior
:param truncate_addresses: boolean to silently truncate address lists in excess of the
SmartyStreets maximum rather than raise an error.
:param timeout: optional timeout value in seconds for requests.
:return: the configured client object
"""
self.auth_id = auth_id
self.auth_token = auth_token
self.standardize = standardize
self.invalid = invalid
self.logging = logging
self.accept_keypair = accept_keypair
self.truncate_addresses = truncate_addresses
self.timeout = timeout
self.session = requests.Session()
self.session.mount(self.BASE_URL, requests.adapters.HTTPAdapter(max_retries=5))
def post(self, endpoint, data):
"""
Executes the HTTP POST request
:param endpoint: string indicating the URL component to call
:param data: the data to submit
:return: the dumped JSON response content
"""
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'x-standardize-only': 'true' if self.standardize else 'false',
'x-include-invalid': 'true' if self.invalid else 'false',
'x-accept-keypair': 'true' if self.accept_keypair else 'false',
}
if not self.logging:
headers['x-suppress-logging'] = 'true'
params = {'auth-id': self.auth_id, 'auth-token': self.auth_token}
url = self.BASE_URL + endpoint
response = self.session.post(url, json.dumps(stringify(data)),
params=params, headers=headers, timeout=self.timeout)
if response.status_code == 200:
return response.json()
raise ERROR_CODES.get(response.status_code, SmartyStreetsError)
@truncate_args
@validate_args
def street_addresses(self, addresses):
"""
API method for verifying street address and geolocating
Returns an AddressCollection always for consistency. In common usage it'd be simple and
sane to return an Address when only one address was searched, however this makes
populating search addresses from lists of unknown length problematic. If that list
returns only one address now the code has to check the type of return value to ensure
that it isn't applying behavior for an expected list type rather than a single dictionary.
>>> client.street_addresses(["100 Main St, Anywhere, USA", "6 S Blvd, Richmond, VA"])
>>> client.street_addresses([{"street": "100 Main St, anywhere USA"}, ... ])
:param addresses: 1 or more addresses in string or dict format
:return: an AddressCollection
"""
# While it's okay in theory to accept freeform addresses they do need to be submitted in
# a dictionary format.
if type(addresses[0]) != dict:
addresses = [{'street': arg} for arg in addresses]
return AddressCollection(self.post('street-address', data=addresses))
def street_address(self, address):
"""
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match
"""
address = self.street_addresses([address])
if not len(address):
return None
return Address(address[0])
def zipcode(self, *args):
raise NotImplementedError("You cannot lookup zipcodes yet")
from i3pystatus.playerctl import Playerctl
class Spotify(Playerctl):
"""
Get Spotify info using playerctl. Based on `Playerctl`_ module.
"""
player_name = "spotify"
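# Hedged usage note (assumption, not from the original module): with i3pystatus
# this class is registered on a Status object like any other module, e.g.
#
#     from i3pystatus import Status
#     status = Status()
#     status.register("spotify")
#     status.run()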
from xblock.fields import Scope
class CourseGradingModel(object):
"""
Basically a DAO and Model combo for CRUD operations pertaining to grading policy.
"""
# Within this class, allow access to protected members of client classes.
# This comes up when accessing kvs data and caches during kvs saves and modulestore writes.
def __init__(self, course_descriptor):
self.graders = [
CourseGradingModel.jsonize_grader(i, grader) for i, grader in enumerate(course_descriptor.raw_grader)
] # weights transformed to ints [0..100]
self.grade_cutoffs = course_descriptor.grade_cutoffs
self.grace_period = CourseGradingModel.convert_set_grace_period(course_descriptor)
@classmethod
def fetch(cls, course_locator):
"""
Fetch the course grading policy for the given course from persistence and return a CourseGradingModel.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_locator)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
model = cls(descriptor)
return model
@staticmethod
def fetch_grader(course_location, index):
"""
Fetch the course's nth grader
Returns an empty dict if there's no such grader.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
index = int(index)
if len(descriptor.raw_grader) > index:
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
# return empty model
else:
return {"id": index,
"type": "",
"min_count": 0,
"drop_count": 0,
"short_label": None,
"weight": 0
}
@staticmethod
def update_from_json(course_locator, jsondict, user):
"""
Decode the json into CourseGradingModel and save any changes. Returns the modified model.
Probably not the usual path for updates as it's too coarse grained.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_locator)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
graders_parsed = [CourseGradingModel.parse_grader(jsonele) for jsonele in jsondict['graders']]
descriptor.raw_grader = graders_parsed
descriptor.grade_cutoffs = jsondict['grade_cutoffs']
get_modulestore(course_old_location).update_item(descriptor, user.id)
CourseGradingModel.update_grace_period_from_json(course_locator, jsondict['grace_period'], user)
return CourseGradingModel.fetch(course_locator)
@staticmethod
def update_grader_from_json(course_location, grader, user):
"""
Create or update the grader of the given type (string key) for the given course. Returns the modified
grader which is a full model on the client but not on the server (just a dict)
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
# parse removes the id; so, grab it before parse
index = int(grader.get('id', len(descriptor.raw_grader)))
grader = CourseGradingModel.parse_grader(grader)
if index < len(descriptor.raw_grader):
descriptor.raw_grader[index] = grader
else:
descriptor.raw_grader.append(grader)
get_modulestore(course_old_location).update_item(descriptor, user.id)
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
@staticmethod
def update_cutoffs_from_json(course_location, cutoffs, user):
"""
Create or update the grade cutoffs for the given course. Returns the sent-in cutoffs (i.e., no extra
db fetch).
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
descriptor.grade_cutoffs = cutoffs
get_modulestore(course_old_location).update_item(descriptor, user.id)
return cutoffs
@staticmethod
def update_grace_period_from_json(course_location, graceperiodjson, user):
"""
Update the course's default grace period. Incoming dict is {hours: h, minutes: m} possibly as a
grace_period entry in an enclosing dict. It is also safe to call this method with a value of
None for graceperiodjson.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
# Before a graceperiod has ever been created, it will be None (once it has been
# created, it cannot be set back to None).
if graceperiodjson is not None:
if 'grace_period' in graceperiodjson:
graceperiodjson = graceperiodjson['grace_period']
grace_timedelta = timedelta(**graceperiodjson)
descriptor.graceperiod = grace_timedelta
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def delete_grader(course_location, index, user):
"""
Delete the grader of the given type from the given course.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
index = int(index)
if index < len(descriptor.raw_grader):
del descriptor.raw_grader[index]
# force propagation to definition
descriptor.raw_grader = descriptor.raw_grader
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def delete_grace_period(course_location, user):
"""
Delete the course's grace period.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
del descriptor.graceperiod
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def get_section_grader_type(location):
old_location = loc_mapper().translate_locator_to_location(location)
descriptor = get_modulestore(old_location).get_item(old_location)
return {
"graderType": descriptor.format if descriptor.format is not None else 'notgraded',
"location": unicode(location),
}
@staticmethod
def update_section_grader_type(descriptor, grader_type, user):
if grader_type is not None and grader_type != u'notgraded':
descriptor.format = grader_type
descriptor.graded = True
else:
del descriptor.format
del descriptor.graded
get_modulestore(descriptor.location).update_item(descriptor, user.id)
return {'graderType': grader_type}
@staticmethod
def convert_set_grace_period(descriptor):
# 5 hours 59 minutes 59 seconds => converted to iso format
rawgrace = descriptor.graceperiod
if rawgrace:
hours_from_days = rawgrace.days * 24
seconds = rawgrace.seconds
hours_from_seconds = int(seconds / 3600)
hours = hours_from_days + hours_from_seconds
seconds -= hours_from_seconds * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
graceperiod = {'hours': 0, 'minutes': 0, 'seconds': 0}
if hours > 0:
graceperiod['hours'] = hours
if minutes > 0:
graceperiod['minutes'] = minutes
if seconds > 0:
graceperiod['seconds'] = seconds
return graceperiod
else:
return None
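# Worked example (illustrative; not in the original source): a graceperiod
# timedelta of 1 day, 2 hours, 3 minutes and 4 seconds flattens to
# {'hours': 26, 'minutes': 3, 'seconds': 4}, since days are folded into hours
# and zero-valued components are omitted from the returned dict.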
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf_export tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
def _test_function(unused_arg=0):
pass
def _test_function2(unused_arg=0):
pass
class TestClassA(object):
pass
class TestClassB(TestClassA):
pass
class ValidateExportTest(test.TestCase):
"""Tests for tf_export class."""
class MockModule(object):
def __init__(self, name):
self.__name__ = name
def setUp(self):
self._modules = []
def tearDown(self):
for name in self._modules:
del sys.modules[name]
self._modules = []
for symbol in [_test_function, _test_function2, TestClassA, TestClassB]:
if hasattr(symbol, '_tf_api_names'):
del symbol._tf_api_names
if hasattr(symbol, '_tf_api_names_v1'):
del symbol._tf_api_names_v1
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
sys.modules[name] = mock_module
self._modules.append(name)
return mock_module
def testExportSingleFunction(self):
export_decorator = tf_export.tf_export('nameA', 'nameB')
decorated_function = export_decorator(_test_function)
self.assertEquals(decorated_function, _test_function)
self.assertEquals(('nameA', 'nameB'), decorated_function._tf_api_names)
def testExportMultipleFunctions(self):
export_decorator1 = tf_export.tf_export('nameA', 'nameB')
export_decorator2 = tf_export.tf_export('nameC', 'nameD')
decorated_function1 = export_decorator1(_test_function)
decorated_function2 = export_decorator2(_test_function2)
self.assertEquals(decorated_function1, _test_function)
self.assertEquals(decorated_function2, _test_function2)
self.assertEquals(('nameA', 'nameB'), decorated_function1._tf_api_names)
self.assertEquals(('nameC', 'nameD'), decorated_function2._tf_api_names)
def testExportClasses(self):
export_decorator_a = tf_export.tf_export('TestClassA1')
export_decorator_a(TestClassA)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
export_decorator_b = tf_export.tf_export('TestClassB1')
export_decorator_b(TestClassB)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertEquals(('TestClassB1',), TestClassB._tf_api_names)
def testExportSingleConstant(self):
module1 = self._CreateMockModule('module1')
export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator.export_constant('module1', 'test_constant')
self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
module1._tf_api_constants)
def testExportMultipleConstants(self):
module1 = self._CreateMockModule('module1')
module2 = self._CreateMockModule('module2')
test_constant1 = 123
test_constant2 = 'abc'
test_constant3 = 0.5
export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
export_decorator1.export_constant('module1', test_constant1)
export_decorator2.export_constant('module2', test_constant2)
export_decorator3.export_constant('module2', test_constant3)
self.assertEquals([(('NAME_A', 'NAME_B'), 123)],
module1._tf_api_constants)
self.assertEquals([(('NAME_C', 'NAME_D'), 'abc'),
(('NAME_E', 'NAME_F'), 0.5)],
module2._tf_api_constants)
def testRaisesExceptionIfAlreadyHasAPINames(self):
_test_function._tf_api_names = ['abc']
export_decorator = tf_export.tf_export('nameA', 'nameB')
with self.assertRaises(tf_export.SymbolAlreadyExposedError):
export_decorator(_test_function)
def testRaisesExceptionIfInvalidSymbolName(self):
# TensorFlow code is not allowed to export symbols under package
# tf.estimator
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('estimator.invalid')
# All symbols exported by Estimator must be under tf.estimator package.
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('Estimator.invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid.estimator')
def testRaisesExceptionIfInvalidV1SymbolName(self):
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('valid', v1=['estimator.invalid'])
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('estimator.valid', v1=['invalid'])
def testOverridesFunction(self):
_test_function2._tf_api_names = ['abc']
export_decorator = tf_export.tf_export(
'nameA', 'nameB', overrides=[_test_function2])
export_decorator(_test_function)
# _test_function overrides _test_function2. So, _tf_api_names
# should be removed from _test_function2.
self.assertFalse(hasattr(_test_function2, '_tf_api_names'))
def testMultipleDecorators(self):
def get_wrapper(func):
def wrapper(*unused_args, **unused_kwargs):
pass
return tf_decorator.make_decorator(func, wrapper)
decorated_function = get_wrapper(_test_function)
export_decorator = tf_export.tf_export('nameA', 'nameB')
exported_function = export_decorator(decorated_function)
self.assertEquals(decorated_function, exported_function)
self.assertEquals(('nameA', 'nameB'), _test_function._tf_api_names)
if __name__ == '__main__':
test.main()
ol_mode_zone, text='Broad', variable=self.tol_mode,
value='broad',
command=self.change_tol_mode)
# command=lambda: self.event_generate('<<update_all_graphs>>'))
self.tol_mode_broad.grid(row=0, column=1, sticky=W)
self.tol_mode_stct = Radiobutton(
self.tol_mode_zone, text='Strict', variable=tol_mode,
value='strict',
command=self.change_tol_mode)
self.tol_mode_stct.grid(row=0, column=2, sticky=W)
self.tol_rel_ent.bind('<Return>', self.enter_tol_setting)
self.tol_floor_ent.bind('<Return>', self.enter_tol_setting)
self.tol_abs_ent.bind('<Return>', self.enter_tol_setting)
self.change_tol_type(andupdate=False)
def change_tol_type(self, andupdate=True):
if self.tol_type.get() == 'relative':
self.tol_rel_lab.configure(state=NORMAL)
self.tol_rel_ent.configure(state=NORMAL)
self.tol_floor_lab.configure(state=NORMAL)
self.tol_floor_ent.configure(state=NORMAL)
self.tol_abs_lab.configure(state=DISABLED)
self.tol_abs_ent.configure(state=DISABLED)
elif self.tol_type.get() == 'absolute':
self.tol_rel_lab.configure(state=DISABLED)
self.tol_rel_ent.configure(state=DISABLED)
self.tol_floor_lab.configure(state=DISABLED)
self.tol_floor_ent.configure(state=DISABLED)
self.tol_abs_lab.configure(state=NORMAL)
self.tol_abs_ent.configure(state=NORMAL)
if andupdate:
#self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_all_tolerances>>')
self.event_generate('<<update_summary>>')
def enter_tol_setting(self, event):
#self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_all_tolerances>>')
self.event_generate('<<update_summary>>')
def change_tol_mode(self):
self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_summary>>')
class StrengthBox(LabelFrame):
'''The frame that allows users to change between Strength types.'''
def __init__(self, strength_mode,
parent=None, text='Strength', padx=2, pady=2,
heading_font='TkDefaultFont', row=0, column=0, **kw):
LabelFrame.__init__(self, parent, text=text, padx=padx, pady=pady,
font=heading_font)
self.grid(row=row, column=column, sticky=EW)
self.columnconfigure(0, weight=1)
self.strength_options = ('Height-Dependent', 'Height-Independent')
self.strength_selector = OptionMenu(
self, strength_mode, *self.strength_options,
command=self.update_summary_event)
self.strength_selector.grid(row=0, column=0, sticky=EW)
def update_summary_event(self, strength_option):
self.event_generate('<<update_summary>>')
class ControlPanel(Frame):
'''Control Panel contains all the stat readouts and the adjustable
settings, including the smoothing parameter box, the summary box, the view
settings, etc.
Inputs include variable names for all of the settings.
'''
def __init__(self, heading_font, input_font, summary_font, current_sp,
view_names, view_pts, view_pandtol, view_spline, view_se,
sp_lim, sp_min, sp_max,
loc_peak, peak_min, peak_max,
tol_type, tol_drop, tol_floor, tol_absolute, tol_mode,
strength_mode, parent=None, platform=platform, **kw):
Frame.__init__(self, parent, relief=SUNKEN, bd=1, padx=7, pady=7)
self.smoothing_box = SmoothingBox(self, heading_font=heading_font,
input_font=input_font,
current_sp=current_sp, row=0)
self.summary_box = SummaryBox(self, row=1, heading_font=heading_font,
summary_font=summary_font)
spacer_text = '--Settings--'
if platform == 'linux':
spacer_text = '\n' + spacer_text
self.set_lab = Label(self, text=spacer_text, pady=0, font=heading_font)
self.set_lab.grid(row=2, column=0)
self.view_box = ViewBox(self, row=3,
view_names_var=view_names,
view_pts_var=view_pts,
view_pandtol_var=view_pandtol,
view_spline_var=view_spline,
view_se_var=view_se,
heading_font=heading_font)
self.smoothing_limits_box = SmoothingLimitsBox(
self, row=4, heading_font=heading_font, input_font=input_font,
sp_lim_state=sp_lim, sp_min=sp_min, sp_max=sp_max)
self.peak_box = LocalPeakBox(parent=self, loc_peak_state=loc_peak,
peak_min=peak_min, peak_max=peak_max,
heading_font=heading_font,
input_font=input_font, row=5)
self.tolerance_box = ToleranceBox(parent=self, tol_type=tol_type,
tol_drop=tol_drop,
tol_floor=tol_floor,
tol_absolute=tol_absolute,
tol_mode=tol_mode,
heading_font=heading_font,
input_font=input_font,
row=6)
self.strength_box = StrengthBox(parent=self,
strength_mode=strength_mode,
heading_font=heading_font, row=7)
def update_summary(self, individual=None,
strength_mode=None, tol_mode=None):
self.summary_box.update_summary(individual, strength_mode, tol_mode)
def activate(self):
self.smoothing_box.activate()
self.active_mode = 'activated'
class FileMenu(Menubutton):
'''Defines the File menu at the top of the screen (and accompanying
functions).
'''
def __init__(self, file_opt, parent=None, row=0, column=0):
Menubutton.__init__(self, parent, text='File')
self.grid(row=row, column=column, sticky=W)
self.file_opt = file_opt
self.parent = parent
self.primary_menu = Menu(self, tearoff=0)
self.open_menu = Menu(self, tearoff=0)
self.open_menu.add_command(label='Horizontal...',
command=self.open_horizontal_file)
self.open_menu.add_command(label='Vertical...',
command=self.open_vertical_file)
self.primary_menu.add_cascade(label='Open Data File',
menu=self.open_menu)
self.primary_menu.add_separator()
self.primary_menu.add_command(label='Load Smoothing Values...',
command=self.open_sp,
state=DISABLED)
self.primary_menu.add_command(label='Save Smoothing Values...',
command=self.save_sp,
state=DISABLED)
self.primary_menu.add_command(label='Clear Smoothing Values',
command=self.clear_sps,
state=DISABLED)
self.primary_menu.add_separator()
self.primary_menu.add_command(label='Load Previous Settings',
command=self.open_sett)
self.primary_menu.add_command(label='Save Current Settings',
command=self.save_sett)
self.primary_menu.add_command(label='Restore Default Settings',
command=self.reset_sett)
self.primary_menu.add_separator()
self.primary_menu.add_command(label='Output Spline Figures...',
command=self.output_graphs,
deque_2),
random_sample_size)
unique_random_sample_keys = {(a, b + offset)
for a, b in random_sample_keys}
return [(data_1[k1], data_2[k2])
for k1, k2
in blocked_sample_keys | unique_random_sample_keys]
class RLRLearner(ActiveLearner, rlr.RegularizedLogisticRegression):
def __init__(self, data_model):
super().__init__(alpha=1)
self.data_model = data_model
self._candidates: List[TrainingExample]
@property
def candidates(self) -> List[TrainingExample]:
return self._candidates
@candidates.setter
def candidates(self, new_candidates):
self._candidates = new_candidates
self.distances = self.transform(self._candidates)
random_pair = random.choice(self._candidates)
exact_match = (random_pair[0], random_pair[0])
self.fit_transform([exact_match, random_pair],
[1, 0])
def transform(self, pairs):
return self.data_model.distances(pairs)
def fit(self, X, y):
self.y = numpy.array(y)
self.X = X
super().fit(self.X, self.y, cv=False)
def fit_transform(self, pairs, y):
self.fit(self.transform(pairs), y)
def pop(self) -> TrainingExample:
if not len(self.candidates):
raise IndexError("No more unlabeled examples to label")
target_uncertainty = self._bias()
probabilities = self.candidate_scores()
distance_to_target = numpy.abs(target_uncertainty - probabilities)
uncertain_index = distance_to_target.argmin()
self.distances = numpy.delete(self.distances, uncertain_index, axis=0)
uncertain_pair = self.candidates.pop(uncertain_index)
return uncertain_pair
def _remove(self, index):
self.distances = numpy.delete(self.distances, index, axis=0)
def mark(self, pairs, y):
self.y = numpy.concatenate([self.y, y])
self.X = numpy.vstack([self.X, self.transform(pairs)])
self.fit(self.X, self.y)
def _bias(self):
positive = numpy.sum(self.y == 1)
n_examples = len(self.y)
bias = 1 - (positive / n_examples if positive else 0)
# When we have just a few examples we are okay with getting
# examples where the model strongly believes the example is
# going to be positive or negative. As we get more examples,
# prefer to ask for labels of examples the model is more
# uncertain of.
uncertainty_weight = min(positive, n_examples - positive)
bias_weight = 10
weighted_bias = 0.5 * uncertainty_weight + bias * bias_weight
weighted_bias /= uncertainty_weight + bias_weight
return weighted_bias
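# Worked illustration (added for clarity; not in the original): with 3 positive
# labels out of 10, bias = 0.7 and uncertainty_weight = 3, so the target is
# (0.5 * 3 + 0.7 * 10) / (3 + 10) ~= 0.65, still tilted toward confident positives.
# With 300 positives out of 1000 the same formula gives ~= 0.51, i.e. the target
# drifts toward 0.5 (maximum model uncertainty) as labeled examples accumulate.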
def candidate_scores(self):
return self.predict_proba(self.distances)
def __len__(self):
return len(self.candidates)
class DedupeRLRLearner(DedupeSampler, RLRLearner):
def __init__(self, data_model, data, blocked_proportion, sample_size):
super().__init__(data_model)
self.candidates = self._sample(data, blocked_proportion, sample_size)
class RecordLinkRLRLearner(RecordLinkSampler, RLRLearner):
def __init__(self, data_model, data_1, data_2, blocked_proportion, sample_size):
super().__init__(data_model)
self.candidates = self._sample(data_1, data_2, blocked_proportion, sample_size)
class BlockLearner(object):
def __init__(self, data_model, candidates, *args):
self.data_model = data_model
self.candidates = candidates
self.current_predicates = ()
self._cached_labels = None
self._old_dupes = []
self.block_learner: training.BlockLearner
def fit_transform(self, pairs, y):
dupes = [pair for label, pair in zip(y, pairs) if label]
new_dupes = [pair for pair in dupes if pair not in self._old_dupes]
new_uncovered = (not all(self.predict(new_dupes)))
if new_uncovered:
self.current_predicates = self.block_learner.learn(dupes,
recall=1.0)
self._cached_labels = None
self._old_dupes = dupes
def candidate_scores(self):
if self._cached_labels is None:
labels = self.predict(self.candidates)
self._cached_labels = numpy.array(labels).reshape(-1, 1)
return self._cached_labels
def predict(self, candidates):
labels = []
for record_1, record_2 in candidates:
for predicate in self.current_predicates:
keys = predicate(record_2, target=True)
if keys:
if set(predicate(record_1)) & set(keys):
labels.append(1)
break
else:
labels.append(0)
return labels
def _remove(self, index):
if self._cached_labels is not None:
self._cached_labels = numpy.delete(self._cached_labels,
index,
axis=0)
class DedupeBlockLearner(BlockLearner):
def __init__(self, data_model,
candidates,
data,
index_include):
super().__init__(data_model, candidates)
index_data = Sample(data, 50000)
sampled_records = Sample(index_data, 5000)
preds = self.data_model.predicates()
self.block_learner = training.DedupeBlockLearner(preds,
sampled_records,
index_data)
examples_to_index = candidates.copy()
if index_include:
examples_to_index += index_include
self._index_predicates(examples_to_index)
def _index_predicates(self, candidates):
blocker = self.block_learner.blocker
records = core.unique((record for pair in candidates for record in pair))
for field in blocker.index_fields:
unique_fields = {record[field] for record in records}
blocker.index(unique_fields, field)
for pred in blocker.index_predicates:
pred.freeze(records)
class RecordLinkBlockLearner(BlockLearner):
def __init__(self,
data_model,
candidates,
data_1,
data_2,
index_include):
super().__init__(data_model, candidates)
sampled_records_1 = Sample(data_1, 600)
index_data = Sample(data_2, 50000)
sampled_records_2 = Sample(index_data, 600)
preds = self.data_model.predicates(canopies=False)
self.block_learner = training.RecordLinkBlockLearner(preds,
sampled_records_1,
sampled_records_2,
index_data)
examples_to_index = candidates.copy()
if index_include:
examples_to_index += index_include
self._index_predicates(examples_to_index)
def _index_predicates(self, candidates):
blocker = self.block_learner.blocker
A, B = zip(*candidates)
A = core.unique(A)
B = core.unique(B)
for field in blocker.index_fields:
unique_fields = {record[field] for record in B}
blocker.index(unique_fields, field)
for pred in blocker.index_predicates:
pred.freeze(A, B)
class DisagreementLearner(ActiveLearner):
classifier: RLRLearner
blocker: BlockLearner
candidates: List[TrainingExample]
def _common_init(self):
self.learners = (self.classifier, self.blocker)
self.y = numpy.array([])
self.pairs = []
def pop(self) -> TrainingExample:
if not len(self.candidates):
raise IndexError("No more unlabeled examples to label")
probs_l = []
for learner in
#!/usr/bin/env python2
from gimpfu import *
import time
import re
def preview (image, delay, loops, force_delay, ignore_hidden, restore_hide):
if not image:
raise "No image given."
layers = image.layers
nlayers = len (layers)
visible = []
length = []
i = 0
while i < nlayers:
visible += [pdb.gimp_item_get_visible (layers [i])]
if visible [i]:
pdb.gimp_item_set_visible (layers [i], False)
name = pdb.gimp_item_get_name (layers [i])
l = None
if not force_delay:
l = re.search ("\([0-9]+ms\)", name)
if l:
l = tuple (map (sum, zip (l.span (), tuple ([+1, -3]))))
l = name [slice (*l)]
if not l:
l = delay
length += [float (l) / 1000.0]
i += 1
j = 0
while j < loops:
while i > 0:
i -= 1
if (not ignore_hidden) or visible [i]:
pdb.gimp_item_set_visible (layers [i], True)
pdb.gimp_displays_flush ()
time.sleep (length [i])
j += 1
# hides everything again before the next loop
if j < loops:
while i < nlayers:
if (not ignore_hidden) or visible [i]:
pdb.gimp_item_set_visible (layers [i], False)
i += 1
else:
i = nlayers
i = nlayers
if restore_hide:
while i > 0:
i -= 1
if visible [i]:
pdb.gimp_item_set_visible (layers [i], True)
register(
"preview",
"preview",
"Preview the animation of a gif",
"Roger Bongers",
"Roger Bongers",
"2016",
"Preview...",
"*",
[
(PF_IMAGE, "image", "The image to modify", None),
(PF_INT32, "delay", "The default length in ms o | f each frame", 100),
(PF_INT32, "loops", "The number of times to loop the animation", 1),
(PF_BOOL, "force-delay", "Force the default length on every frame", 0),
(PF_BOOL, "ignore-hidden", "Ignore currently hidden items", 0),
(PF_BOOL, "restore-hide", "Restore the hidden status after preview", 0),
],
[],
preview,
menu = "<Image>/Filters/Animation")
main()
|
# coding: utf-8
"""
Provides functions for finding and testing for locally `(k, l)`-connected
graphs.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['kl_connected_subgraph', 'is_kl_connected']
import copy
import networkx as nx
def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False):
"""Returns the maximum locally `(k, l)`-connected subgraph of ``G``.
A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
graph there are at least `l` edge-disjoint paths of length at most `k`
joining `u` to `v`.
Parameters
----------
G : NetworkX graph
The graph in which to find a maximum locally `(k, l)`-connected
subgraph.
k : integer
The maximum length of paths to consider. A higher number means a looser
connectivity requirement.
l : integer
The number of edge-disjoint paths. A higher number means a stricter
connectivity requirement.
low_memory : bool
If this is ``True``, this function uses an algorithm that uses slightly
more time but less memory.
same_as_graph : bool
If this is ``True`` then return a tuple of the form ``(H, is_same)``,
where ``H`` is the maximum locally `(k, l)`-connected subgraph and
``is_same`` is a Boolean representing whether ``G`` is locally `(k,
l)`-connected (and hence, whether ``H`` is simply a copy of the input
graph ``G``).
Returns
-------
NetworkX graph or two-tuple
If ``same_as_graph`` is ``True``, then this function returns a
two-tuple as described above. Otherwise, it returns only the maximum
locally `(k, l)`-connected subgraph.
See also
--------
is_kl_connected
References
----------
.. [1]: Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
2004. 89--104.
"""
H=copy.deepcopy(G) # subgraph we construct by removing from G
graphOK=True
deleted_some=True # hack to start off the while loop
while deleted_some:
deleted_some=False
for edge in H.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
[verts.update(G.neighbors(w)) for w in verts.copy()]
G2=G.subgraph(list(verts))
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
# record edges along this graph
prev=u
for w in path:
if prev!=w:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
H.remove_edge(u,v)
deleted_some=True
if graphOK: graphOK=False
# We looked through all edges and removed none of them.
# So, H is the maximal (k,l)-connected subgraph of G
if same_as_graph:
return (H,graphOK)
return H
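# A minimal usage sketch (added for illustration; the graph and parameters are
# arbitrary choices, not part of the original module). On a complete graph every
# edge has its direct path plus several length-2 detours, so for k=2, l=2 the
# whole graph should survive and is_kl_connected (defined below) should be True:
#
#   >>> G = nx.complete_graph(5)
#   >>> H = kl_connected_subgraph(G, 2, 2)
#   >>> H.number_of_edges() == G.number_of_edges()
#   True
#   >>> is_kl_connected(G, 2, 2)
#   True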
def is_kl_connected(G, k, l, low_memory=False):
"""Returns ``True`` if and only if ``G`` is locally `(k, l)`-connected.
A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
graph there are at least `l` edge-disjoint paths of length at most `k`
joining `u` to `v`.
Parameters
----------
G : NetworkX graph
The graph to test for local `(k, l)`-connectedness.
k : integer
The maximum length of paths to consider. A higher number means a looser
connectivity requirement.
l : integer
The number of edge-disjoint paths. A higher number means a stricter
connectivity requirement.
low_memory : bool
If this is ``True``, this function uses an algorithm that uses slightly
more time but less memory.
Returns
-------
bool
Whether the graph is locally `(k, l)`-connected.
See also
--------
kl_connected_subgraph
References
----------
.. [1]: Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
2004. 89--104.
"""
graphOK=True
for edge in G.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
[verts.update(G.neighbors(w)) for w in verts.copy()]
G2=G.subgraph(verts)
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
# record edges along this graph
prev=u
for w in path:
if w!=prev:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
graphOK=False
break
# return status
return graphOK
|
)
assert connection.session_id == -1
# ##################
conn_msg = ConnectMessage( connection )
session_id = conn_msg.prepare( ("root", "root") )\
.send().fetch_response()
assert session_id == connection.session_id
assert session_id != -1
try:
count_msg = DbCountRecordsMessage( connection )
res = count_msg.prepare().send().fetch_response()
assert False # we expect an exception because we need a db opened
except PyOrientDatabaseException:
assert True
def test_record_count(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
msg = DbOpenMessage( connection )
db_name = "GratefulDeadConcerts"
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
session_id = connection.session_id
assert session_id != -1
count_msg = DbCountRecordsMessage( connection )
res = count_msg.prepare().send().fetch_response()
assert res != 0
assert res > 0
def test_record_create_update(self):
connection = OrientSocket( "localhost", 2424 )
conn_msg = ConnectMessage( connection )
assert connection.protocol != -1
session_id = conn_msg.prepare( ("root", "root") ) \
.send().fetch_response()
assert session_id == connection.session_id
assert session_id != -1
# ##################
db_name = "my_little_test"
msg = DbExistsMessage( connection )
exists = msg.prepare( [db_name] ).send().fetch_response()
print("Before %r" % exists)
try:
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
assert True
except PyOrientCommandException as e:
print(str(e))
finally:
( DbCreateMessage( connection ) ).prepare(
(db_name, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY)
).send().fetch_response()
msg = DbOpenMessage( connection )
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_GRAPH, "")
).send().fetch_response()
assert len(cluster_info) != 0
try:
create_class = CommandMessage(connection)
cluster = create_class.prepare((QUERY_CMD, "create class my_class "
| "extends V"))\
.send().fetch_response()[0]
except PyOrientCommandException:
# class my_class already exists
pass
# classes are not allowed in record create/update/load
rec = { '@my_class': { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' } }
rec_position = ( RecordCreateMessage(connection) )\
.prepare( ( cluster, rec ) )\
.send().fetch_response()
print("New Rec Position: %s" % rec_position._rid)
assert rec_position._rid is not None
rec = { '@my_class': { 'alloggio': 'albergo', 'lavoro': 'ufficio', 'vacanza': 'montagna' } }
update_success = ( RecordUpdateMessage(connection) )\
.prepare( ( cluster, rec_position._rid, rec ) )\
.send().fetch_response()
assert update_success[0] != 0
if connection.protocol <= 21:
return unittest.skip("Protocol {!r} does not works well".format(
connection.protocol )) # skip test
res = ( CommandMessage( connection ) )\
.prepare( [ QUERY_SYNC, "select from " + rec_position._rid ] )\
.send().fetch_response()
# res = [ ( RecordLoadMessage(connection) ).prepare(
# [ rec_position._rid ]
# ).send().fetch_response() ]
print("%r" % res[0]._rid)
print("%r" % res[0]._class)
print("%r" % res[0]._version)
print("%r" % res[0].alloggio)
print("%r" % res[0].lavoro)
print("%r" % res[0].vacanza)
assert res[0]._rid == '#11:0'
# assert res[0]._class == 'my_class'
assert res[0]._version >= 0
assert res[0].alloggio == 'albergo'
assert res[0].lavoro == 'ufficio'
assert res[0].vacanza == 'montagna'
sid = ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
.send().fetch_response()
# at the end drop the test database
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
def test_record_delete(self):
connection = OrientSocket( "localhost", 2424 )
conn_msg = ConnectMessage( connection )
assert connection.protocol != -1
session_id = conn_msg.prepare( ("root", "root") ) \
.send().fetch_response()
print("Sid: %s" % session_id)
assert session_id == connection.session_id
assert session_id != -1
db_name = "my_little_test"
msg = DbExistsMessage( connection )
exists = msg.prepare( [db_name] ).send().fetch_response()
print("Before %r" % exists)
try:
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
assert True
except PyOrientCommandException as e:
print(str(e))
finally:
( DbCreateMessage( connection ) ).prepare(
(db_name, DB_TYPE_DOCUMENT, STORAGE_TYPE_MEMORY)
).send().fetch_response()
msg = DbOpenMessage( connection )
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
rec = { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' }
rec_position = ( RecordCreateMessage(connection) )\
.prepare( ( 1, rec ) )\
.send().fetch_response()
print("New Rec Position: %s" % rec_position._rid)
assert rec_position._rid is not None
######################## Check Success
res = ( CommandMessage( connection ) )\
.prepare( [ QUERY_SYNC, "select from " + str(rec_position._rid) ] )\
.send().fetch_response()
import re
assert re.match( '#1:[0-9]', res[0]._rid )
assert res[0]._class is None
assert res[0]._version >= 0
assert res[0].alloggio == 'casa'
assert res[0].lavoro == 'ufficio'
assert res[0].vacanza == 'mare'
######################## Delete Rid
del_msg = (RecordDeleteMessage(connection))
deletion = del_msg.prepare( ( 1, rec_position._rid ) )\
.send().fetch_response()
assert deletion is True
# now try a failure in deletion for wrong rid
del_msg = (RecordDeleteMessage(connection))
deletion = del_msg.prepare( ( 1, 11111 ) )\
.send().fetch_response()
assert deletion is False
sid = ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
.send().fetch_response()
# at the end drop the test database
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
def test_data_cluster_count(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
msg = DbOpenMessage( connection )
db_name = "GratefulDeadConcerts"
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
print(cluster_info)
assert len(cluster_info) != 0
assert connection.session_id != -1
count_msg = DataClusterCountMessage( connection )
res1 = count_msg.set_count_tombstones(1)\
.prepare( [ (0,1,2,3,4,5) ] ).send().fetch_response()
assert res1 != 0
assert res1 > 0
count_msg = DataClusterCountMessage( connection |
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
class InvalidFormula(Exception):
pass
class InvalidFormulaComponent(InvalidFormula):
pass
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFile
import parlai.core.build_data as build_data
import os
RESOURCES = [
DownloadableFile(
'http://parl.ai/downloads/dialog_babi/dialog_babi.tar.gz',
'dialog_babi.tar.gz',
'bb36155ccd41eac91f806446c5728ee90374e5596156a9f7c1b86f8342cfc383',
)
]
def build(opt):
dpath = os.path.join(opt['datapath'], 'dialog-bAbI')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
|
# -*- coding: UTF-8 -*-
from django.conf import settings as dsettings
from django.contrib.auth import models as authModels
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse, Http404
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import slugify
from microblog import models, settings
from taggit.models import Tag, TaggedItem
from decorator import decorator
try:
import json
except ImportError:
import simplejson as json
def render_json(f):
"""
Decorator to apply to a view in order to serialize its result as JSON.
"""
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json.dumps(d, indent=2)
else:
ct = 'application/json'
j = json.dumps
def wrapper(func, *args, **kw):
try:
result = func(*args, **kw)
except Exception, e:
result = j(str(e))
status = 500
else:
if isinstance(result, HttpResponse):
return result
else:
result = j(result)
status = 200
return HttpResponse(content=result, content_type=ct, status=status)
return decorator(wrapper, f)
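# A hedged usage sketch (the view name and payload are hypothetical, not part of
# this module): wrapping a plain function-based view with render_json makes it
# return a JSON HttpResponse, pretty-printed whenever DEBUG is enabled.
#
#   @render_json
#   def post_count(request):
#       return {'count': models.Post.objects.count()}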
def post_list(request):
return render(request, 'microblog/post_list.html', {})
def category(request, category):
category = get_object_or_404(models.Category, name=category)
return render_to_response(
'microblog/category.html',
{
'category': category,
},
context_instance=RequestContext(request)
)
def post_list_by_year(request, year, month=None):
return render_to_response(
'microblog/list_by_year.html',
{
'year': year,
'month': month,
},
context_instance=RequestContext(request)
)
def tag(request, tag):
tag = get_object_or_404(Tag, name=tag)
return render_to_response(
'microblog/tag.html',
{
'tag': tag,
},
context_instance=RequestContext(request)
)
def author(request, author):
user = [
u for u in authModels.User.objects.all()
if slugify('%s-%s' % (u.first_name, u.last_name)) == author
]
if not user:
raise Http404()
else:
user = user[0]
return render_to_response(
'microblog/author.html',
{
'author': user,
},
context_instance=RequestContext(request)
)
def _paginate_posts(post_list, request):
if settings.MICROBLOG_POST_LIST_PAGINATION:
paginator = Paginator(post_list, settings.MICROBLOG_POST_PER_PAGE)
try:
page = int(request.GET.get("page", "1"))
except ValueError:
page = 1
try:
posts = paginator.page(page)
except (EmptyPage, InvalidPage):
posts = paginator.page(1)
else:
paginator = Paginator(post_list, len(post_list) or 1)
posts = paginator.page(1)
return posts
def _posts_list(request, featured=False):
if settings.MICROBLOG_LANGUAGE_FALLBACK_ON_POST_LIST:
lang = None
else:
lang = request.LANGUAGE_CODE
return models.Post.objects\
.byLanguage(lang)\
.byFeatured(featured)\
.published()
def _post_detail(request, content):
if not settings.MICROBLOG_POST_FILTER([content.post], request.user):
raise Http404()
return render_to_response(
'microblog/post_detail.html',
{
'post': content.post,
'content': content
},
context_instance=RequestContext(request)
)
def _trackback_ping(request, content):
def success():
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>0</error></response>')
return HttpResponse(content=x, content_type='text/xml')
def failure(message=''):
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>1</error><message>%s</message></response>') % message
return HttpResponse(content=x, content_type='text/xml', status=400)
if request.method != 'POST':
return failure('only POST method is supported')
if not request.POST.get('url'):
return failure('url argument is mandatory')
t = {
'url': request.POST['url'],
'blog_name': request.POST.get('blog_name', ''),
'title': request.POST.get('title', ''),
'excerpt': request.POST.get('excerpt', ''),
}
from microblog.moderation import moderate
if not moderate(request, 'trackback', t['title'], url=t['url']):
return failure('moderated')
content.new_trackback(**t)
return success()
@render_json
def _comment_count(request, content):
post = content.post
if settings.MICROBLOG_COMMENT == 'comment':
import django_comments as comments
from django.contrib.contenttypes.models import ContentType
model = comments.get_model()
q = model.objects.filter(
content_type=ContentType.objects.get_for_model(post),
object_pk=post.id,
is_public=True
)
return q.count()
else:
import httplib2
from urllib import quote
h = httplib2.Http()
params = {
'forum_api_key': settings.MICROBLOG_COMMENT_DISQUS_FORUM_KEY,
'url': content.get_url(),
}
args = '&'.join('%s=%s' % (k, quote(v)) for k, v in params.items())
url = settings.MICROBLOG_COMMENT_DISQUS_API_URL + 'get_thread_by_url?%s' % args
resp, page = h.request(url)
if resp.status != 200:
return -1
page = json.loads(page)
if not page['succeeded']:
return -1
elif page['message'] is None:
return 0
else:
return page['message']['num_comments']
def _post404(f):
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except models.PostContent.DoesNotExist:
raise Http404()
return wrapper
if settings.MICROBLOG_URL_STYLE == 'date':
def _get(slug, year, month, day):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndDate(slug, year, month, day)
@_post404
def post_detail(request, year, month, day, slug):
return _post_detail(
request,
content=_get(slug, year, month, day)
)
@_post404
def trackback_ping(request, year, month, day, slug):
return _trackback_ping(
request,
content=_get(slug, year, month, day)
)
@_post404
def comment_count(request, year, month, day, slug):
return _comment_count(
request,
content = _get(slug, year, month, day)
)
elif settings.MICROBLOG_URL_STYLE == 'category':
def _get(slug, category):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndCategory(slug, category)
@_post404
def post_detail(request, category, slug):
return _post_detail(
request,
content=_get(slug, category),
)
@_post404
def trackback_ping(request, category, slug):
return _trackback_ping(
request,
content=_get(slug, category),
)
@_post404
def comment_count(request, category, slug):
return _comment_count(
request,
content=_get(slug, category),
)
|
#!/usr/bin/env python
import os
import numpy as np
import math
import fnmatch
from my_spectrogram import my_specgram
from collections import OrderedDict
from scipy.io import wavfile
import matplotlib.pylab as plt
from pylab import rcParams
from sklearn.model_selection import train_test_split
rcParams['figure.figsize'] = 6, 3
SCRIPT_DIR = os.getcwd()
INPUT_FOLDER = 'Input_audio_wav_16k/'
OUTPUT_FOLDER = 'Input_spectrogram_16k/'
languages = os.listdir(INPUT_FOLDER)
languages.sort()
audio_dict = OrderedDict()
for l in languages:
audio_dict[l] = sorted(os.listdir(INPUT_FOLDER + l))
def plot_spectrogram(audiopath, plotpath=None, NFFT_window=0.025,
noverlap_window=0.023, freq_min=None, freq_max=None,
axis='off'):
fs, data = wavfile.read(audiopath)
data = data / data.max()
center = data.mean() * 0.2
data = data + np.random.normal(center, abs(center * 0.5), len(data))
NFFT = pow(2, int(math.log(int(fs*NFFT_window), 2) + 0.5)) # 25ms window, nearest power of 2
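# Worked example (assuming fs = 16000 Hz, as the 16k folder names suggest):
# int(16000 * 0.025) = 400 samples, log2(400) ~ 8.64, rounding the exponent
# gives 9, so NFFT = 2**9 = 512.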
noverlap = int(fs*noverlap_window)
fc = int(np.sqrt(freq_min*freq_max))
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
Pxx, freqs, bins, im = my_specgram(data, NFFT=NFFT, Fs=fs,
Fc=fc, detrend=None,
window=np.hanning(NFFT),
noverlap=noverlap, cmap='Greys',
xextent=None,
pad_to=None, sides='default',
scale_by_freq=None,
minfreq=freq_min, maxfreq=freq_max)
plt.axis(axis)
im.axes.axis('tight')
im.axes.get_xaxis().set_visible(False)
im.axes.get_yaxis().set_visible(False)
if plotpath:
plt.savefig(plotpath, bbox_inches='tight',
transparent=False, pad_inches=0, dpi=96)
else:
plt.show()
plt.clf()
# same as training but no added noise
def plot_spectrogram_val(audiopath, plotpath=None, NFFT_window=0.025,
noverlap_window=0.023, freq_min=None, freq_max=None,
axis='off'):
fs, data = wavfile.read(audiopath)
data = data / data.max()
NFFT = pow(2, int(math.log(int(fs*NFFT_window), 2) + 0.5)) # 25ms window, nearest power of 2
noverlap = int(fs*noverlap_window)
fc = int(np.sqrt(freq_min*freq_max))
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
Pxx, freqs, bins, im = my_specgram(data, NFFT=NFFT, Fs=fs,
Fc=fc, detrend=None,
window=np.hanning(NFFT),
noverlap=noverlap, cmap='Greys',
xextent=None,
pad_to=None, sides='default',
scale_by_freq=None,
minfreq=freq_min, maxfreq=freq_max)
plt.axis(axis)
im.axes.axis('tight')
im.axes.get_xaxis().set_visible(False)
im.axes.get_yaxis().set_visible(False)
if plotpath:
plt.savefig(plotpath, bbox_inches='tight',
transparent=False, pad_inches=0, dpi=96)
else:
plt.show()
plt.clf()
# create spectrograms of randomly drawn samples from each language
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result[0]
random_wav = []
for key in audio_dict:
random_wav.append(sorted(np.random.choice(audio_dict[key], 500, replace=False)))
training_list = []
validation_list = []
for i in range(0, len(random_wav)):
x_train, x_val = train_test_split(random_wav[i],
test_size=0.4,
random_state=42)
training_list.append(x_train)
validation_list.append(x_val)
if not os.path.exists(OUTPUT_FOLDER + 'Training'):
os.makedirs(OUTPUT_FOLDER + 'Training')
print('Successfully created a training folder!')
print('Populating training folder with spectrograms...')
for i in range(0, len(training_list)):
if not os.path.exists(OUTPUT_FOLDER + 'Training/' + str(languages[i])):
os.makedirs(OUTPUT_FOLDER + 'Training/' + str(languages[i]))
print('Successfully created a {} training folder!'.format(languages[i]))
print('Populating {} training folder with spectrograms...'.format(languages[i]))
for j in range(0, len(training_list[i])):
for k in range(0, 3):
plot_spectrogram(find(training_list[i][j], INPUT_FOLDER),
plotpath=OUTPUT_FOLDER + 'Training/' +
str(languages[i]) + '/' +
str(training_list[i][j][:-4]) + '_' +
str(k) + '.jpeg',
NFFT_window=0.025, noverlap_window=0.023,
freq_min=0, freq_max=5500)
print('Done with {}.'.format(training_list[i][j][:-4]))
if not os.path.exists(OUTPUT_FOLDER + 'Validation'):
os.makedirs(OUTPUT_FOLDER + 'Validation')
print('Successfully created a validation folder!')
print('Populating validation folder with spectrograms...')
for i in range(0, len(validation_list)):
if not os.path.exists(OUTPUT_FOLDER + 'Validation/' + str(languages[i])):
os.makedirs(OUTPUT_FOLDER + 'Validation/' + str(languages[i]))
print('Successfully created a {} validation folder!'.format(languages[i]))
print('Populating {} validation folder with spectrograms...'.format(languages[i]))
for j in range(0, len(validation_list[i])):
for k in range(0, 1):
plot_spectrogram_val(find(validation_list[i][j], INPUT_FOLDER),
plotpath=OUTPUT_FOLDER + 'Validation/' +
str(languages[i]) + '/' +
str(validation_list[i][j][:-4]) + '_' +
str(k) + '.jpeg',
NFFT_window=0.025, noverlap_window=0.023,
freq_min=0, freq_max=5500)
print('Done with {}.'.format(validation_list[i][j][:-4]))
|
if 'b' not in self.override_flags:
self.argparser.add_argument('-b', '--doublequote', dest='doublequote', action='store_true',
help='Whether or not double quotes are doubled in the input CSV file.')
if 'p' not in self.override_flags:
self.argparser.add_argument('-p', '--escapechar', dest='escapechar',
help='Character used to escape the delimiter if --quoting 3 ("Quote None") is specified and to escape the QUOTECHAR if --doublequote is not specified.')
if 'z' not in self.override_flags:
self.argparser.add_argument('-z', '--maxfieldsize', dest='maxfieldsize', type=int,
help='Maximum length of a single field in the input CSV file.')
if 'e' not in self.override_flags:
self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8',
help='Specify the encoding of the input CSV file.')
if 'S' not in self.override_flags:
self.argparser.add_argument('-S', '--skipinitialspace', dest='skipinitialspace', default=False, action='store_true',
help='Ignore whitespace immediately following the delimiter.')
if 'H' not in self.override_flags:
self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
help='Specifies that the input CSV file has no header row. Will create default headers.')
if 'v' not in self.override_flags:
self.argparser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Print detailed tracebacks when errors occur.')
# Output
if 'l' not in self.override_flags:
self.argparser.add_argument('-l', '--linenumbers', dest='line_numbers', action='store_true',
help='Insert a column of line numbers at the front of the output. Useful when piping to grep or as a simple primary key.')
# Input/Output
if 'zero' not in self.override_flags:
self.argparser.add_argument('--zero', dest='zero_based', action='store_true',
help='When interpreting or displaying column numbers, use zero-based numbering instead of the default 1-based numbering.')
def _extract_csv_reader_kwargs(self):
"""
Extracts those command-line arguments that should be passed through to the input CSV reader(s).
"""
kwargs = {}
if self.args.encoding:
kwargs['encoding'] = self.args.encoding
if self.args.tabs:
kwargs['delimiter'] = '\t'
elif self.args.delimiter:
kwargs['delimiter'] = self.args.delimiter
if self.args.quotechar:
kwargs['quotechar'] = self.args.quotechar
if self.args.quoting:
kwargs['quoting'] = self.args.quoting
if self.args.doublequote:
kwargs['doublequote'] = self.args.doublequote
if self.args.escapechar:
kwargs['escapechar'] = self.args.escapechar
if self.args.maxfieldsize:
kwargs['maxfieldsize'] = self.args.maxfieldsize
if self.args.skipinitialspace:
kwargs['skipinitialspace'] = self.args.skipinitialspace
return kwargs
def _extract_csv_writer_kwargs(self):
"""
Extracts those command-line arguments that should be passed through to the output CSV writer.
"""
kwargs = {}
if 'l' not in self.override_flags and self.args.line_numbers:
kwargs['line_numbers'] = True
return kwargs
def _install_exception_handler(self):
"""
Installs a replacement for sys.excepthook, which handles pretty-printing uncaught exceptions.
"""
def handler(t, value, traceback):
if self.args.verbose:
sys.__excepthook__(t, value, traceback)
else:
# Special case handling for Unicode errors, which behave very strangely
# when cast with unicode()
if t == UnicodeDecodeError:
sys.stderr.write('Your file is not "%s" encoded. Please specify the correct encoding with the -e flag. Use the -v flag to see the complete error.\n' % self.args.encoding)
else:
sys.stderr.write('%s\n' % unicode(value).encode('utf-8'))
sys.excepthook = handler
def print_column_names(self):
"""
Pretty-prints the names and indices of all columns to a file-like object (usually sys.stdout).
"""
if self.args.no_header_row:
raise RequiredHeaderError, 'You cannot use --no-header-row with the -n or --names options.'
f = self.args.file
output = self.output_file
try:
zero_based=self.args.zero_based
except:
zero_based=False
rows = CSVKitReader(f, **self.reader_kwargs)
column_names = rows.next()
for i, c in enumerate(column_names):
if not zero_based:
i += 1
output.write('%3i: %s\n' % (i, c))
def match_column_identifier(column_names, c, zero_based=False):
"""
Determine what column a single column id (name or index) matches in a series of column names.
Note that integer values are *always* treated as positional identifiers. If you happen to have
column names which are also integers, you must specify them using a positional index.
"""
if isinstance(c, basestring) and not c.isdigit() and c in column_names:
return column_names.index(c)
else:
try:
c = int(c)
if not zero_based:
c -= 1
# Fail out if neither a column name nor an integer
except:
raise ColumnIdentifierError('Column identifier "%s" is neither an integer, nor an existing column\'s name.' % c)
# Fail out if index is 0-based
if c < 0:
raise ColumnIdentifierError('Column 0 is not valid; columns are 1-based.')
# Fail out if index is out of range
if c >= len(column_names):
raise ColumnIdentifierError('Index %i is beyond the last named column, "%s" at index %i.' % (c, column_names[-1], len(column_names) - 1))
return c
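# Illustrative behaviour sketch (the column names are made up): exact name
# matches win outright, digit strings are treated as 1-based positions unless
# zero_based is set.
#
#   >>> names = ['id', 'name', 'total']
#   >>> match_column_identifier(names, 'name')
#   1
#   >>> match_column_identifier(names, '3')
#   2
#   >>> match_column_identifier(names, '1', zero_based=True)
#   1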
def parse_column_identifiers(ids, column_names, zero_based=False, excluded_columns=None):
"""
Parse a comma-separated list of column indices AND/OR names into a list of integer indices.
Ranges of integers can be specified with two integers separated by a '-' or ':' character. Ranges of
non-integers (e.g. column names) are not supported.
Note: Column indices are 1-based.
"""
columns = []
# If not specified, start with all columns
if not ids:
columns = range(len(column_names))
if columns and not excluded_columns:
return columns
if not columns:
for c in ids.split(','):
c = c.strip()
try:
columns.append(match_column_identifier(column_names, c, zero_based))
except ColumnIdentifierError:
if ':' in c:
a,b = c.split(':',1)
elif '-' in c:
a,b = c.split('-',1)
else:
raise
try:
if a:
a = int(a)
else:
a = 1
if b:
b = int(b) + 1
else:
b = len(column_names)
except ValueError:
raise ColumnIdentifierError("Invalid range %s. Ranges must be two integers separated by a - or : character.")
for x in range(a,b):
columns.append(match_column_identifier(column_names, x, zero_based))
excludes = []
if excluded_columns:
for c in excluded_columns:
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sun OS specific tests. These are implicitly run by test_psutil.py."""
import psutil
from test_psutil import *
class SunOSSpecificTestCase(unittest.TestCase):
def test_swap_memory(self):
out = sh('swap -l -k')
lines = out.strip().split('\n')[1:]
if not lines:
raise ValueError('no swap device(s) configured')
total = free = 0
for line in lines:
line = line.split()
t, f = line[-2:]
t = t.replace('K', '')
f = f.replace('K', '')
total += int(int(t) * 1024)
free += int(int(f) * 1024)
used = total - free
psutil_swap = psutil.swap_memory()
self.assertEqual(psutil_swap.total, total)
self.assertEqual(psutil_swap.used, used)
self.assertEqual(psutil_swap.free, free)
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(SunOSSpecificTestCase))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not test_main():
sys.exit(1)
|
#!/usr/bin/env python
# Blink an LED using the RPi.GPIO library.
import RPi.GPIO as GPIO
from time import sleep
# Use GPIO numbering:
GPIO.setmode(GPIO.BCM)
# Set pin GPIO 14 to be output:
GPIO.setup(14, GPIO.OUT)
try:
while True:
GPIO.output(14, GPIO.HIGH)
sleep(.5)
GPIO.output(14, GPIO.LOW)
sleep(.5)
# If we get a Ctrl-C, clean up so we don't get warnings from other programs:
except KeyboardInterrupt:
GPIO.cleanup()
|
#!/usr/bin/python3
import os
import sys
import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lutris.util.wineregistry import WineRegistry
PREFIXES_PATH = os.path.expanduser("~/Games/wine/prefixes")
def get_registries():
registries = []
directories = os.listdir(PREFIXES_PATH)
directories.append(os.path.expanduser("~/.wine"))
for prefix in directories:
for path in os.listdir(os.path.join(PREFIXES_PATH, prefix)):
if path.endswith(".reg"):
registries.append(os.path.join(PREFIXES_PATH, prefix, path))
return registries
def check_registry(registry_path):
with open(registry_path, 'r') as registry_file:
original_content = registry_file.read()
try:
registry = WineRegistry(registry_path)
except:
sys.stderr.write("Error parsing {}\n".format(registry_path))
raise
content = registry.render()
if content != original_content:
wrong_path = os.path.join(os.path.dirname(__file__), 'error.reg')
with open(wrong_path, 'w') as wrong_reg:
wrong_reg.write(content)
print("Content of parsed registry doesn't match: {}".format(registry_path))
subprocess.call(["meld", registry_path, wrong_path])
sys.exit(2)
registries = get_registries()
for registry in registries:
check_registry(registry)
print("All {} registry files validated!".format(len(registries)))
|
from __future__ import print_function
from numpy import pi, arange, sin
import numpy as np
import time
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
Plot, DataRange1d, DatetimeAxis,
ColumnDataSource, PanTool, WheelZoomTool
)
from bokeh.resources import INLINE
x = arange(-2 * pi, 2 * pi, 0.1)
y = sin(x)
# Create an array of times, starting at the current time, and extending
# for len(x) number of hours.
times = np.arange(len(x)) * 3600000 + time.time()
source = ColumnDataSource(
data=dict(x=x, y=y, times=times)
)
xdr = DataRange1d(sources=[source.columns("times")])
ydr = DataRange1d(sources=[source.columns("y")])
plot = Plot(x_range=xdr, y_range=ydr, min_border=80)
circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black")
plot.add_glyph(source, circle)
plot.add_layout(DatetimeAxis(), 'below')
plot.add_layout(DatetimeAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "dateaxis.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Date Axis Example"))
print("Wrote %s" % filename)
view(filename)
|
import numpy as np
from square import Square
from constants import SQUARE_SIZE, BOARD_SIZE
class ChessboardFrame():
def __init__(self, img):
self.img = img
def square_at(self, i):
y = BOARD_SIZE - ((i // 8) % 8) * SQUARE_SIZE - SQUARE_SIZE
x = (i % 8) * SQUARE_SIZE
return Square(i, self.img[y:y+SQUARE_SIZE, x:x+SQUARE_SIZE, :])
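# Geometry sketch (SQUARE_SIZE=64 and BOARD_SIZE=512 are assumed values for an
# 8x8 board crop): square 0 maps to the bottom-left corner (x=0, y=448), squares
# 1-7 continue along the bottom row, and each block of 8 indices moves one row
# up the board.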
|
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
stack = []
length = len(num) - k
for c in num:
while k and stack and stack[-1] > c:
stack.pop()
k -= 1
stack.append(c)
return ''.join(stack[:length]).lstrip('0') or '0'
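# A quick, hedged sanity check (standard examples for this problem, not part of
# the original snippet): the greedy stack drops a larger digit whenever a
# smaller one follows and removals remain, keeping the kept prefix as small as
# possible.
if __name__ == '__main__':
    assert Solution().removeKdigits("1432219", 3) == "1219"
    assert Solution().removeKdigits("10200", 1) == "200"
    assert Solution().removeKdigits("10", 2) == "0"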
""" Manage the TVTK scenes. """
# Enthought library imports.
from tvtk.pyface.tvtk_scene import TVTKScene
from pyface.workbench.api import WorkbenchWindow
from traits.api import HasTraits, List, Instance, Property
from traits.api import implements, on_trait_change
from tvtk.plugins.scene.scene_editor import SceneEditor
# Local imports.
from i_scene_manager import ISceneManager
class SceneManager(HasTraits):
""" Manage the TVTK scenes. """
implements(ISceneManager)
#### 'SceneManager' interface #############################################
# The currently active scene (None, if no scene is active).
current_scene = Property(Instance(TVTKScene))
# A list of all open scenes.
scenes = List(TVTKScene)
# The workbench window that the manager is in (there is one scene manager
# per workbench window).
window = Instance(WorkbenchWindow)
#### Private interface ####################################################
# Shadow trait for the 'current_scene' property.
_current_scene = Instance(TVTKScene)
###########################################################################
# 'SceneManager' interface.
###########################################################################
#### Trait properties #####################################################
def _get_current_scene(self):
""" Property getter. """
scene_count = len(self.scenes)
if scene_count == 0:
scene = None
elif scene_count == 1:
scene = self.scenes[0]
else:
scene = self._current_scene
return scene
def _set_current_scene(self, scene):
""" Property setter. """
self._current_scene = scene
return
#### Trait change handlers ################################################
@on_trait_change('window:editor_opened')
def _on_editor_opened(self, obj, trait_name, old, new):
""" Dynam | ic trait change handler. """
if isinstance(new, SceneEditor):
self.scenes.append(new.scene)
return
@on_trait_change('window:editor_closing')
def _on_editor_closed(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.scenes.remove(new.scene)
return
@on_trait_change('window:active_editor')
def _on_active_editor_changed(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.current_scene = new.scene
else:
self.current_scene = None
return
#### EOF ######################################################################
|
#! /usr/bin/env python
import sys
g = {}
n = {}
for line in sys.stdin:
(n1, n2, p, q, t, tg, x) = line.strip().split(' ')
t = int(t)
x = float(x)
key = ' '.join((n1,n2,p,q))
if not key in n:
n[key] = 0
g[key] = 0
n[key] += t
g[key] += x*t
for key in n:
print key, n[key], g[key]/n[key]
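# Illustrative input/output (the field names are assumptions): every stdin line
# is "n1 n2 p q t tg x"; lines sharing the first four fields are pooled, with t
# acting as a weight and x as the measurement, so
#   a b p1 q1 2 g 1.0
#   a b p1 q1 3 g 2.0
# prints "a b p1 q1 5 1.6" (total weight 5, weighted mean 8/5 = 1.6).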
|
ete>',self.remove_filter)
flsb.config(command=self.node_list.yview)
flsb.grid(row=r, column=4, sticky='NWS')
r += 1
tk.Button(self, text='Clear',command=self.remove_filter).grid(
row=r, column=1, sticky='W')
tk.Button(self, text='?', command=self.filter_help
).grid(row=r, column=4, sticky='NESW', padx=2)
r += 1
line2 = tk.Canvas(self, height=15, width=200)
line2.create_line(0,13,250,13)
line2.create_line(0,15,250,15)
line2.grid(row=r, column=1, columnspan=4, sticky='NESW')
r += 1
self.lbl_attr = tk.Label(self, text='Attributes',
wraplength=200, anchor=tk.SW, justify=tk.LEFT)
self.lbl_attr.grid(row=r, column=1, columnspan=4, sticky='NW')
r += 1
self.tbl_attr = PropertyTable(self, {})
self.tbl_attr.grid(row=r, column=1, columnspan=4, sticky='NESW')
assert r == bottom_row, "Set bottom_row to %d" % r
self._build_menu()
def _build_menu(self):
self.menubar = tk.Menu(self)
self.config(menu=self.menubar)
view = tk.Menu(self.menubar, tearoff=0)
view.add_command(label='Undo', command=self.canvas.undo, accelerator="Ctrl+Z")
self.bind_all("<Control-z>", lambda e: self.canvas.undo()) # Implement accelerator
view.add_command(label='Redo', command=self.canvas.redo)
view.add_separator()
view.add_command(label='Center on node...', command=self.center_on_node)
view.add_separator()
view.add_command(label='Reset Node Marks', command=self.reset_node_markings)
view.add_command(label='Reset Edge Marks', command=self.reset_edge_markings)
view.add_command(label='Redraw Plot', command=self.canvas.replot)
view.add_separator()
view.add_command(label='Grow display one level...', command=self.grow_all)
self.menubar.add_cascade(label='View', menu=view)
def center_on_node(self):
node = NodeDialog(self, "Name of node to center on:").result
if node is None: return
self.canvas.center_on_node(node)
def reset_edge_markings(self):
for u,v,k,d in self.canvas.dispG.edges(data=True, keys=True):
token = d['token']
if token.is_marked:
self.canvas.mark_edge(u,v,k)
def reset_node_markings(self):
for u,d in self.canvas.dispG.nodes(data=True):
token = d['token']
if token.is_marked:
self.canvas.mark_node(u)
def add_node(self, event=None):
node = self.node_entry.get()
if node.isdigit() and self.canvas.dataG.has_node(int(node)):
node = int(node)
if self.canvas.dataG.has_node(node):
self.node_list.insert(tk.END, node)
self.node_entry.delete(0, tk.END)
else:
tkm.showerror("Node not found", "Node '%s' not in graph."%node)
def add_filter(self, event=None, filter_lambda=None):
if filter_lambda is None:
filter_lambda = self.filter_entry.get()
if self.canvas.add_filter(filter_lambda):
# We successfully added the filter; add to list and clear entry
self.filter_list.insert(tk.END, filter_lambda)
self.filter_entry.delete(0, tk.END)
def filter_help(self, event=None):
msg = ("Enter a lambda function which returns True if you wish\n"
"to show nodes with ONLY a given property.\n"
"Parameters are:\n"
" - u, the node's name, and \n"
" - d, the data dictionary.\n\n"
"Example: \n"
" d.get('color',None)=='red'\n"
"would show only red nodes.\n"
"Example 2:\n"
" str(u).is_digit()\n"
"would show only nodes which have a numerical name.\n\n"
"Multiple filters are ANDed together.")
tkm.showinfo("Filter Condition", msg)
def remove_filter(self, event=None):
all_items = self.filter_list.get(0, tk.END)
if event is None:
# When no event passed, this function was called via the "clear"
# button.
items = all_items
else:
# Remove currently selected item
items = (self.filter_list.get(tk.ANCHOR),)
for item in items:
self.canvas.remove_filter(item)
idx = all_items.index(item)
self.filter_list.delete(idx)
all_items = self.filter_list.get(0, tk.END)
def grow_all(self):
"""Grow all visible nodes one level"""
for u, d in self.canvas.dispG.copy().nodes.items():
if not d['token'].is_complete:
self.canvas.grow_node(u)
def get_node_list(self):
"""Get nodes in the node list and clear"""
# See if we forgot to hit the plus sign
if len(self.node_entry.get()) != 0:
self.add_node()
nodes = self.node_list.get(0, tk.END)
self.node_list.delete(0, tk.END)
return nodes
def onBuildNew(self):
nodes = self.get_node_list()
if len(nodes) == 2:
self.canvas.plot_path(nodes[0], nodes[1], levels=self.level)
else:
self.canvas.plot(nodes, levels=self.level)
def onAddToExisting(self):
"""Add nodes to existing plot. Prompt to include link to existing
if possible"""
home_nodes = set(self.get_node_list())
self.canvas.plot_additional(home_nodes, levels=self.level)
def buildNewShortcut(self, event=None):
# Add node intelligently, then do a build-new
self.node_entry.event_generate('<Return>') # Resolve current
self.onBuildNew()
def goto_path(self, event):
frm = self.node_entry.get()
to = self.node_entry2.get()
self.node_entry.delete(0, tk.END)
self.node_entry2.delete(0, tk.END)
if frm == '':
tkm.showerror("No From Node", "Please enter a node in both "
"boxes to plot a path. Enter a node in only the first box "
"to bring up nodes immediately adjacent.")
return
if frm.isdigit() and int(frm) in self.canvas.dataG.nodes():
frm = int(frm)
if to.isdigit() and int(to) in self.canvas.dataG.nodes():
to = int(to)
self.canvas.plot_path(frm, to, levels=self.level)
def onNodeSelected(self, node_name, node_dict):
self.tbl_attr.build(node_dict)
self.lbl_attr.config(text="Attributes of node '%s'"%node_name)
def onEdgeSelected(self, edge_name, edge_dict):
self.tbl_attr.build(edge_dict)
self.lbl_attr.config(text="Attributes of edge between '%s' and '%s'"%
edge_name[:2])
@property
def level(self):
try:
l = int(self.level_entry.get())
except ValueError:
tkm.showerror("Invalid Level", "Please specify a level between "
"greater than or equal to 0")
raise
return l
class TkPassthroughViewerApp(ViewerApp):
def __init__(self, graph, **kwargs):
ViewerApp.__init__(self, graph,
NodeTokenClass=TkPassthroughNodeToken,
EdgeTokenClass=TkPassthroughEdgeToken, **kwargs)
class PropertyTable(tk.Frame):
"""A pure Tkinter scrollable frame that actually works!
* Use the 'interior' attribute to place widgets inside the scrollable frame
* Construct and pack/place/grid normally
* This frame only allows vertical scrolling
"""
def __init__(self, parent, property_dict, *args, **kw):
tk.Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
self.vscrollbar = vscrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)
vscrollbar.pack(fill=tk.Y, side=tk.RIGHT, expand=tk.FALSE)
self.canvas = canvas = tk.Canvas(self, bd=0, highlightthickness=0,
yscrollcommand=vscrollbar.set)
canvas.pack(side=tk.LEFT, fill=tk.BOTH, e |
ory_id)[0]['isthing']
if not is_thing:
continue
ids_with_ann.append(item['image_id'])
ids_with_ann = set(ids_with_ann)
valid_inds = []
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _pan2json(self, results, outfile_prefix):
"""Convert panoptic results to COCO panoptic json style."""
label2cat = dict((v, k) for (k, v) in self.cat2label.items())
pred_annotations = []
outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
for idx in range(len(self)):
img_id = self.img_ids[idx]
segm_file = self.data_infos[idx]['segm_file']
pan = results[idx]
pan_labels = np.unique(pan)
segm_info = []
for pan_label in pan_labels:
sem_label = pan_label % INSTANCE_OFFSET
# We reserve the length of self.CLASSES for VOID label
if sem_label == len(self.CLASSES):
continue
# convert sem_label to json label
cat_id = label2cat[sem_label]
is_thing = self.categories[cat_id]['isthing']
mask = pan == pan_label
area = mask.sum()
segm_info.append({
'id': int(pan_label),
'category_id': cat_id,
'isthing': is_thing,
'area': int(area)
})
# evaluation script uses 0 for VOID label.
pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID
pan = id2rgb(pan).astype(np.uint8)
mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file))
record = {
'image_id': img_id,
'segments_info': segm_info,
'file_name': segm_file
}
pred_annotations.append(record)
pan_json_results = dict(annotations=pred_annotations)
return pan_json_results
def results2json(self, results, outfile_prefix):
"""Dump the panoptic results to a COCO panoptic style json file.
Args:
results (dict): Testing results of the dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.panoptic.json"
Returns:
dict[str: str]: The key is 'panoptic' and the value is
corresponding filename.
"""
result_files = dict()
pan_results = [result['pan_results'] for result in results]
pan_json_results = self._pan2json(pan_results, outfile_prefix)
result_files['panoptic'] = f'{outfile_prefix}.panoptic.json'
mmcv.dump(pan_json_results, result_files['panoptic'])
return result_files
def evaluate_pan_json(self,
result_files,
outfile_prefix,
logger=None,
classwise=False):
"""Evaluate PQ according to the panoptic results json file."""
imgs = self.coco.imgs
gt_json = self.coco.img_ann_map # image to annotations
gt_json = [{
'image_id': k,
'segments_info': v,
'file_name': imgs[k]['segm_file']
} for k, v in gt_json.items()]
pred_json = mmcv.load(result_files['panoptic'])
pred_json = dict(
(el['image_id'], el) for el in pred_json['annotations'])
# match the gt_anns and pred_anns in the same image
matched_annotations_list = []
for gt_ann in gt_json:
img_id = gt_ann['image_id']
if img_id not in pred_json.keys():
raise Exception('no prediction for the image'
' with id: {}'.format(img_id))
matched_annotations_list.append((gt_ann, pred_json[img_id]))
gt_folder = self.seg_prefix
pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
pq_stat = pq_compute_multi_core(matched_annotations_list, gt_folder,
pred_folder, self.categories,
self.file_client)
metrics = [('All', None), ('Things', True), ('Stuff', False)]
pq_results = {}
for name, isthing in metrics:
pq_results[name], classwise_results = pq_stat.pq_average(
self.categories, isthing=isthing)
if name == 'All':
pq_results['classwise'] = classwise_results
classwise_results = None
if classwise:
classwise_results = {
k: v
for k, v in zip(self.CLASSES, pq_results['classwise'].values())
}
print_panoptic_table(pq_results, classwise_results, logger=logger)
return parse_pq_results(pq_results)
def evaluate(self,
results,
metric='PQ',
logger=None,
jsonfile_prefix=None,
classwise=False,
**kwargs):
"""Evaluation in COCO Panoptic protocol.
Args:
results (list[dict]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Only
support 'PQ' at present. 'pq' will be regarded as 'PQ'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to print classwise evaluation results.
Default: False.
Returns:
dict[str, float]: COCO Panoptic style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
# Compatible with lowercase 'pq'
metrics = ['PQ' if metric == 'pq' else metric for metric in metrics]
allowed_metrics = ['PQ'] # todo: support other metrics like 'bbox'
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
outfile_prefix = os.path.join(tmp_dir.name, 'results') \
if tmp_dir is not None else jsonfile_prefix
if 'PQ' in metrics:
eval_pan_results = self.evaluate_pan_json(result_files,
outfile_prefix, logger,
classwise)
eval_results.update(eval_pan_results)
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
def parse_pq_results(pq_results):
"""Parse the Panoptic Quality results."""
result = dict()
result['PQ'] = 100 * pq_results['All']['pq']
result['SQ'] = 100 * pq_results['All']['sq']
result['RQ'] = 100 * pq_results['All']['rq']
result['PQ_th'] = 100 * pq_results['Things']['pq']
result['SQ_th'] = 100 * pq_results['Things']['sq']
result['RQ_th'] = 100 * pq_results['Things']['rq']
result['PQ_st'] = 100 * pq_results['Stuff']['pq']
result['SQ_st'] = 100 * pq_results['Stuff']['sq']
result['RQ_st'] = 100 * pq_results['Stuff']['rq']
return result
def print_panoptic_table(pq_results, classwise_results=None, logger=None):
"""Print the panoptic evaluation results table.
Args:
pq_results |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# =================================================================
# =================================================================
# NOTE: notify message MUST follow these rules:
#
# - Messages must be wrappered with _() for translation
#
# - Replacement variables must be wrappered with brackets
#
# - Replacement variables must be from the following list:'
# {instance_id}
# {instance_name}
# {host_name}
# {source_host_name}
# {target_host_name}
# {error}
from paxes_nova import _
PAUSE_SUCCESS = (_("Pause of virtual machine {instance_name} on host "
"{host_name} was successful."))
PAUSE_ERROR = (_("Pause of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
SUSPEND_SUCCESS = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} was successful."))
SUSPEND_ERROR = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESUME_SUCCESS = (_("Resume of virtual machine {instance_name} on host "
"{host_name} was successful."))
RESUME_ERROR = (_("Resume of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DEPLOY_SUCCESS = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} was successful."))
DEPLOY_ERROR = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
START_SUCCESS = (_("Start of virtual machine {instance_name} on host "
"{host_name} was successful."))
START_ERROR = (_("Start of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
STOP_SUCCESS = (_("Stop of virtual machine {instance_name} on host "
"{host_name} was successful."))
STOP_ERROR = (_("Stop of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESTART_SUCCESS = (_("Restart of v | irtual machine {instance_name} on host "
"{host_name} was successful."))
RESTART_ERROR = (_("Restart of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
LPM_SUCCESS = (_("Migration of virtual machine {instance_name} from host "
"{source_host_name} to host {target_host_name} was "
"successful."))
LPM_ERROR = (_("Migration of virtual machine {instance_name} to host "
"{target_host_name} failed with exception: {error}"))
LPM_ERROR_DEST = (_("Migration of virtual machine {instance_name} to host "
"{host_name} failed with exception: {error}"))
DELETE_ERROR = (_("Delete of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DELETE_SUCCESS = (_("Delete of virtual machine {instance_name} on host "
"{host_name} was successful. "))
RESIZE_ERROR = (_("Resize of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESIZE_SUCCESS = (_("Resize of virtual machine {instance_name} on host "
"{host_name} was successful."))
CAPTURE_SUCCESS = (_("Capture of virtual machine {instance_name} on host "
"{host_name} was successful"))
CAPTURE_ERROR = (_("Capture of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
ATTACH_SUCCESS = (_("Volume {volume_id} was successfully attached to "
"virtual machine {instance_name}."))
ATTACH_ERROR = (_("Volume {volume_id} could not be attached to "
"virtual machine {instance_name}. Error message: {error}"))
DETACH_SUCCESS = (_("Volume {volume_id} was successfully detached from "
"virtual machine {instance_name}."))
DETACH_ERROR = (_("Volume {volume_id} could not be detached from "
"virtual machine {instance_name}. Error message: {error}"))
|
"""
WSGI config for crowd_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crowd_server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
# -*- coding: utf-8 -*-
# author: Alfred
import os
import re
DB_MODULE_PATTERN = re.compile(r'db2charts_models\.(?P<module>.*)_models')
class DB2ChartsRouter(object):
def db_for_module(self, module):
match = DB_MODULE_PATTERN.match(module)
if match:
return match.groupdict()['module']
return None
def db_for_read(self, model, **hints):
return self.db_for_module(model.__module__)
def db_for_write(self, model, **hints):
return self.db_for_module(model.__module__)
def allow_migrate(self, db, app_label, model=None, **hints):
return False
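# Routing sketch (the module path is hypothetical): a model defined in
# "db2charts_models.analytics_models" is read from and written to the database
# alias "analytics", while models outside that naming scheme fall through to the
# default router because db_for_module() returns None; allow_migrate() always
# answering False keeps migrations away from these routed databases.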
rror Occurred',
500: 'A Server-side Error Occurred Handling the Request.',
}
# Some Phone Number Detection
IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
# Priorities
class D7SMSPriority(object):
"""
D7 Networks SMS Message Priority
"""
LOW = 0
MODERATE = 1
NORMAL = 2
HIGH = 3
D7NETWORK_SMS_PRIORITIES = (
D7SMSPriority.LOW,
D7SMSPriority.MODERATE,
D7SMSPriority.NORMAL,
D7SMSPriority.HIGH,
)
class NotifyD7Networks(NotifyBase):
"""
A wrapper for D7 Networks Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'D7 Networks'
# The services URL
service_url = 'https://d7networks.com/'
# All notification requests are secure
secure_protocol = 'd7sms'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_d7networks'
# D7 Networks batch notification URL
notify_batch_url = 'http://rest-api.d7networks.com/secure/sendbatch'
# D7 Networks single notification URL
notify_url = 'http://rest-api.d7networks.com/secure/send'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{user}:{password}@{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'user': {
'name': _('Username'),
'type': 'string',
'required': True,
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
'required': True,
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'priority': {
'name': _('Priority'),
'type': 'choice:int',
'min': D7SMSPriority.LOW,
'max': D7SMSPriority.HIGH,
'values': D7NETWORK_SMS_PRIORITIES,
# The website identifies that the default priority is low; so
# this plugin will honor that same default
'default': D7SMSPriority.LOW,
},
'batch': {
'name': _('Batch Mode'),
'type': 'bool',
'default': False,
},
'to': {
'alias_of': 'targets',
},
'source': {
        # Originating address. In cases where rewriting of the sender's
        # address is supported or permitted by the SMS-C, this number is
        # transmitted as the originating address. It is completely optional.
'name': _('Originating Address'),
'type': 'string',
'map_to': 'source',
},
'from': {
'alias_of': 'source',
},
})
def __init__(self, targets=None, priority=None, source=None, batch=False,
**kwargs):
"""
Initialize D7 Networks Object
"""
super(NotifyD7Networks, self).__init__(**kwargs)
# The Priority of the message
if priority not in D7NETWORK_SMS_PRIORITIES:
self.priority = self.template_args['priority']['default']
else:
self.priority = priority
# Prepare Batch Mode Flag
self.batch = batch
# Setup our source address (if defined)
self.source = None \
if not isinstance(source, six.string_types) else source.strip()
# Parse our targets
self.targets = list()
for target in parse_list(targets):
# Validate targets and drop bad ones:
result = IS_PHONE_NO.match(target)
if result:
                # Further check our phone # for its digit count; if it has
                # fewer than 11 or more than 14 digits, assume it is a poorly
                # specified phone number and emit a warning
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append(result)
continue
self.logger.warning(
'Dropped invalid phone # ({}) specified.'.format(target))
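        # Illustration of the validation above (hypothetical input): a target of
        # '+1 (234) 567-8901' matches IS_PHONE_NO and normalizes to
        # '12345678901' (11 digits), so it is kept; a bare '555' also matches
        # the regex but has only 3 digits, so it is dropped with a warning.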
if len(self.targets) == 0:
            msg = 'There are no valid targets identified to notify.'
self.logger.warning(msg)
raise TypeError(msg)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
        Depending on whether we are set to batch mode or single mode this
redirects to the appropriate handling
"""
# error tracking (used for function return)
has_error = False
auth = '{user}:{password}'.format(
user=self.user, password=self.password)
if six.PY3:
            # Python 3's version of b64encode() expects a byte array and not
            # a string. To accommodate this, we encode the content here
auth = auth.encode('utf-8')
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Accept': 'application/json',
'Authorization': 'Basic {}'.format(base64.b64encode(auth))
}
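        # Illustration (hypothetical credentials): 'user:pass' base64-encodes to
        # 'dXNlcjpwYXNz', producing the header 'Authorization: Basic dXNlcjpwYXNz'
        # (note that b64encode() returns bytes under Python 3).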
# Our URL varies depending if we're doing a batch mode or not
url = self.notify_batch_url if self.batch else self.notify_url
# use the list directly
targets = list(self.targets)
while len(targets):
if self.batch:
# Prepare our payload
payload = {
'globals': {
'priority': self.priority,
'from': self.source if self.source else self.app_id,
},
'messages': [{
'to': self.targets,
'content': body,
}],
}
# Reset our targets so we don't keep going. This is required
# because we're in batch mode; we only need to loop once.
targets = []
else:
# We're not in a batch mode; so get our next target
# Get our target(s) to notify
target = targets.pop(0)
# Prepare our payload
payload = {
'priority': self.priority,
'content': body,
'to': target,
'from': self.source if self.source else self.app_id,
}
# Some Debug Logging
self.logger.debug(
'D7 Networks POST URL: {} (cert_verify={})'.format(
url, self.verify_certificate))
            self.logger.debug('D7 Networks Payload: {}'.format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
url,
data=dumps(payload),
headers=headers,
verify=self.verify_certificate,
)
if r.status_code not in (
requests.codes.created, requests.codes.ok):
# We had a problem
status_str = \
|
os
from mule_local.JobGeneration import *
from mule.JobPlatformResources import *
from . import JobPlatformAutodetect
def _whoami(depth=1):
"""
String of function name to recycle code
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s08.html
Returns
-------
string
Return function name
"""
return sys._getframe(depth).f_code.co_name
def p_gen_script_info(jg : JobGeneration):
return """#
# Generating function: """+_whoami(2)+"""
# Platform: """+get_platform_id()+"""
# Job id: """+jg.getUniqueID()+"""
#
"""
def get_platform_autodetect():
"""
Returns
-------
bool
True if current platform matches, otherwise False
"""
return JobPlatformAutodetect.autodetect()
def get_platform_id():
"""
Return platform ID
Returns
-------
string
unique ID of platform
"""
return "cheyenne_intel"
def get_platform_resources():
"""
Return information about hardware
"""
r = JobPlatformResources()
r.num_cores_per_node = 36
# Physical number of nodes, maybe the limit is different
r.num_nodes = 4032
r.num_cores_per_socket = 18
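    # Derived illustration: 36 cores per node over 18-core sockets means two
    # sockets per node, and 4032 nodes * 36 cores = 145,152 cores in total.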
# 12h limit
r.max_wallclock_seconds = 60*60*12
return r
def jobscript_setup(jg : JobGeneration):
"""
Setup data to generate job script
"""
return
def jobscript_get_header(jg : JobGeneration):
"""
    These headers typically contain information on e.g. job execution, number of compute nodes, etc.
Returns
-------
string
multiline text for scripts
"""
job_id = jg.getUniqueID()
p = jg.parallelization
time_str = p.get_max_wallclock_seconds_hh_mm_ss()
# Available queues:
# premium (only use this in extreme cases)
# regular
# economy
queue = 'economy'
    # Use a higher-priority queue for larger jobs: premium for 32 or more
    # nodes, regular for 16 or more. Otherwise, the job doesn't seem to be
    # scheduled.
if p.num_nodes >= 32:
queue = 'premium'
elif p.num_nodes >= 16:
queue = 'regular'
#
# See https://www.lrz.de/services/compute/linux-cluster/batch_parallel/example_jobs/
#
content = """#! /bin/bash
#
## project code
#PBS -A NCIS0002
#PBS -q """+queue+"""
## wall-clock time (hrs:mins:secs)
#PBS -l walltime="""+time_str+"""
## select: number of nodes
## ncpus: number of CPUs per node
## mpiprocs: number of ranks per node
#PBS -l select="""+str(p.num_nodes)+""":ncpus="""+str(p.num_cores_per_node)+""":mpiprocs="""+str(p.num_ranks_per_node)+""":ompthreads="""+str(p.num_threads_per_rank)+"\n"
#"default": 2301000
#"turbo": 2301000
#"rated": 2300000
#"slow": 1200000
if p.force_turbo_off:
content += "#PBS -l select=cpufreq=2300000\n"
content += """#
#PBS -N """+job_id[0:100]+"""
#PBS -o """+jg.p_job_stdout_filepath+"""
#PBS -e """+jg.p_job_stderr_filepath+"""
#source /etc/profile.d/modules.sh
#module load openmpi
"""+("module load mkl" if jg.compile.mkl==True or jg.compile.mkl=='enable' else "")+"""
"""+p_gen_script_info(jg)+"""
echo
echo "hostname"
hostname
echo
echo
echo "lscpu -e"
lscpu -e
echo
echo
echo "CPU Frequencies (uniquely reduced):"
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | sort -u
echo
"""
if jg.compile.threading != 'off':
content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
"""
# if jg.compile.sweet_mpi != 'enable':
if True:
#
# https://software.intel.com/en-us/node/522691
if p.core_oversubscription:
if p.core_affinity != None:
if p.core_affinity == 'compact':
content += "export KMP_AFFINITY=granularity=fine,compact\n"
elif p.core_affinity == 'scatter':
content += "export KMP_AFFINITY=granularity=fine,scatter\n"
else:
Exception("Affinity '"+str(p.core_affinity)+"' not supported")
else:
#raise Exception("Please specify core_affinity!")
content += "# No core affinity selected\n"
else:
if p.core_affinity != None:
content += "\necho \"Affnity: "+str(p.core_affinity)+"\"\n"
if p.core_affinity == 'compact':
content += "export KMP_AFFINITY=granularity=fine,compact,1,0\n"
elif p.core_affinity == 'scatter':
content += "export KMP_AFFINITY=granularity=fine,scatter\n"
else:
raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
else:
#raise Exception("Please specify core_affinity!")
content += "# No core affinity selected\n"
if p.core_affinity != None:
content += "export KMP_AFFINITY=\"verbose,$KMP_AFFINITY\"\n"
    return content
def jobscript_get_exec_prefix(jg : JobGeneration):
"""
Prefix before executable
Returns
-------
string
multiline text for scripts
"""
content = ""
content += jg.runtime.get_jobscript_plan_exec_prefix(jg.compile, jg.runtime)
content += """
EXEC=\""""+jg.compile.getProgramPath()+"""\"
PARAMS=\""""+jg.runtime.getRuntimeOptions()+"""\"
"""
return content
def jobscript_get_exec_command(jg : JobGeneration):
"""
Prefix to executable command
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
mpiexec = ""
#
# Only use MPI exec if we are allowed to do so
# We shouldn't use mpiexec for validation scripts
#
if not p.mpiexec_disabled:
# Use mpiexec_mpt for Intel MPI
#mpiexec = "mpiexec_mpt -n "+str(p.num_ranks)
# Use mpiexec for GNU
if jg.compile.sweet_mpi == 'enable':
mpiexec = "mpiexec_mpt -n "+str(p.num_ranks)
mpiexec += " omplace "
mpiexec += " -nt "+str(p.num_threads_per_rank)+" "
mpiexec += " -tm intel"
mpiexec += " -vv"
        if mpiexec != '' and mpiexec[-1] != ' ':
mpiexec += ' '
#
# Fix the mess on Cheyenne!
#
# We prefix the current LD_LIBRARY_PATH with the one from the shell where the job was submitted
# This is required since Cheyenne scripts mess around with the existing path in a way
    # which results in e.g. the system-wide installed fftw being loaded.
    #
    # What we basically accomplish here is to make sure that the MULE
    # local_software/local/lib directory is looked up first, then the system libraries.
#
sweet_ld_library_path = os.getenv('MULE_LD_LIBRARY_PATH')
if sweet_ld_library_path == None:
raise Exception("Environment variable MULE_LD_LIBRARY_PATH not found!")
content = """
# Make sure that MULE library path is really known
export LD_LIBRARY_PATH=\""""+sweet_ld_library_path+""":$LD_LIBRARY_PATH\"
echo
echo "LD_LIBRARY_PATH"
echo "${LD_LIBRARY_PATH}"
echo
echo
echo "ldd"
ldd $EXEC
echo
E=\""""+mpiexec+"""${EXEC} ${PARAMS}\"
echo
echo "Executing..."
echo "$E"
$E || exit 1
"""
return content
def jobscript_get_exec_suffix(jg : JobGeneration):
"""
    Suffix after the executable command
Returns
-------
string
multiline text for scripts
"""
content = """
echo
echo "CPU Frequencies (uniquely reduced):"
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | sort -u
echo
"""
content += jg.runtime.get_jobscript_plan_exec_suffix(jg.compile, jg.runtime)
return content
def jobscript_get_footer(jg : JobGeneration):
"""
Footer at very end of job script
Returns
-------
string
multiline text for scripts
"""
content = ""
return content
def jobscript_get_compile_command(jg : JobGeneration):
"""
Compile command(s)
This is separated here to put it either
* into the job script (handy for workstations)
or
* into a separate compile file (handy for clusters)
Returns
-------
string
multiline text with compile command to generate executable
"""
content = """
SCONS="scons """+jg.compile.g |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<lang>[a-z]{2})?$', views.index, name='index'),
    url(r'^sign/$', views.sign, name='sign'),
    url(r'^confirm/([0-9a-z]{64})/$', views.confirm, name='confirm'),
]
|
: None,
},
'spEnable': False,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
'randomSP': 0,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : False,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
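        # Derived illustration: 2048 columns * 32 cells per column gives
        # 65,536 cells in the temporal pooler layer.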
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '0',
},
'anomalyParams': {
u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None
},
'trainSPNetOnlyIfRequested': False,
},
    'dataSource': 'fillInBySubExperiment',
'errorMetric': 'fillInBySubExperiment'
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'grok',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : { u'info': u'testSpatialClassification',
u'streams': [ { u'columns': [u'*'],
u'info': u'spatialClassification',
u'source': config['dataSource']}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'classification', u'predictionSteps': [0]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field='classification', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': config['errorMetric'],
'window': 100,
'steps': 0}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the |
se:
return self._value()
ops.register_tensor_conversion_function(
StagedModelVariable, StagedModelVariable._TensorConversionFunction) # pylint: disable=protected-access
class StagedVariableGetter(object):
"""A variable getter through staging buffers on devices.
Instead of a caching device, this getter tracks where the variable is used.
And on each device, it goes through a staging buffer.
"""
def __init__(self, device_num, devices, cpu_device, variable_mgr):
"""Initializer for StagedVariableGetter.
Args:
device_num: the current device index.
devices: a list of all the devices to build towers.
cpu_device: a cpu_device for this replica. If None, no cpu-caching is
done.
variable_mgr: the parent variable manager.
"""
self.device_num = device_num
self.devices = devices
self.cpu_device = cpu_device
self.variable_mgr = variable_mgr
def __call__(self, getter, name, *args, **kwargs):
staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
if name in staging_ops:
put_op, get_op = staging_ops[name]
return get_op
real_var = getter(name, *args, **kwargs)
shape = kwargs['shape']
dtype = kwargs['dtype']
trainable = kwargs['trainable']
if self.cpu_device:
with tf.device(self.cpu_device):
# This helps copying the weights from the parameter to this server only
# once.
if name in self.variable_mgr.staged_vars_on_cpu:
cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
else:
cpu_var = tf.identity(real_var)
self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
var_to_stage = cpu_var
else:
var_to_stage = tf.identity(real_var) # de-reference the variable.
with tf.device(self.devices[self.device_num]):
staging_area = data_flow_ops.StagingArea([dtype], shapes=[shape])
put_op = staging_area.put([var_to_stage])
get_op = staging_area.get()[0]
staging_ops[name] = (put_op, get_op)
if trainable:
      # For trainable variables, they are managed separately through
# apply_gradients.
return get_op
else:
# For other shadow variables, the access is decoupled through a wrapper
# class.
return StagedModelVariable(real_var, get_op, self.variable_mgr)
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable):
"""Return the set of trainable variables on the specified device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether the returned variables is writable or read-only.
Returns:
Return the set of trainable variables on the specified device.
"""
del abs_device_num
params_refs = tf.trainable_variables()
if writable:
return params_refs
params = []
for param in params_refs:
var_name = param.name.split(':')[0]
_, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
var_name]
params.append(var_get_op)
return params
def aggregate_gradients_using_copy_with_device_selection(
benchmark_cnn, tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, controlling device for the aggregation.
Args:
benchmark_cnn: benchmark_cnn class.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
"""
if benchmark_cnn.local_parameter_device_flag == 'gpu':
avail_devices = benchmark_cnn.raw_devices
else:
avail_devices = [benchmark_cnn.param_server_device]
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
    has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
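# Illustration (assuming aggregate_single_gradient_using_copy averages or sums
# the per-tower values): two towers contributing gradients 2.0 and 4.0 for a
# shared variable v aggregate to (3.0, v) with use_mean=True and (6.0, v) otherwise.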
# The following two functions are copied from
# tensorflow/python/eager/backprop.py. We do not directly use them as they are
# not exported and subject to change at any time.
def flatten_nested_indexed_slices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = flatten_nested_indexed_slices(grad.values)
return ops.IndexedSlices(g.values, array_ops.gather(grad.indices,
g.indices),
g.dense_shape)
def aggregate_indexed_slices_gradients(grads):
"""Aggregates gradients containing `IndexedSlices`s."""
if len(grads) < 1:
return None
elif len(grads) == 1:
return grads[0]
else:
grads = [g for g in grads if g is not None]
# If any gradient is a `Tensor`, sum them up and return a de |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_context import context
CONF = cfg.CONF
class RequestContext(context.RequestContext):
"""User security context object
Stores information about the security context under which the user
    accesses the system, as well as additional request information.
"""
def __init__(self, project=None, **kwargs):
if project:
kwargs['tenant'] = project
self.project = project
super(RequestContext, self).__init__(**kwargs)
def to_dict(self):
out_dict = super(RequestContext, self).to_dict()
out_dict['roles'] = self.roles
if out_dict.get('tenant'):
out_dict['project'] = out_dict['tenant']
out_dict.pop('tenant')
        return out_dict
@classmethod
def from_dict(cls, values):
return cls(**values)
def get_context():
"""A helper method to get a blank context (useful for tests)."""
return RequestContext(user_id=None,
project_id=None,
roles=[],
is_admin=False,
overwrite=False)
|
2, blank=True)),
('estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_pibic', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('parceria', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('cadastro', ['Pesquisa'])
# Adding model 'Extensao'
db.create_table('cadastro_extensao', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.CharField')(max_length=20)),
('financiador', self.gf('django.db.models.fields.CharField')(max_length=20)),
('estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_pibex', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('parceria', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('cadastro', ['Extensao'])
# Adding model 'Atividade'
db.create_table('cadastro_atividade', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('docente', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cadastro.Docente'])),
('afastamento', self.gf('django.db.models.fields.BooleanField')(default=True)),
('cargo', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('comissoes', self.gf('django.db.models.fields.IntegerField')()),
('semestre', self.gf('django.db.models.fields.CharField')(max_length=6)),
))
db.send_create_signal('cadastro', ['Atividade'])
# Adding M2M table for field disciplinas on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_disciplinas')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('disciplina', models.ForeignKey(orm['cadastro.disciplina'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'disciplina_id'])
# Adding M2M table for field pesquisa on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_pesquisa')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('pesquisa', models.ForeignKey(orm['cadastro.pesquisa'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'pesquisa_id'])
# Adding M2M table for field extensao on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_extensao')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('extensao', models.ForeignKey(orm['cadastro.extensao'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'extensao_id'])
def backwards(self, orm):
# Deleting model 'Docente'
db.delete_table('cadastro_docente')
# Deleting model 'Disciplina'
db.delete_table('cadastro_disciplina')
# Deleting model 'Pesquisa'
db.delete_table('cadastro_pesquisa')
# Deleting model 'Extensao'
db.delete_table('cadastro_extensao')
# Deleting model 'Atividade'
db.delete_table('cadastro_atividade')
# Removing M2M table for field disciplinas on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_disciplinas'))
# Removing M2M table for field pesquisa on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_pesquisa'))
# Removing M2M table for field extensao on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_extensao'))
models = {
'cadastro.atividade': {
'Meta': {'object_name': 'Atividade'},
'afastamento': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cargo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'comissoes': ('django.db.models.fields.IntegerField', [], {}),
'disciplinas': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Disciplina']", 'symmetrical': 'False'}),
'docente': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Docente']"}),
'extensao': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Extensao']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pesquisa': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Pesquisa']", 'symmetrical': 'False'}),
'semestre': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'cadastro.disciplina': {
'Meta': {'object_name': 'Disciplina'},
'cargahoraria': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'estudantes': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multicampia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'nivel': ('django.db.models.fields.CharField', [], {'max_length': '11'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '11'})
},
'cadastro.docente': {
'Meta': {'object_name': 'Docente'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matricula': ('django.db.models.fields.CharField', [], {'max_length': '7', 'unique': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True'})
},
'cadastro.extensao': {
'Meta': {'object_name': 'Extensao'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'bolsistas_pibex': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'bolsistas_ppq': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'estudantes_graduacao': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'estudantes_pos': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'financiador': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parceria': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parceria_inter': ('django.db.models.fields.CharField', [] |
ams)
return estimator
def log_and_get_hooks(eval_batch_size):
"""Convenience function for hook and logger creation."""
# Create hooks that log information about the training and metric values
train_hooks = hooks_helper.get_train_hooks(
FLAGS.hooks,
model_dir=FLAGS.model_dir,
batch_size=FLAGS.batch_size, # for ExamplesPerSecondHook
tensors_to_log={"cross_entropy": "cross_entropy"}
)
run_params = {
"batch_size": FLAGS.batch_size,
"eval_batch_size": eval_batch_size,
"number_factors": FLAGS.num_factors,
"hr_threshold": FLAGS.hr_threshold,
"train_epochs": FLAGS.train_epochs,
}
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info(
model_name="recommendation",
dataset_name=FLAGS.dataset,
run_params=run_params,
test_id=FLAGS.benchmark_test_id)
return benchmark_logger, train_hooks
def parse_flags(flags_obj):
"""Convenience function to turn flags into params."""
num_gpus = flags_core.get_num_gpus(flags_obj)
num_devices = FLAGS.num_tpu_shards if FLAGS.tpu else num_gpus or 1
batch_size = (flags_obj.batch_size + num_devices - 1) // num_devices
eval_divisor = (rconst.NUM_EVAL_NEGATIVES + 1) * num_devices
eval_batch_size = flags_obj.eval_batch_size or flags_obj.batch_size
eval_batch_size = ((eval_batch_size + eval_divisor - 1) //
eval_divisor * eval_divisor // num_devices)
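  # Worked example (hypothetical numbers): with 2 devices and an eval divisor of
  # 200, a requested eval batch size of 250 is rounded up to 400 in total, i.e.
  # 200 per device.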
return {
"train_epochs": flags_obj.train_epochs,
"batches_per_step": num_devices,
"use_seed": flags_obj.seed is not None,
"batch_size": batch_size,
"eval_batch_size": eval_batch_size,
"learning_rate": flags_obj.learning_rate,
"mf_dim": flags_obj.num_factors,
"model_layers": [int(layer) for layer in flags_obj.layers],
"mf_regularization": flags_obj.mf_regularization,
"mlp_reg_layers": [float(reg) for reg in flags_obj.mlp_regularization],
"num_neg": flags_obj.num_neg,
"num_gpus": num_gpus,
"use_tpu": flags_obj.tpu is not None,
"tpu": flags_obj.tpu,
"tpu_zone": flags_obj.tpu_zone,
"tpu_gcp_project": flags_obj.tpu_gcp_project,
"beta1": flags_obj.beta1,
"beta2": flags_obj.beta2,
"epsilon": flags_obj. | epsilon,
"match_mlperf": flags_obj.ml_perf,
"use_xla_for_gpu": flags_obj.use_xla_for_gpu,
"epochs_between_evals": FLAGS.epochs_between_evals,
  }
def main(_):
with logger.benchmark_context(FLAGS), \
mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
run_ncf(FLAGS)
def run_ncf(_):
"""Run NCF training and eval loop."""
if FLAGS.download_if_missing and not FLAGS.use_synthetic_data:
movielens.download(FLAGS.dataset, FLAGS.data_dir)
if FLAGS.seed is not None:
np.random.seed(FLAGS.seed)
params = parse_flags(FLAGS)
total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals
if FLAGS.use_synthetic_data:
producer = data_pipeline.DummyConstructor()
num_users, num_items = data_preprocessing.DATASET_TO_NUM_USERS_AND_ITEMS[
FLAGS.dataset]
num_train_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
num_eval_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
else:
num_users, num_items, producer = data_preprocessing.instantiate_pipeline(
dataset=FLAGS.dataset, data_dir=FLAGS.data_dir, params=params,
constructor_type=FLAGS.constructor_type,
deterministic=FLAGS.seed is not None)
num_train_steps = (producer.train_batches_per_epoch //
params["batches_per_step"])
num_eval_steps = (producer.eval_batches_per_epoch //
params["batches_per_step"])
assert not producer.train_batches_per_epoch % params["batches_per_step"]
assert not producer.eval_batches_per_epoch % params["batches_per_step"]
producer.start()
params["num_users"], params["num_items"] = num_users, num_items
model_helpers.apply_clean(flags.FLAGS)
estimator = construct_estimator(model_dir=FLAGS.model_dir, params=params)
benchmark_logger, train_hooks = log_and_get_hooks(params["eval_batch_size"])
target_reached = False
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_LOOP)
for cycle_index in range(total_training_cycle):
assert FLAGS.epochs_between_evals == 1 or not mlperf_helper.LOGGER.enabled
tf.logging.info("Starting a training cycle: {}/{}".format(
cycle_index + 1, total_training_cycle))
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_EPOCH,
value=cycle_index)
train_input_fn = producer.make_input_fn(is_training=True)
estimator.train(input_fn=train_input_fn, hooks=train_hooks,
steps=num_train_steps)
tf.logging.info("Beginning evaluation.")
eval_input_fn = producer.make_input_fn(is_training=False)
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_START,
value=cycle_index)
eval_results = estimator.evaluate(eval_input_fn, steps=num_eval_steps)
tf.logging.info("Evaluation complete.")
hr = float(eval_results[rconst.HR_KEY])
ndcg = float(eval_results[rconst.NDCG_KEY])
loss = float(eval_results["loss"])
mlperf_helper.ncf_print(
key=mlperf_helper.TAGS.EVAL_TARGET,
value={"epoch": cycle_index, "value": FLAGS.hr_threshold})
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_ACCURACY,
value={"epoch": cycle_index, "value": hr})
mlperf_helper.ncf_print(
key=mlperf_helper.TAGS.EVAL_HP_NUM_NEG,
value={"epoch": cycle_index, "value": rconst.NUM_EVAL_NEGATIVES})
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_STOP, value=cycle_index)
# Benchmark the evaluation results
benchmark_logger.log_evaluation_result(eval_results)
# Log the HR and NDCG results.
tf.logging.info(
"Iteration {}: HR = {:.4f}, NDCG = {:.4f}, Loss = {:.4f}".format(
cycle_index + 1, hr, ndcg, loss))
# If some evaluation threshold is met
if model_helpers.past_stop_threshold(FLAGS.hr_threshold, hr):
target_reached = True
break
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_STOP,
value={"success": target_reached})
producer.stop_loop()
producer.join()
# Clear the session explicitly to avoid session delete error
tf.keras.backend.clear_session()
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_FINAL)
def define_ncf_flags():
"""Add flags for running ncf_main."""
# Add common flags
flags_core.define_base(export_dir=False)
flags_core.define_performance(
num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=True,
max_train_steps=False,
dtype=False,
all_reduce_alg=False
)
flags_core.define_device(tpu=True)
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(
model_dir="/tmp/ncf/",
data_dir="/tmp/movielens-data/",
train_epochs=2,
batch_size=256,
hooks="ProfilerHook",
tpu=None
)
# Add ncf-specific flags
flags.DEFINE_enum(
name="dataset", default="ml-1m",
enum_values=["ml-1m", "ml-20m"], case_sensitive=False,
help=flags_core.help_wrap(
"Dataset to be trained and evaluated."))
flags.DEFINE_boolean(
name="download_if_missing", default=True, help=flags_core.help_wrap(
"Download data to data_dir if it is not already present."))
flags.DEFINE_integer(
name="eval_batch_size", default=None, help=flags_core.help_wrap(
"The batch size used for evaluation. This should generally be larger"
"than the training batch size as the lack of back propagation during"
"evaluation can allow for larger batch sizes to fit in memory. If not"
"specified, the training batch size (--batch_size) will be used."))
flags.DEFINE_integer(
name="num_factors", default=8,
help=flags_core.help_wrap("The Embedding size of MF model."))
# Set the default as a list of strings to be consiste |
#!/usr/bin/env python
# coding:utf-8 vi:et:ts=2
# parabridge persistent settings module.
# Copyright 2013 Grigory Petrov
# See LICENSE for details.
import xmlrpclib
import socket
import sqlite3
import uuid
import info
SQL_CREATE = """
CREATE TABLE IF NOT EXISTS task (
guid TEXT UNIQUE,
name TEXT UNIQUE,
src TEXT,
dst TEXT);
CREATE TABLE IF NOT EXISTS index_last (
guid TEXT,
file TEXT,
index_last INTEGER);
"""
SQL_TASK_ADD = """INSERT INTO task (guid, name, src, dst)
VALUES (:guid, :name, :src, :dst)"""
SQL_TASK_LIST = """SELECT * FROM task"""
SQL_TASK_DEL_BY_NAME = """DELETE FROM task WHERE name = :name"""
SQL_TASK_GUID_BY_NAME = """SELECT guid FROM task WHERE name = :name"""
SQL_INDEX_LAST_DEL = """DELETE FROM index_last WHERE guid = :guid"""
SQL_INDEX_LAST_UPDATE = """UPDATE index_last SET index_last = :index_last
WHERE guid = :guid AND file = :file"""
SQL_INDEX_LAST_ADD = """INSERT INTO index_last (guid, file, index_last)
VALUES (:guid, :file, :index_last)"""
SQL_INDEX_LAST_GET = """SELECT index_last FROM index_last WHERE
guid = :guid AND file = :file"""
class Settings( object ):
def __init__( self ):
self._init_f = False
self._notify_f = False
def init( self, f_notify = False ):
self._notify_f = f_notify
self._init_f = True
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.executescript( SQL_CREATE )
## Notify daemon process so it can read updated settings.
def notifyIfNeeded( self ):
if not self._notify_f:
return
try:
xmlrpclib.ServerProxy( info.COMM_ADDR ).cfg_changed()
except socket.error:
pass
def taskAdd( self, s_name, s_src, s_dst ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
try:
mValues = {
'guid': str( uuid.uuid4() ),
'name': s_name,
'src': s_src,
'dst': s_dst }
oConn.execute( SQL_TASK_ADD, mValues )
except sqlite3.IntegrityError:
## Name not unique.
return False
else:
return True
finally:
self.notifyIfNeeded()
def indexLastSet( self, s_guid, s_file, n_index ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
mArgs = {
'guid': s_guid,
'file': s_file,
'index_last': n_index }
oRet = oConn.execute( SQL_INDEX_LAST_UPDATE, mArgs )
if oRet.rowcount > 0:
return
## No record for guid and name pair: add one.
oConn.execute( SQL_INDEX_LAST_ADD, mArgs )
def indexLastGet( self, s_guid, s_file ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.row_factory = sqlite3.Row
mArgs = { 'guid': s_guid, 'file': s_file }
lRet = oConn.execute( SQL_INDEX_LAST_GET, mArgs ).fetchall()
if 0 == len( lRet ):
return None
if len( lRet ) > 1:
raise Exception( "Consistency error." )
return lRet[ 0 ][ 'index_last' ]
def taskDelByName( self, s_name ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.row_factory = sqlite3.Row
try:
mArgs = { 'name': s_name }
oRow = oConn.execute( SQL_TASK_GUID_BY_NAME, mArgs ).fetchone()
        if oRow is None:
return False
mArgs[ 'guid' ] = oRow[ 'guid' ]
oRet = oConn.execute( SQL_TASK_DEL_BY_NAME, mArgs )
        if 0 == oRet.rowcount:
raise Exception( "Consistency error" )
oConn.execute( SQL_INDEX_LAST_DEL, mArgs )
return True
finally:
self.notifyIfNeeded()
def taskList( self ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
try:
oConn.row_factory = sqlite3.Row
return oConn.execute( SQL_TASK_LIST ).fetchall()
finally:
self.notifyIfNeeded()
instance = Settings()
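# Minimal usage sketch (assumed call sites): a process first calls
#   instance.init(f_notify=True)
# and can then register work with
#   instance.taskAdd('books', '/path/to/src.db', '/path/to/dst.db')
# while another invocation removes it again via instance.taskDelByName('books').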
|
from bt_proximity import BluetoothRSSI
import time
import sys
import datetime
#////////////////////////////////
BT_ADDR = 'xx:xx:xx:xx:xx:xx'#/// Enter your bluetooth address here!
#////////////////////////////////
# ----------------------- DO NOT EDIT ANYTHING BELOW THIS LINE --------------------------- #
def write(records, count):
f = open("test_records.txt", "a+") # open records for append. If not present create
for i in range(count): # write out each record
f.write(str(records[i][0]) + "," + str(records[i][1]) + '\n')
f.close()
def time_diff(start_time):
current_time = datetime.datetime.now() # get current time
diff = (current_time - start_time).total_seconds() # get difference of startime and current time
return str(round(diff,2))
def main(start_time):
    records = []  # initialize array of records
count = 0 # initialize count
addr = BT_ADDR # assign BT_ADDR
num = 10 # amount of records to be recorded
while(count < num):
        btrssi = BluetoothRSSI(addr=addr)
time_e = time_diff(start_time) # get seconds elapsed
record = (btrssi.get_rssi(), time_e) # create record
records.append(record) # add record to records array
count += 1
time.sleep(.5) # wait time to get next record
write(records, count) # write out records
if __name__ == '__main__':
    main(datetime.datetime.now())
|
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from Plugins.Plugin import PluginDescriptor
def getUpgradeVersion():
import os
try:
r = os.popen("fpupgrade --version").read()
except IOError:
return None
if r[:16] != "FP update tool v":
return None
else:
return int(r[16:17])
class FPUpgrade(Screen):
skin = """
<screen position="150,200" size="450,200" title="FP upgrade required" >
<widget name="text" position="0,0" size="550,50" font="Regular;20" />
<widget name="oldversion_label" position="10,100" size="290,25" font="Regular;20" />
<widget name="newversion_label" position="10,125" size="290,25" font="Regular;20" />
<widget name="oldversion" position="300,100" size="50,25" font="Regular;20" />
<widget name="newversion" position="300,125" size="50,25" font="Regular;20" />
</screen>"""
def __init__(self, session):
self.skin = FPUpgrade.skin
Screen.__init__(self, session)
from Tools.StbHardware import getFPVersion
version = str(getFPVersion() or "N/A")
newversion = str(getUpgradeVersion() or "N/A")
self["text"] = Label(_("Your frontprocessor firmware must be upgraded.\nPress OK to start upgrade."))
self["oldversion_label"] = Label(_("Current version:"))
self["newversion_label"] = Label(_("New version:"))
self["oldversion"] = Label(version)
self["newversion"] = Label(newversion)
        self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.ok,
"cancel": self.close,
})
def ok(self):
self.close(4)
class SystemMessage(Screen):
skin = """
<screen position="150,200" size="450,200" title="System Message" >
<widget source="text" position="0,0" size="450,200" font="Regular;20" halign="center" valign="center" render="Label" />
        <ePixmap pixmap="icons/input_error.png" position="5,5" size="53,53" alphatest="on" />
</screen>"""
def __init__(self, session, message):
from Components.Sources.StaticText import StaticText
Screen.__init__(self, session)
self["text"] = StaticText(message)
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.ok,
})
def ok(self):
self.close()
def Plugins(**kwargs):
from Tools.StbHardware import getFPVersion
version = getFPVersion()
newversion = getUpgradeVersion() or 0
list = []
if version is not None and version < newversion:
list.append(PluginDescriptor(name=_("FP Upgrade"), where = PluginDescriptor.WHERE_WIZARD, needsRestart = True, fnc=(8, FPUpgrade)))
try:
msg = open("/proc/stb/message").read()
list.append(PluginDescriptor(name=_("System Message Check"), where = PluginDescriptor.WHERE_WIZARD, needsRestart = True, fnc=(9, SystemMessage, msg)))
except:
pass
return list
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import pwd
from django.db import transaction
from django.contrib.auth.models import User as DjangoUser
from storageadmin.models import User
from system import users
@transaction.atomic
def change_password(username, password):
try:
duser = DjangoUser.objects.get(username=username)
duser.set_password(password)
duser.save()
except:
sys.exit('username: %s does not exist in the admin database' %
username)
try:
User.objects.get(username=username)
except:
sys.exit('username: %s does not exist in the database' % username)
try:
pwd.getpwnam(username)
except KeyError:
sys.exit('username: %s does not exist in the system' % username)
try:
users.usermod(username, password)
users.smbpasswd(username, password)
except:
        sys.exit('Low level error occurred while changing password of user: %s'
% username)
def main():
if (len(sys.argv) < 3 or
(len(sys.argv) > 1 and sys.argv[1] == '-h')):
sys.exit('Usage: pwreset <username> <new_password>')
try:
change_password(sys.argv[1], sys.argv[2])
except:
sys.exit('Error changing password for user: %s. Check the username '
'and try again.' % sys.argv[1])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.views.i18n import JavaScriptCatalog
from demo.apps.app import application
js_info_dict = {
'packages': ('base', ),
}
urlpatterns = [
url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript_catalog'),
# Admin
url(r'^' + settings.ADMIN_URL, admin.site.urls),
# Apps
url(r'', include(application.urls)),
]
if settings.DEBUG:
    # Add the Debug Toolbar’s URLs to the project’s URLconf
import debug_toolbar
urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls)), ]
# In DEBUG mode, serve media files through Django.
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views import static
    urlpatterns += staticfiles_urlpatterns()
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += [
url(r'^%s/(?P<path>.*)$' % media_url, static.serve,
{'document_root': settings.MEDIA_ROOT}),
]
|
"""
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_STOP, STATE_ON)
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pynetio==0.1.6']
_LOGGER = logging.getLogger(__name__)
ATTR_START_DATE = 'start_date'
ATTR_TOTAL_CONSUMPTION_KWH = 'total_energy_kwh'
CONF_OUTLETS = 'outlets'
DEFAULT_PORT = 1234
DEFAULT_USERNAME = 'admin'
DEPENDENCIES = ['http']
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
URL_API_NETIO_EP = '/api/netio/{host}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Netio platform."""
from pynetio import Netio
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
if not DEVICES:
hass.http.register_view(NetioApiView)
dev = Netio(host, port, username, password)
DEVICES[host] = Device(dev, [])
# Throttle the update for all Netio switches of one Netio
dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
for key in config[CONF_OUTLETS]:
switch = NetioSwitch(
DEVICES[host].netio, key, config[CONF_OUTLETS][key])
DEVICES[host].entities.append(switch)
add_entities(DEVICES[host].entities)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
return True
def dispose(event):
"""Close connections to Netio Devices."""
for _, value in DEVICES.items():
value.netio.stop()
class NetioApiView(HomeAssistantView):
"""WSGI handler class."""
url = URL_API_NETIO_EP
name = 'api:netio'
@callback
def get(self, request, host):
"""Request handler."""
hass = request.app['hass']
data = request.query
states, consumptions, cumulated_consumptions, start_dates = \
[], [], [], []
for i in range(1, 5):
out = 'output%d' % i
states.append(data.get('%s_state' % out) == STATE_ON)
consumptions.append(float(data.get('%s_consumption' % out, 0)))
cumulated_consumptions.append(
float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
start_dates.append(data.get('%s_consumptionStart' % out, ""))
_LOGGER.debug('%s: %s, %s, %s since %s', host, states,
consumptions, cumulated_consumptions, start_dates)
ndev = DEVICES[host].netio
ndev.consumptions = consumptions
ndev.cumulated_consumptions = cumulated_consumptions
ndev.states = states
ndev.start_dates = start_dates
for dev in DEVICES[host].entities:
hass.async_add_job(dev.async_update_ha_state())
return self.json(True)
class NetioSwitch(SwitchDevice):
"""Provide a Netio linked switch."""
def __init__(self, netio, outlet, name):
"""Initialize the Netio switch."""
self._name = name
self.outlet = outlet
self.netio = netio
@property
def name(self):
"""Return the device's name."""
return self._name
@property
def available(self):
"""Return true if entity is available."""
return not hasattr(self, 'telnet')
def turn_on(self, **kwargs):
"""Turn switch on."""
self._set(True)
def turn_off(self, **kwargs):
"""Turn switch off."""
self._set(False)
def _set(self, value):
val = list('uuuu')
val[self.outlet - 1] = '1' if value else '0'
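        # e.g. turning outlet 2 on yields the list ['u', '1', 'u', 'u']; the call
        # below then sends "port list u1uu", leaving the other outlets unchanged.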
self.netio.get('port list %s' % ''.join(val))
self.netio.states[self.outlet - 1] = value
self.schedule_update_ha_state()
@property
def is_on(self):
"""Return the switch's status."""
return self.netio.states[self.outlet - 1]
def update(self):
"""Update the state."""
self.netio.update()
@property
def state_attributes(self):
"""Return optional state attributes."""
return {
ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
ATTR_START_DATE: self.start_date.split('|')[0]
}
@property
def current_power_w(self):
"""Return actual power."""
return self.netio.consumptions[self.outlet - 1]
@property
def cumulated_consumption_kwh(self):
"""Return the total enerygy consumption since start_date."""
return self.netio.cumulated_consumptions[self.outlet - 1]
@property
def start_date(self):
"""Point in time when the energy accumulation started."""
return self.netio.start_dates[self.outlet - 1]
|
#!/usr/bin/env python3.7
from multiprocessing import Process
import time
import os
from printerState import main as printerStateMain
from server import main as serverMain
from websocket import main as websocketServerMain
servicesTemplate = {
'server': {
'name': 'Server',
'run': serverMain,
'running': False
},
'printerState': {
'name': 'Printer State',
'run': printerStateMain,
'running': False
},
'websocketServer': {
'name': 'Websocket server',
'run': websocketServerMain,
'running': False
}
}
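# The manager below uses this mapping as a simple watchdog table: each entry gets
# its own daemon Process, and loop() restarts any entry whose process has died
# (checked every 4 seconds).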
class ServiceManager:
def __init__(self, services, autoStart=False):
self.log('Creating processes')
self.services = services
for serviceName in services:
newProcess = Process(target=self.services[serviceName]['run'])
newProcess.daemon = True
self.services[serviceName]['process'] = newProcess
if (autoStart):
newProcess.start()
self.log('Creating and starting process for {0} with pid {1}'.format(self.services[serviceName]['name'], newProcess.pid))
self.services[serviceName]['running'] = True
else:
self.log('Creating process for {0}'.format(self.services[serviceName]['name']))
self.services[serviceName]['running'] = False
def updateServiceState(self):
servicesRunning = []
servicesStopped = []
for serviceName in self.services:
            self.services[serviceName]['running'] = self.services[serviceName]['process'].is_alive()
if(self.services[serviceName]['running']):
servicesRunning.append(self.services[serviceName]['name'])
else:
servicesStopped.append(self.services[serviceName]['name'])
if(len(servicesStopped) != 0):
self.log('Services stopped: {0}'.format(','.join(servicesStopped)))
def restartStoppedServices(self):
for serviceName in self.services:
if (not self.services[serviceName]['running']):
self.startService(serviceName)
def startService(self, serviceName):
if(self.services[serviceName]['running']):
            self.log("Can't start a service that is already running", 'warning')
else:
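            # A multiprocessing.Process can only be started once, so drop the
            # old process object and create a fresh one before starting it.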
self.services[serviceName]['process'].terminate()
self.services[serviceName]['process'] = Process(target=self.services[serviceName]['run'])
self.services[serviceName]['process'].start()
self.log('Creating and starting process for {0} with pid {1}'.format(
self.services[serviceName]['name'],
self.services[serviceName]['process'].pid))
self.services[serviceName]['running'] = True
def loop(self):
while True:
self.updateServiceState()
self.restartStoppedServices()
time.sleep(4)
def log(self, message, level='info'):
print('{0}-[Service Manager][{2}] {1}'.format(round(time.time()), message, level))
def main():
services = ServiceManager(servicesTemplate, autoStart=True)
services.loop()
if __name__ == '__main__':
main()
|
nd(OPEN_COMMAND) # open epub
def on_post_save(self, view):
if not get_setting('auto_save'):
return
        view.run_command(SAVE_COMMAND) # save epub
###
### TextCommand
###
class EpubMakerOpenCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return is_valid_format(self.view.file_name())
def run(self, edit):
def extract(workpath, namelist):
os.makedirs(workpath)
for name in namelist:
filepath = os.path.join(workpath, name)
dirname = os.path.dirname(filepath)
                if not os.path.exists(dirname): # if the directory does not exist yet
os.makedirs(dirname)
                if os.path.isdir(filepath): # if it is a directory, skip it
continue
else:
with open(filepath, 'wb') as descriptor:
descriptor.write(epub.read(name))
def close_views(workpath, namelist):
activewindow = sublime.active_window()
activeview = activewindow.active_view()
for name in namelist:
                if name.startswith(workpath): # if it is already an absolute path
filepath = name
else:
filepath = os.path.join(workpath, name)
for window in sublime.windows():
for view in window.views():
if view.file_name() == filepath:
view.set_scratch(True)
window.focus_view(view)
window.run_command('close_file')
break
activewindow.focus_view(activeview)
def close_folders(workpath):
for window in sublime.windows():
for folder in window.folders():
if folder == workpath:
window.run_command('remove_folder', {'dirs': [folder]})
break
window.run_command('refresh_folder_list')
        # extract the archive
epubpath = self.view.file_name()
try:
epub = zipfile.ZipFile(epubpath)
except Exception as e:
            sublime.error_message('An error occurred while extracting the ePub')
            print(PACKAGE_NAME + ':open: an error occurred while extracting \'' + epubpath + '\'')
return
        # create the workspace
global WORKSPACES_PATH
workpath = os.path.join(WORKSPACES_PATH, os.path.splitext(os.path.basename(epubpath))[0])
namelist = epub.namelist()
close_views(workpath, namelist + [get_sumblime_project_path(workpath), get_epub_identifier_path(workpath), get_epub_summary_path(workpath), get_preview_path(workpath)])
close_folders(workpath)
if not os.path.exists(workpath):
extract(workpath, namelist)
        elif not sublime.ok_cancel_dialog('This ePub was opened before.\nDo you want to continue where you left off?'):
shutil.rmtree(workpath)
extract(workpath, namelist)
        # create the project files
idpath = create_epub_identifier(workpath, epubpath)
projectpath = create_sublime_project(workpath)
summarypath = create_epub_summary(workpath, epubpath)
        # close the epub view
view = self.view
window = view.window()
view.set_scratch(True)
window.focus_view(view)
window.run_command('close_file')
        # open the created project
if is_windows():
sumlpath = os.path.join(os.path.dirname(sublime.__file__), 'subl.exe')
else:
sumlpath = os.path.join(os.path.dirname(os.path.dirname(sublime.__file__)), 'SharedSupport', 'bin', 'subl')
cmd = '"' + sumlpath + '" --project "' + projectpath + '" --add "' + summarypath + '"'
if get_setting('new_window'):
cmd += ' --new-window'
subprocess.Popen(cmd, shell=True)
window.run_command('refresh_folder_list')
        sublime.status_message('Opened ePub ' + epubpath)
print(PACKAGE_NAME + ':open: \'' + epubpath + '\' -> \'' + workpath + '\'')
class EpubMakerSaveCommand(sublime_plugin.TextCommand):
def run(self, edit):
workpath = get_work_path(self.view)
if workpath is None:
return
        # find the epub-identifier
idpath = get_epub_identifier_path(workpath)
if not os.path.exists(idpath):
            sublime.error_message('Cannot find \'' + idpath + '\'')
            print(PACKAGE_NAME + ':save: cannot find \'' + idpath + '\'')
return
if get_setting('require_confirm_save'):
            if not sublime.ok_cancel_dialog('Apply the changes to the ePub file as well?'):
return
        # read the epub-identifier
idfile = open(idpath, 'r')
epubid = json.loads(idfile.read())
idfile.close()
epubpath = None
if get_setting('overwite_original'):
epubpath = epubid['src_path']
if not epubpath is None and get_setting('backup_original'):
def backup(path):
try:
shutil.copy(path, set_extension(path, get_setting('backup_extension')))
except Exception as e:
                    sublime.error_message('An error occurred while backing up \'' + epubpath + '\'')
                    print(PACKAGE_NAME + ':save: an error occurred while backing up \'' + epubpath + '\'')
backup(epubpath)
if epubpath is None:
epubpath = set_extension(os.path.join(workpath, '..', os.path.basename(workpath)), 'epub')
epub = zipfile.ZipFile(epubpath, 'w')
        # per the ePub OCF spec, the mimetype file goes into the archive first, uncompressed
epub.writestr('mimetype', 'application/epub+zip', zipfile.ZIP_STORED)
        # then add the directories and files
for root, dirs, files in os.walk(workpath):
if root == workpath:
continue
epub.write(root, root[len(workpath + os.sep):], zipfile.ZIP_STORED)
for f in files:
if is_ignore_file(f) or f == 'mimetype' or f.startswith(PREVIEW_PREFIX):
continue
f = os.path.join(root, f)
epub.write(f, f[len(workpath + os.sep):], zipfile.ZIP_DEFLATED)
epub.close()
sublime.status_message('Saved ePub ' + epubpath)
print(PACKAGE_NAME + ':save: \'' + epubpath + '\'')
class EpubMakerPreviewCommand(sublime_plugin.TextCommand):
def run(self, edit):
workpath = get_work_path(self.view)
if workpath is None:
return
filename = self.view.file_name()
if not is_valid_format(filename, ['html', 'htm', 'xhtml', 'xhtm']):
return
previewfile = open(get_resource_path('preview.html'), 'r')
preview = previewfile.read()
previewfile.close()
preview = preview.replace('#EPUB_NAME#', os.path.basename(workpath))
preview = preview.replace('#EPUB_SPINE_NAME#', os.path.basename(filename))
preview = preview.replace('#EPUB_SPINE_PATH#', filename.replace(workpath + os.sep, ''))
previewpath = get_preview_path(workpath)
with codecs.open(previewpath, 'w', 'utf-8') as html:
html.write(preview)
html.close()
sublime.active_window().run_command('side_bar_open_in_browser', {'browser': 'chromium', 'paths': [previewpath], 'type': 'testing'})
###
### Global Def (utility)
###
def get_platform_name():
return sublime.platform()
def is_windows():
return get_platform_name().startswith('windows')
def is_osx():
return get_platform_name().startswith('osx')
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
def set_extension(path=None, extension=None):
if path is None or extension is None:
return None
else:
return path + '.' + extension
def is_valid_format(filename=None, extensions=['epub']):
if filename is None or '.' not in filename:
return False
else:
return filename.rsplit('.', 1)[1] in extensions
def is_ignore_file(filename=None):
if filename is None:
return True
elif is_valid_format(filename, IGNORE_EXTENSIONS):
return True
else:
return False
def get_setting(key):
return SETTINGS[key];
def load_settings():
    settings = sublime.load_settings(PACKAGE_NAME + '.sublime-settings')
SETTINGS['new_window'] = settings.get('new_window', True)
SETTINGS['auto_save'] = settings.get('auto_save', False)
SETTINGS['require_confirm_save'] = settings.get('require_confirm_save', False)
SETTINGS['overwite_original'] = settings.get('overwite_original', True)
    SETTINGS['backup_original'] = settings.get('backup_original', True)
SETTINGS['backup_extension'] = settings.get('backup_extension', 'back')
# workpath: the assigned working path
def create_sublime_project(workpath):
if not os.path.exists(workpath):
return None
else:
projectpath = get_sumblime_project_path(workpath)
with codecs.open(projectpath, 'w', 'utf-8') as project:
project.write(json.dumps({"folders": [{"path": workpath}]}, sort_keys=True, indent=4, separators=(',', ': ')))
project.close()
return projectpath
def get_sumblime_project_path(workpath):
return set_extension(os.path.join(workpath, os.path.basename(workpath)), PROJECT_EXTENSION)
# workpath: the assigned working path
# epubpath: path of the original ePub file
def create_epub_identifier(workpath, epubpath):
if not os.path.exists(workpath):
return None
else:
idpath = get_epub_identifier_path(workpath)
with codecs.open(idpath, 'w', |
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itk
itk.auto_progress(2)
n_channels = 31
# Verify UC addition operation
vector_type = itk.VariableLengthVector[itk.UC]
vector1 = vector_type(n_channels)
vector2 = vector_type(n_channels)
assert len(vector1) == n_channels and len(vector2) == n_channels
vector1.Fill(16)
for idx in range(n_channels):
vector2[idx] = idx
sum = vector1 + vector2
print(f'UC sum: {sum}')
for idx in range(n_channels):
assert sum[idx] == 16 + idx, "Got unexpected result from vector sum"
# Verify float addition operation
vector_float_type = itk.VariableLengthVector[itk.F]
vector3 = vector_float_type(n_channels)
vector4 = vector_float_type(n_channels)
assert len(vector3) == n_channels and len(vector4) == n_channels
vector3.Fill(0.5)
for idx in range(n_channels):
vector4.SetElement(idx, 0.1 * idx)
float_sum = vector3 + vector4
print(f'float sum: {float_sum}')
tolerance = 1e-6
for idx in range(n_channels):
diff = abs(float_sum[idx] - (0.5 + 0.1 * idx))
print(f'float sum[{idx}]: {float_sum[idx]:0.9f} diff: {diff:0.2e}')
assert diff < tolerance, "Got unexpected result from vector float sum"
|
"""
Support for python 2 & 3, ripped pieces from six.py
"""
import sys
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
else:
string_types = basestring,
|
# Author: Pontus Laestadius.
# Since: 2nd of March, 2017.
# Maintained since: 17th of April 2017.
from receiver import Receiver
print("Version 2.2")
Receiver("172.24.1.1", 9005)
|
################################################################################
# #
# Copyright (C) 2010,2011,2012,2013,2014, 2015,2016 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Rectification System Setup #
# #
################################################################################
from __future__ import print_function
from math import cos, pi, sin
import numpy as np
import os
import sys
import espressomd
from espressomd import assert_features, lb
from espressomd.lbboundaries import LBBoundary
from espressomd.shapes import Cylinder, Wall, HollowCone
assert_features(["LB_GPU","LB_BOUNDARIES_GPU"])
# Setup constants
outdir = "./RESULTS_RECTIFICATION_GEOMETRY/"
try:
os.makedirs(outdir)
except:
print("INFO: Directory \"{}\" exists".format(outdir))
# Setup the box (we pad the diameter to ensure that the LB boundaries
# and therefore the constraints, are away from the edge of the box)
length = 100
diameter = 20
dt = 0.01
# Setup the MD parameters
system = espressomd.System(box_l=[length, diameter+4, diameter+4])
system.cell_system.skin = 0.1
system.time_step = dt
system.min_global_cut = 0.5
# Setup LB parameters (these are irrelevant here) and fluid
agrid = 1
vskin = 0.1
frict = 20.0
visco = 1.0
densi = 1.0
lbf = lb.LBFluidGPU(agrid=agrid, dens=densi, visc=visco, tau=dt, fric=frict)
system.actors.add(lbf)
################################################################################
#
# Now we set up the three LB boundaries that form the rectifying geometry.
# The cylinder boundary/constraint is actually already capped, but we put
# in two planes for safety's sake. If you want to create a cylinder of
# 'infinite length' using the periodic boundaries, then the cylinder must
# extend over the boundary.
#
################################################################################
# Setup cylinder
cylinder = LBBoundary(shape=Cylinder(center=[length/2.0, (diameter+4)/2.0, (diameter+4)/2.0],
axis=[1,0,0],
radius=diameter/2.0,
length=length,
direction=-1))
system.lbboundaries.add(cylinder)
# Setup walls
wall = LBBoundary(shape=Wall(dist=2, normal=[1,0,0]))
system.lbboundaries.add(wall)
wall = LBBoundary(shape=Wall(dist=-(length - 2), normal=[-1,0,0]))
system.lbboundaries.add(wall)
# Setup cone
irad = 4.0
angle = pi/4.0
orad = (diameter - irad)/sin(angle)
shift = 0.25*orad*cos(angle)
hollow_cone = LBBoundary(shape=HollowCone(position_x=length/2.0 - shift,
position_y=(diameter+4)/2.0,
position_z=(diameter+4)/2.0,
orientation_x=1,
orientation_y=0,
                                          orientation_z=0,
outer_radius=orad,
inner_radius=irad,
width=2.0,
opening_angle=angle,
direction=1))
system.lbboundaries.add(hollow_cone)
################################################################################
# Output the geometry
lbf.print_vtk_boundary("{}/boundary.vtk".format(outdir))
################################################################################
|
def process(target, other):
result = [[] for ch in target]
ret = []
    for xi, xv in enumerate(target):
for yi, yv in enumerate(other):
if xv != yv:
result[xi].append(0)
elif 0 == xi or 0 == yi:
result[xi].append(1)
else:
result[xi].append(result[xi-1][yi-1]+1)
ret.append(max(result[xi]))
return ret
def find_shortest(word_length, sub_map):
for l in range(1, word_length+1):
# print "LEN: ", l
for pos in range(l-1, word_length):
            # print "POS: ", pos
flag = True
for other in sub_map:
# print l, other[pos]
if l <= other[pos]:
flag = False
break
if flag:
return l
def solve(n, word_list):
for (xi, xv) in enumerate(word_list):
result = []
for (yi, yv) in enumerate(word_list):
if (xv != yv):
result.append(process(xv, yv))
# print xv, len(xv), result
print find_shortest(len(xv), result)
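# A small worked example of the logic above (added for illustration, not part
# of the original submission): process() fills a longest-common-substring DP
# table, so each entry of its result is the length of the longest substring
# shared with `other` that ends at that index of `target`; find_shortest()
# then returns the length of the shortest substring of the word that occurs in
# none of the other words. For the words "abc" and "abd":
#   process("abc", "abd") == [1, 2, 0]
#   find_shortest(3, [process("abc", "abd")]) == 1   # "c" already identifies "abc"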
if __name__ == '__main__':
N = int(raw_input())
WORD = []
for n in xrange(N):
WORD.append(raw_input().strip())
solve(N, WORD)
|
# -*- coding: utf-8 -*-
#
# test_enable_multithread.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
__author__ = 'sdiaz'
# Structural plasticity currently does not work with multiple threads.
# An exception should be raised if structural plasticity is enabled
# and multiple threads are set, or if multiple threads are set and
# the enable_structural_plasticity function is called.
HAVE_OPENMP = nest.sli_func("is_threaded")
@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
class TestEnableMultithread(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
def test_enable_multithread(self):
nest.ResetKernel()
nest.EnableStructuralPlasticity()
# Setting multiple threads when structural plasticity is enabled should
# throw an exception
        with self.assertRaises(nest.NESTError):
nest.SetKernelStatus(
{
'local_num_threads': 2
}
)
def test_multithread_enable(self):
nest.ResetKernel()
nest.SetKernelStatus(
{
'local_num_threads': 2
}
)
        # Enabling structural plasticity when multiple threads are set should
        # throw an exception
with self.assertRaises(nest.NESTError):
nest.EnableStructuralPlasticity()
def suite():
test_suite = unittest.makeSuite(TestEnableMultithread, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import xml.etree.ElementTree
from xml.etree.cElementTree import ElementTree, Element, SubElement
from xml.etree.cElementTree import fromstring, tostring
import fs_uae_launcher.fsui as fsui
from ..Config import Config
from ..Settings import Settings
from ..I18N import _, ngettext
class XMLControl(fsui.TextArea):
def __init__(self, parent):
fsui.TextArea.__init__(self, parent, horizontal_scroll=True)
self.path = ""
def connect_game(self, info):
tree = self.get_tree()
root = tree.getroot()
if not root.tag == "config":
return
game_node = self.find_or_create_node(root, "game")
game_node.set("uuid", info["uuid"])
game_name_node = self.find_or_create_node(game_node, "name")
game_name_node.text = info["name"]
self.set_tree(tree)
def find_or_create_node(self, element, name):
node = element.find(name)
if node is None:
node = SubElement(element, name)
return node
def set_path(self, path):
if not os.path.exists(path):
path = ""
self.path = path
if path:
self.load_xml(path)
else:
self.set_text("")
def get_tree(self):
text = self.get_text().strip()
try:
root = fromstring(text.encode("UTF-8"))
except Exception:
# FIXME: show message
import traceback
traceback.print_exc()
return
tree = ElementTree(root)
indent_tree(root)
return tree
def set_tree(self, tree):
data = tostring(tree.getroot(), encoding="UTF-8").decode("UTF-8")
std_decl = "<?xml version='1.0' encoding='UTF-8'?>"
if data.startswith(std_decl):
data = data[len(std_decl):].strip()
self.set_text(data)
def load_xml(self, path):
with open(path, "rb") as f:
data = f.read()
self.set_text(data)
def save(self):
if not self.path:
            print("no path to save XML to")
return
self.save_xml(self.path)
def save_xml(self, path):
        self.get_tree().write(path)
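# indent_tree() pretty-prints an ElementTree in place by rewriting each
# element's text/tail whitespace so that children end up indented one level
# deeper than their parent; get_tree() above applies it before returning.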
def indent_tree(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent_tree(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
|
. Usually it
is not necessary to read this -- just call GetOptions() which will
happily return the default instance. However, it's sometimes useful
for efficiency, and also useful inside the protobuf implementation to
avoid some bootstrapping issues.
"""
if _USE_C_DESCRIPTORS:
# The class, or tuple of classes, that are considered as "virtual
# subclasses" of this descriptor class.
_C_DESCRIPTOR_CLASS = ()
def __init__(self, options, options_class_name):
"""Initialize the descriptor given its options message and the name of the
class of the options message. The name of the class is required in case
the options message is None and has to be created.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2, self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
self._options = options_class()
return self._options
class _NestedDescriptorBase(DescriptorBase):
"""Common class for descriptors that can be nested."""
def __init__(self, options, options_class_name, name, full_name,
file, containing_type, serialized_start=None,
serialized_end=None):
"""Constructor.
Args:
options: Protocol message options or None
to use default message options.
options_class_name: (str) The class name of the above options.
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
file: (FileDescriptor) Reference to file info.
containing_type: if provided, this is a nested descriptor, with this
descriptor as parent, otherwise None.
serialized_start: The start index (inclusive) in block in the
file.serialized_pb that describes this descriptor.
serialized_end: The end index (exclusive) in block in the
file.serialized_pb that describes this descriptor.
"""
super(_NestedDescriptorBase, self).__init__(
options, options_class_name)
self.name = name
# TODO(falk): Add function to calculate full_name instead of having it in
# memory?
self.full_name = full_name
self.file = file
self.containing_type = containing_type
self._serialized_start = serialized_start
self._serialized_end = serialized_end
  def GetTopLevelContainingType(self):
    """Returns the root if this is a nested type, or itself if it is the root."""
desc = self
while desc.containing_type is not None:
desc = desc.containing_type
return desc
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
      Error: If self couldn't be serialized, due to too few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
"""Descriptor for a protocol message type.
A Descriptor instance has the following attributes:
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
containing_type: (Descriptor) Reference to the descriptor of the
type containing us, or None if this is top-level.
fields: (list of FieldDescriptors) Field descriptors for all
fields in this type.
fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "number" attribute in each
FieldDescriptor.
fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "name" attribute in each
FieldDescriptor.
fields_by_camelcase_name: (dict str -> FieldDescriptor) Same
FieldDescriptor objects as in |fields|, but indexed by
"camelcase_name" attribute in each FieldDescriptor.
nested_types: (list of Descriptors) Descriptor references
for all protocol message types nested within this one.
nested_types_by_name: (dict str -> Descriptor) Same Descriptor
objects as in |nested_types|, but indexed by "name" attribute
in each Descriptor.
enum_types: (list of EnumDescriptors) EnumDescriptor references
for all enums contained within this type.
enum_types_by_name: (dict str ->EnumDescriptor) Same EnumDescriptor
objects as in |enum_types|, but indexed by "name" attribute
in each EnumDescriptor.
enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
from enum value name to EnumValueDescriptor for that value.
extensions: (list of FieldDescriptor) All extensions defined directly
within this message type (NOT within a nested type).
extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
objects as |extensions|, but indexed by "name" attribute of each
FieldDescriptor.
is_extendable: Does this type define any extension ranges?
oneofs: (list of OneofDescriptor) The list of descriptors for oneof fields
in this message.
oneofs_by_name: (dict str -> OneofDescriptor) Same objects as in |oneofs|,
but indexed by "name" attribute.
file: (FileDescriptor) Reference to file descriptor.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.Descriptor
def __new__(cls, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None,
syntax=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindMessageTypeByName(full_name)
# NOTE(tmarek): The file argument redefining a builtin is nothing we can
# fix right now since we don't know how many clients already rely on the
# name of the argument.
def __init__(self, name, full_name, filename, containing_type, fields,
               nested_types, enum_types, extensions, options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None,
syntax=None): # pylint:disable=redefined-builtin
"""Arguments to __init__() are as described in the description
of Descriptor fields above.
Note that filename is an obsolete argument, that is not used anymore.
Please use file.name to access this as an attribute.
"""
    super(Descriptor, self).__init__(
options, 'MessageOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end)
# We have fields in addition to fields_by_name and fields_by_number,
# so that:
|
class APIConnectionError(Exception):
pass
class DownloadError(Exception):
pass
class ProducerAPIError(APIConnectionError):
pass
class ConsumerAPIError(APIConnectionError):
pass
|
"""
A Python interface to the primer3_core executable.
TODO: it is not possible to keep a persistent primer3 process
using subprocess module - communicate() terminates the input
stream and waits for the process to finish
Author: Libor Morkovsky 2012
"""
# This file is a part of Scrimer.
# See LICENSE.txt for details on licensing.
# Copyright (C) 2012, 2013 Libor Morkovsky
class BoulderIO:
"""Provides Python interface for ``BoulderIO`` format used by Primer3.
"""
@classmethod
def parse(self, string):
r"""Parse a BoulderIO string ``(KEY=VAL\n)``
return a list of records, where each record is a dictionary
end of the string implies a single ``'=\n'`` (record separator).
"""
record_strings = string.split("=\n")
return [dict(tuple(line.split("=", 1)) for line in record.split("\n") if len(line) > 3) for record in record_strings if len(record) > 3]
@classmethod
def deparse(self, records):
r"""Accepts a dict or a list of dicts, produces a BoulderIO string ``(KEY=VAL\n)``
with records separated by ``'=\n'``.
"""
# unify the input, create a list with single element
if type(records) == dict:
records = [records]
return "\n=\n".join("\n".join("=".join(kval) for kval in record.iteritems()) for record in records) + "\n=\n"
class Primer3:
"""Wraps Primer3 executable. `kwargs` are converted to strings and used as default parameters
for each call of primer3 binary.
"""
def __init__(self, p3path="primer3_core", **kwargs):
# store path to primer3
self.p3path = p3path
# add stringized versions of all kwargs to default args
self.default_params = {}
str_kw = dict((key, str(val)) for key, val in kwargs.iteritems())
self.default_params.update(str_kw)
def call(self, records):
"""Merge each of the records with `default_params`, the record taking precedence,
call the ``primer3`` binary,
parse the output and return a list of dictionaries,
``{RIGHT:[], LEFT:[], PAIR:[], INTERNAL:[]}`` for each input record
uppercase keys (in the result) are the original names from BoulderIO format,
lowercase keys have no direct equivalent in primer3 output (``position``, ``other-keys``)
"""
# merge the defaults with current query
full_records = [dict(self.default_params.items() + record.items()) for record in records]
# call primer3
import subprocess
self.child = subprocess.Popen([self.p3path, '-strict_tags'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.child.communicate(BoulderIO.deparse(full_records))
# simple check for errors in stderr
if len(err):
raise Exception(err)
results = BoulderIO.parse(out)
# parse the results to {RIGHT:[], LEFT:[], PAIR:[], INTERNAL:[]}
sides = ['RIGHT', 'LEFT', 'PAIR', 'INTERNAL']
primers = []
for result in results:
# primers for current result
res_primers = dict((side, []) for side in sides)
used_keys = []
for side in sides:
nret_key = 'PRIMER_%s_NUM_RETURNED' % side
nret = int(result.get(nret_key, 0))
used_keys.append(nret_key)
# extract the values for each single primer and put those to
# equivalent key
for num in xrange(nret):
template = 'PRIMER_%s_%d_' % (side, num)
primer_keys = filter(lambda k: template in k, result.iterkeys())
primer = dict((key[len(template):], result[key]) for key in primer_keys)
# extract the position, which itself has no extractible name in BoulderIO
# only 'PRIMER_LEFT_0'
if side != 'PAIR':
pos_key = template[:len(template)-1]
primer['position'] = result.get(pos_key, "#error!")
used_keys.append(pos_key)
# keep track of keys used in current record
used_keys.extend(primer_keys)
res_primers[side].append(primer)
# store all the unused keys for current result
res_primers['other-keys'] = dict((key, result[key]) for key in result.iterkeys() if key not in used_keys)
primers.append(res_primers)
return primers
if __name__ == "__main__":
print "Running tests"
import textwrap
record = BoulderIO.parse(textwrap.dedent(
"""
SEQUENCE_ID=example
SEQUENCE_TEMPLATE=GTAGTCAGTAGACGATGACTACTGACGATGCAGACNACACACACACACACAGCACACAGGTATTAGTGGGCCATTCGATCCCGACCCAAATCGATAGCTACGATGACG
SEQUENCE_TARGET=37,21
PRIMER_PICK_INTERNAL_OLIGO=0
PRIMER_OPT_SIZE=18
PRIMER_MIN_SIZE=15
PRIMER_MAX_SIZE=21
PRIMER_MAX_NS_ACCEPTED=3
PRIMER_PRODUCT_SIZE_RANGE=50-100
"""))
record_no_res = BoulderIO.parse(textwrap.dedent(
"""
SEQUENCE_ID=example
SEQUENCE_TEMPLATE=GTAGTCAGTAGACNATGACNACTGACGATGCAGACNACACACACACACACAGCACACAGGTATTAGTGGGCCATTCGATCCCGACCCAAATCGATAGCTACGATGACG
SEQUENCE_TARGET=37,21
PRIMER_TASK=pick_detection_primers
PRIMER_PICK_LEFT_PRIMER=1
PRIMER_PICK_INTERNAL_OLIGO=1
PRIMER_PICK_RIGHT_PRIMER=1
        PRIMER_OPT_SIZE=18
PRIMER_MIN_SIZE=15
PRIMER_MAX_SIZE=21
PRIMER_MAX_NS_ACCEPTED=1
PRIMER_PRODUCT_SIZE_RANGE=75-100
SEQUENCE_INTERNAL_EXCLUDED_REGION=37,21
"""))
default_params = BoulderIO.parse(textwrap.dedent(
"""
        PRIMER_THERMODYNAMIC_PARAMETERS_PATH=/opt/primer3/bin/primer3_config/
PRIMER_MAX_NS_ACCEPTED=0
PRIMER_EXPLAIN_FLAG=1
"""))[0]
print "Testing BoulderIO, single record:",
record_dp = BoulderIO.deparse(record)
record_reparsed = BoulderIO.parse(record_dp)
if record == record_reparsed:
print "OK"
else:
print "Failed!"
print "Testing BoulderIO, two records:",
two_records = record + record_no_res
record_dp = BoulderIO.deparse(two_records)
record_reparsed = BoulderIO.parse(record_dp)
if two_records == record_reparsed:
print "OK"
else:
print "Failed!"
print "Testing Primer3, single record:",
p3 = Primer3(**default_params)
# test for single record
res = p3.call(record)
if res[0]['RIGHT'][0]['SEQUENCE'] == 'GTCGGGATCGAATGGCCC':
print "OK"
else:
print "Failed!"
# test for multiple records
print "Testing Primer3, two records:",
res = p3.call(two_records)
# second record should produce no results
if len(res[1]['RIGHT']) == 0:
print "OK"
else:
print "Failed!"
# if no exception occurs, the test should be OK
print "Tests ran OK"
|
import paho.mqtt.client as mqtt
import os,binascii
import logging
import time
from enum import Enum
from threading import Timer
import json
import random
import math
ID_STRING = binascii.hexlify(os.urandom(15)).decode('utf-8')[:4]
CLIENT_ID = "robot-emulator-" + ID_STRING
BROKER_HOST = "mosquitto"
TOPIC_STATUS = "twin/%s/status" % ID_STRING
TOPIC_PLANS = "twin/%s/plans" % ID_STRING
TOPIC_REGISTRATION = "twins/registration/announce"
TOPIC_HANDSHAKE = "twins/registration/handshake"
class TwinStatus(Enum):
NOT_CONNECTED = 1
SEARCHING = 2
SELECTED = 3
CONNECTED = 4
DISCONNECTED = 5
status = TwinStatus.NOT_CONNECTED
timer = None
def main():
logging.info("Client '%s' is connecting...", CLIENT_ID)
    # Client(client_id=””, clean_session=True, userdata=None, protocol=MQTTv311, transport=”tcp”)
client = mqtt.Client(CLIENT_ID)
client.on_connect = on_connect
client.on_message = on_message
try:
client.connect(BROKER_HOST)
logging.info("Client '%s' CONNECTED to '%s'", CLIENT_ID, BROKER_HOST)
except Exception as e:
logging.error("Failed to connect to the MQTT broker on host '%s' (CLIENT_ID='%s')", BROKER_HOST, CLIENT_ID)
logging.debug(e)
client.loop_forever()
def twin_search_timeout(client, n):
if not status == TwinStatus.CONNECTED:
logging.warning("Twin connection is not established (%s)", status)
request_twin(client)
schedule_reconnect(client, n+1)
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# no need to sub to our own statuses
# sub(client, TOPIC_STATUS)
sub(client, TOPIC_PLANS)
sub(client, TOPIC_HANDSHAKE)
# client.publish(TOPIC_STATUS, "{'status': 'on'}")
request_twin(client)
schedule_reconnect(client, 1)
# TODO also publish some message on the 'registration' topic
def sub(client, topic):
client.subscribe(topic)
logging.info("Subscribed to %s", topic)
def schedule_reconnect(client, n):
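    # Exponential backoff: the delay doubles with every attempt (0.1 s, 0.2 s,
    # 0.4 s, ...), gets a small random jitter added, and is capped at 10 s.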
delay = min(0.1 * 2 ** (n-1) + (random.randint(0, 200) / 1000), 10)
logging.debug("Next reconnection attempt in %fs", delay)
timer = Timer(delay, twin_search_timeout, [client, n])
timer.start()
def request_twin(client):
client.publish(TOPIC_REGISTRATION, json.dumps({'twin': ID_STRING, 'status': 'awaiting'}))
status = TwinStatus.SEARCHING
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
logging.debug("New message '%s' (topic: '%s', QoS%d)", msg.payload, msg.topic, msg.qos)
if not msg.topic == TOPIC_STATUS:
client.publish(TOPIC_STATUS, json.dumps({'status': 'done'}))
if msg.topic == TOPIC_HANDSHAKE:
reg_reply = json.loads(msg.payload)
process_reg_reply(reg_reply, client, msg)
def process_reg_reply(reg_reply, client, msg):
if reg_reply["device"] != ID_STRING:
logging.debug("A registration message for another device received: %s", msg.payload)
else:
t = reg_reply["twin"]
logging.debug("Trying to select the twin '%s'", t)
# TODO do we really need this status?
status = TwinStatus.SELECTED
register_with_twin(t)
def register_with_twin(t):
logging.warning("Not implemented yet")
status = TwinStatus.CONNECTED
twin = t
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M')
main()
logging.warning("Client '%s' is shutting down", CLIENT_ID)
|
import unittest
import mock
from ...management.resource_servers import ResourceServers
class TestResourceServers(unittest.TestCase):
def test_init_with_optionals(self):
t = ResourceServers(domain='domain', token='jwttoken', telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get('Auth0-Client', None)
self.assertEqual(telemetry_header, None)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.create({'name': 'TestApi', 'identifier': 'https://test.com/api'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/resource-servers',
data={'name': 'TestApi', 'identifier': 'https://test.com/api'}
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_get_all(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
# with default params
r.get_all()
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers',
params={
'page': None,
'per_page': None,
'include_totals': 'false'
}
)
# with pagination params
r.get_all(page=3, per_page=27, include_totals=True)
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers',
params={
'page': 3,
'per_page': 27,
'include_totals': 'true'
}
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.get('some_id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers/some_id'
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.delete('some_id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/resource-servers/some_id'
)
    @mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.update('some_id', {'name': 'TestApi2',
'identifier': 'https://test.com/api2'})
mock_instance.patch.assert_called_with(
            'https://domain/api/v2/resource-servers/some_id',
data={'name': 'TestApi2',
'identifier': 'https://test.com/api2'}
)
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Client actions related to plist files."""
import cStringIO
import types
from grr.client import actions
from grr.client import vfs
from grr.lib import plist as plist_lib
from grr.lib import rdfvalue
from grr.parsers import binplist
class PlistQuery(actions.ActionPlugin):
"""Parses the plist request specified and returns the results.
PlistQuery allows you to obtain data from a plist, optionally only if it
matches the given filter.
Querying for a plist is done in two steps. First, its contents are
retrieved.
For plists where the top level element is a dict, you can use the key
parameter of the PlistRequest to specify a path into the dict to retrieve.
  When specifying a key, the requested key values are placed under a dictionary
key called "key".
Whether you've specified a key or not, the query parameter allows you to
filter based on the
"""
in_rdfvalue = rdfvalue.PlistRequest
out_rdfvalue = rdfvalue.RDFValueArray
MAX_PLIST_SIZE = 1024 * 1024 * 100 # 100 MB
def Run(self, args):
self.context = args.context
self.filter_query = args.query
    with vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) as fd:
data = fd.Read(self.MAX_PLIST_SIZE)
plist = binplist.readPlist(cStringIO.StringIO(data))
# Create the query parser
parser = plist_lib.PlistFilterParser(self.filter_query).Parse()
filter_imp = plist_lib.PlistFilterImplementation
matcher = parser.Compile(filter_imp)
if self.context:
# Obtain the values for the context using the value expander
value_expander = filter_imp.FILTERS["ValueExpander"]
iterator = value_expander().Expand(plist, self.context)
else:
# If we didn't get a context, the context is the whole plist
iterator = [plist]
reply = rdfvalue.RDFValueArray()
for item in iterator:
# As we're setting the context manually, we need to account for types
if isinstance(item, types.ListType):
for sub_item in item:
partial_plist = plist_lib.PlistValueToPlainValue(sub_item)
if matcher.Matches(partial_plist):
reply.Append(sub_item)
else:
partial_plist = plist_lib.PlistValueToPlainValue(item)
if matcher.Matches(partial_plist):
reply.Append(partial_plist)
self.SendReply(reply)
|
atient_relationship, uuid_pk_column
from radar.models.logs import log_changes
COUNTRIES = OrderedDict([
('AF', 'Afghanistan'),
('AX', 'Åland Islands'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia, Plurinational State of'),
('BQ', 'Bonaire, Sint Eustatius and Saba'),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos (Keeling) Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CG', 'Congo'),
('CD', 'Congo, the Democratic Republic of the'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', 'Côte d\'Ivoire'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CW', 'Curaçao'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French Southern Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HM', 'Heard Island and McDonald Islands'),
('VA', 'Holy See (Vatican City State)'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran, Islamic Republic of'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KP', 'Korea, Democratic People\'s Republic of'),
('KR', 'Korea, Republic of'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Lao People\'s Democratic Republic'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia, the former Yugoslav Republic of'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia, Federated States of'),
('MD', 'Moldova, Republic of'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory, Occupied'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Réunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('BL', 'Saint Barthélemy'),
('SH', 'Saint Helena, Ascension and Tristan da Cunha'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin (French part)'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SX', 'Sint Maarten (Dutch part)'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('GS', 'South Georgia and the South Sandwich Islands'),
('SS', 'South Sudan'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard and Jan Mayen'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan, Province of China'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania, United Republic of'),
('TH', 'Thailand'),
('TL', 'Timor-Leste'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('GB', 'United Kingdom'),
('US', 'United States'),
('UM', 'United States Minor Outlying Islands'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VE', 'Venezuela, Bolivarian Republic of'),
('VN', 'Viet Nam'),
('VG', 'Virgin Islands, British'),
    ('VI', 'Virgin Islands, U.S.'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe'),
])
@log_changes
class PatientAddress(db.Model, MetaModelMixin):
__tablename__ = 'patient_addresses'
    id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('patient_addresses')
source_group_id = Column(Integer, ForeignKey('groups.id'), nullable=False)
source_group = relationship('Group')
source_type = Column(String, nullable=False)
from_date = Column(Date)
to_date = Column(Date)
address1 = Column(String)
address2 = Column(String)
address3 = Column(String)
address4 = Column(String)
postcode = Column(String)
country = Column(String)
@property
def full_address(self):
parts = []
parts.extend([
self.address1,
self.address2,
self.address3,
self.address4,
self.postcode,
self.country,
])
return '\n'.join(x for x in parts if x)
@property
def anonymised_postcode(self):
postcode = self.postcode
if postcode is None:
anonymised_postcode = None
else:
# Postcode outbound code
anonymised_postcode = postcode.split(' ')[0][:4]
return anonymised_postcode
Index( |
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def smooth_bruckner(y, smooth_points, iterations):
y_original = y
N_data = y.size
N = smooth_points
N_float = float(N)
y = np.empty(N_data + N + N)
y[0:N].fill(y_original[0])
y[N:N + N_data] = y_original[0:N_data]
y[N + N_data:N_data + N + N].fill(y_original[-1])
y_avg = np.average(y)
y_min = np.min(y)
y_c = y_avg + 2. * (y_avg - y_min)
y[y > y_c] = y_c
window_size = N_float*2+1
for j in range(0, iterations):
window_avg = np.average(y[0: 2*N + 1])
for i in range(N, N_data - 1 - N - 1):
if y[i]>window_avg:
y_new = window_avg
#updating central value in average (first bracket)
#and shifting average by one index (second bracket)
window_avg += ((window_avg-y[i]) + (y[i+N+1]-y[i - N]))/window_size
y[i] = y_new
else:
#shifting average by one index
window_avg += (y[i+N+1]-y[i - N])/window_size
    return y[N:N + N_data]
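# A minimal usage sketch (added for illustration; the data below is synthetic
# and not part of the original module): estimate the smooth background under a
# noisy 1D pattern with one sharp peak, then subtract it.
if __name__ == "__main__":
    x = np.linspace(0, 10, 500)
    pattern = 5.0 + 0.2 * x + np.exp(-(x - 5.0) ** 2 / 0.01)  # slope plus one sharp peak
    background = smooth_bruckner(pattern, 10, 50)
    corrected = pattern - background
    print("largest residual after background subtraction:", corrected.max())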
#!/usr/bin/env python
"""
Standalone Rule
===============
This is a customer spec, parser and rule and can be run
against the local host using the following command::
$ insights-run -p examples.rules.stand_alone
or from the examples/rules directory::
$ ./stand_alone.py
"""
from __future__ import print_function
from collections import namedtuple
from insights import get_active_lines, parser, Parser
from insights import make_fail, make_pass, rule, run
from insights.core.spec_factory import SpecSet, simple_file
from insights.parsers.redhat_release import RedhatRelease
# Error key used in make_fail
ERROR_KEY = "TOO_MANY_HOSTS"
# jinja2 template displayed for rule responses
CONTENT = {
make_fail: """Too many hosts in /etc/hosts: {{num}}""",
make_pass: """Just right"""
}
class Specs(SpecSet):
""" Datasources for collection from local host """
hosts = simple_file("/etc/hosts")
@parser(Specs.hosts)
class HostParser(Parser):
"""
Parses the results of the ``hosts`` Specs
Attributes:
hosts (list): List of the namedtuple Host
which are the contents of the hosts file
including ``.ip``, ``.host``, and ``.aliases``.
"""
Host = namedtuple("Host", ["ip", "host", "aliases"])
def parse_content(self, content):
"""
Method to parse the contents of file ``/etc/hosts``
This method must be implemented by each parser.
Arguments:
content (list): List of strings that are the contents
of the /etc/hosts file.
"""
self.hosts = []
for line in get_active_lines(content):
# remove inline comments
line = line.partition("#")[0].strip()
# break the line into parts
parts = line.split()
ip, host = parts[:2]
aliases = parts[2:]
self.hosts.append(HostParser.Host(ip, host, aliases))
def __repr__(self):
""" str: Returns string representation of the class """
me = self.__class__.__name__
msg = "%s([" + ", ".join([str(d) for d in self.hosts]) + "])"
return msg % me
@rule(HostParser, RedhatRelease, content=CONTENT)
def report(hp, rhr):
"""
Rule reports a response if there is more than 1 host
entry defined in the /etc/hosts file.
Arguments:
hp (HostParser): Parser object for the custom parser in this
module.
rhr (RedhatRelease): Parser object for the /etc/redhat-release
file.
"""
if len(hp.hosts) > 1:
return make_fail("TOO_MANY_HOSTS", num=len(hp.hosts))
return make_pass("TOO_MANY_HOSTS", num=len(hp.hosts))
if __name__ == "__main__":
run(report, print_summary=True)
|
from django.shortcuts import render
from django.template.loader import render_to_string
def home(request):
context_dict = {}
return render(request,'ms2ldaviz/index.html',context_dict)
def people(request):
context_dict = {}
return render(request,'ms2ldaviz/people.html',context_dict)
def api(request):
context_dict = {}
return render(request,'ms2ldaviz/api.html',context_dict)
def user_guide(request):
    markdown_str = render_to_string('markdowns/user_guide.md')
return render(request, 'markdowns/user_guide.html', {'markdown_str':markdown_str})
def disclaimer(request):
markdown_str = render_to_string('markdowns/disclaimer.md')
return render(request, 'markdowns/disclaimer.html', {'markdown_str':markdown_str})
def confidence(request):
markdown_str = render_to_string('markdowns/confidence.md')
return render(request, 'markdowns/confidence.html', {'markdown_str':markdown_str}) |
# Speak.activity
# A simple front end to the espeak text-to-speech engine on the XO laptop
# http://wiki.laptop.org/go/Speak
#
# Copyright (C) 2008 Joshua Minor
# Copyright (C) 2014 Walter Bender
# This file is part of Speak.activity
#
# Parts of Speak.activity are based on code from Measure.activity
# Copyright (C) 2007 Arjun Sarwal - arjun@laptop.org
#
# Speak.activity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Speak.activity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Speak.activity. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from eye import Eye
from utils import svg_str_to_pixbuf
class Sleepy(Eye):
def __init__(self, fill_color):
Eye.__init__(self, fill_color)
self._pixbuf = svg_str_to_pixbuf(eye_svg())
def draw(self, widget, cr):
bounds = self.get_allocation()
# background
cr.set_source_rgba(*self.fill_color.get_rgba())
cr.rectangle(0, 0, bounds.width, bounds.height)
cr.fill()
w = h = min(bounds.width, bounds.height)
x = int((bounds.width - w) // 2)
y = int((bounds.height - h) // 2)
pixbuf = self._pixbuf.scale_simple(w, h, GdkPixbuf.InterpType.BILINEAR)
cr.translate(x + w / 2., y + h / 2.)
cr.translate(-x - w / 2., -y - h / 2.)
Gdk.cairo_set_source_pixbuf(cr, pixbuf, x, y)
cr.rectangle(x, y, w, h)
cr.fill()
return True
def eye_svg():
return \
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n' + \
'<svg\n' + \
' xmlns:svg="http://www.w3.org/2000/svg"\n' + \
' xmlns="http://www.w3.org/2000/svg"\n' + \
' version="1.1"\n' + \
' width="300"\n' + \
' height="300">\n' + \
' <path\n' + \
        ' d="m 260.26893,151.09803 c -6.07398,14.55176 -15.05894,27.89881 -26.27797,39.03563 -11.21904,11.13683 -24.66333,20.05466 -39.32004,26.08168 -14.65671,6.02702 -30.51431,9.15849 -46.37814,9.15849 -15.86384,0 -31.72144,-3.13147 -46.37815,-9.15849 C 87.257925,210.18832 73.813631,201.27049 62.594594,190.13366 51.375557,178.99684 42.3906,165.64979 36.316616,151.09803"\n' + \
        ' style="fill:none;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:13.18636799;stroke-linecap:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />\n' + \
'</svg>\n'
|
# -*- coding: utf-8 -*-
from minheap import minheap
class maxheap(minheap):
"""
Heap class - made of keys and items
methods: build_heap, heappush, heappop
"""
MAX_HEAP = True
def __str__(self):
return "Max-heap with %s items" % (len(self.heap))
def heapify(self, i):
l = self.leftchild(i)
r = self.rightchild(i)
largest = i
if l < self.max_elements() and self.heap[l] > self.heap[largest]:
largest = l
if r < self.max_elements() and self.heap[r] > self.heap[largest]:
            largest = r
if largest != i:
self.heap[i], self.heap[largest] = self.heap[largest], self.heap[i]
self.heapify(largest)
def heappush(self, x):
""" Adds a new item x in the heap"""
i = len(self.heap)
self.heap.append(x)
parent = self.parent(i)
        while parent != -1 and self.heap[int(i)] > self.heap[int(parent)]:
self.heap[int(i)], self.heap[int(parent)] = self.heap[
int(parent)], self.heap[int(i)]
i = parent
parent = self.parent(i)
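# A usage sketch in comments (illustrative only; it assumes the inherited
# minheap constructor can be called without arguments and that heappop removes
# and returns the root, as the class docstring describes):
#
#   h = maxheap()
#   for value in (3, 1, 4, 1, 5):
#       h.heappush(value)
#   largest = h.heappop()  # 5, since the root of a max-heap is the maximum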
|
from pygame import Rect
from widget import Widget
class GridView(Widget):
# cell_size (width, height) size of each cell
#
# Abstract methods:
#
# num_rows() --> no. of rows
# num_cols() --> no. of columns
# draw_cell(surface, row, col, rect)
# click_cell(row, col, event)
def __init__(self, cell_size, nrows, ncols, **kwds):
"""nrows, ncols are for calculating initial size of widget"""
Widget.__init__(self, **kwds)
self.cell_size = cell_size
        w, h = cell_size
d = 2 * self.margin
self.size = (w * ncols + d, h * nrows + d)
self.cell_size = cell_size
def draw(self, surface):
for row in xrange(self.num_rows()):
for col in xrange(self.num_cols()):
                r = self.cell_rect(row, col)
self.draw_cell(surface, row, col, r)
def cell_rect(self, row, col):
w, h = self.cell_size
d = self.margin
x = col * w + d
y = row * h + d
return Rect(x, y, w, h)
def draw_cell(self, surface, row, col, rect):
pass
def mouse_down(self, event):
x, y = event.local
w, h = self.cell_size
W, H = self.size
d = self.margin
if d <= x < W - d and d <= y < H - d:
row = (y - d) // h
col = (x - d) // w
self.click_cell(row, col, event)
def click_cell(self, row, col, event):
pass
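# A minimal concrete subclass sketch (hypothetical names; assumes a pygame
# Surface is passed to draw() by the surrounding Widget framework):
#
#     class CheckerboardView(GridView):
#         def num_rows(self): return 8
#         def num_cols(self): return 8
#         def draw_cell(self, surface, row, col, rect):
#             color = (255, 255, 255) if (row + col) % 2 else (0, 0, 0)
#             surface.fill(color, rect)
#         def click_cell(self, row, col, event):
#             print "clicked", (row, col)
#
#     view = CheckerboardView((32, 32), 8, 8)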
|
# -*- coding: utf-8 -*-
import logging
from chisch.common.retwrapper import RetWrapper
import cores
logger = logging.getLogger('django')
def signature_url(request):
params_query_dict = request.GET
params = {k: v for k, v in params_query_dict.items()}
try:
url = cores.get_url()
    except Exception, e:
return RetWrapper.wrap_and_return(e)
result = {'url': url}
return RetWrapper.wrap_and_return(result)
|
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from django.conf import settings
from . import views
products = r'/products/(?P<product>\w+)'
versions = r'/versions/(?P<versions>[;\w\.()]+)'
version = r'/versions/(?P<version>[;\w\.()]+)'
perm_legacy_redirect = settings.PERMANENT_LEGACY_REDIRECTS
urlpatterns = patterns(
'', # prefix
url('^robots\.txt$',
views.robots_txt,
name='robots_txt'),
url(r'^status/json/$',
views.status_json,
name='status_json'),
url(r'^status/revision/$',
views.status_revision,
name='status_revision'),
url(r'^crontabber-state/$',
views.crontabber_state,
name='crontabber_state'),
url('^crashes-per-day/$',
views.crashes_per_day,
name='crashes_per_day'),
url(r'^exploitability/$',
views.exploitability_report,
name='exploitability_report'),
url(r'^report/index/(?P<crash_id>[\w-]+)$',
views.report_index,
name='report_index'),
url(r'^search/quick/$',
views.quick_search,
name='quick_search'),
url(r'^buginfo/bug', views.buginfo,
name='buginfo'),
url(r'^rawdumps/(?P<crash_id>[\w-]{36})-(?P<name>\w+)\.'
r'(?P<extension>json|dmp|json\.gz)$',
views.raw_data,
name='raw_data_named'),
url(r'^rawdumps/(?P<crash_id>[\w-]{36}).(?P<extension>json|dmp)$',
views.raw_data,
name='raw_data'),
url(r'^login/$',
views.login,
name='login'),
url(r'^graphics_report/$',
views.graphics_report,
name='graphics_report'),
url(r'^about/throttling/$',
views.about_throttling,
name='about_throttling'),
# if we do a permanent redirect, the browser will "cache" the redirect and
# it will make it very hard to ever change the DEFAULT_PRODUCT
url(r'^$',
RedirectView.as_view(
url='/home/product/%s' % settings.DEFAULT_PRODUCT,
permanent=False # this is not a legacy URL
)),
# redirect deceased Advanced Search URL to Super Search
url(r'^query/$',
RedirectView.as_view(
url='/search/',
query_string=True,
            permanent=True
)),
# redirect deceased Report List URL to Signature report
url(r'^report/list$',
        RedirectView.as_view(
pattern_name='signature:signature_report',
query_string=True,
permanent=True
)),
# redirect deceased Daily Crashes URL to Crasher per Day
url(r'^daily$',
RedirectView.as_view(
pattern_name='crashstats:crashes_per_day',
query_string=True,
permanent=True
)),
# Redirect old independant pages to the unified Profile page.
url(r'^your-crashes/$',
RedirectView.as_view(
url='/profile/',
permanent=perm_legacy_redirect
)),
url(r'^permissions/$',
RedirectView.as_view(
url='/profile/',
permanent=perm_legacy_redirect
)),
# Redirect deleted status page to monitoring page.
url(
r'^status/$',
RedirectView.as_view(
pattern_name='monitoring:index',
permanent=not settings.DEBUG,
),
name='status_redirect',
),
# handle old-style URLs
url(r'^products/(?P<product>\w+)/$',
RedirectView.as_view(
url='/home/products/%(product)s',
permanent=perm_legacy_redirect
)),
url(r'^products/(?P<product>\w+)/versions/(?P<versions>[;\w\.()]+)/$',
RedirectView.as_view(
url='/home/products/%(product)s/versions/%(versions)s',
permanent=perm_legacy_redirect
)),
url('^home' + products + '/versions/$',
RedirectView.as_view(
url='/home/products/%(product)s',
permanent=perm_legacy_redirect
)),
)
|
"""Main | view for geo locator application"""
from django.shortcuts import render
def index(request):
if request.location:
location = request.location
else:
location = None
    return render(request, "homepage.html", {'location': location})
|
# Generated file. Do not edit
__author__="drone"
from Abs import Abs
from And import And
from Average import Average
from Ceil import Ceil
from Cube import Cube
from Divide import Divide
from Double import Double
from Equal import Equal
from Even import Even
from Floor import Floor
from Greaterorequal import Greaterorequal
from Greaterthan import Greaterthan
from Half import Half
from If import If
from Increment import Increment
from Lessorequal import Lessorequal
from Lessthan import Lessthan
from Max import Max
from Min import Min
from Module import Module
from Multiply import Multiply
from Negate import Negate
from Not import Not
from Odd import Odd
from One import One
from Positive import Positive
from Quadruple import Quadruple
from Sign import Sign
from Sub import Sub
from Sum import Sum
from Two import Two
from Zero import Zero
__all__ = ['Abs', 'And', 'Average', 'Ceil', 'Cube', 'Divide', 'Double', 'Equal', 'Even', 'Floor', 'Greaterorequal', 'Greaterthan', 'Half', 'If', 'Increment', 'Lessorequal', 'Lessthan', 'Max', 'Min', 'Module', 'Multiply', 'Negate', 'Not', 'Odd', 'One', 'Positive', 'Quadruple', 'Sign', 'Sub', 'Sum', 'Two', 'Zero']
|
<commandflush>
<status>Pending+Failed</status>
<mobile_devices>
<mobile_device>
<id>1</id>
</mobile_device>
<mobile_device>
<id>2</id>
</mobile_device>
</mobile_devices>
</commandflush>
Args:
data (string): XML string following the above structure or
an ElementTree/Element.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not isinstance(data, basestring):
data = ElementTree.tostring(data, encoding='UTF-8')
self.jss.delete(self.url, data)
def command_flush_for(self, id_type, command_id, status):
"""Flush commands for an individual device.
Args:
id_type (str): One of 'computers', 'computergroups',
'mobiledevices', or 'mobiledevicegroups'.
id_value (str, int, list): ID value(s) for the devices to
flush. More than one device should be passed as IDs
in a list or tuple.
status (str): One of 'Pending', 'Failed', 'Pending+Failed'.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
id_types = ('computers', 'computergroups', 'mobiledevices',
'mobiledevicegroups')
status_types = ('Pending', 'Failed', 'Pending+Failed')
if id_type not in id_types or status not in status_types:
raise ValueError("Invalid arguments.")
if isinstance(command_id, list):
command_id = ",".join(str(item) for item in command_id)
flush_url = "{}/{}/id/{}/status/{}".format(
self.url, id_type, command_id, status)
self.jss.delete(flush_url)
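        # Example call (hypothetical instance `obj` and device IDs; flushes
        # pending and failed commands for mobile devices 1 and 2):
        #   obj.command_flush_for("mobiledevices", [1, 2], "Pending+Failed")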
# pylint: disable=too-few-public-methods
class FileUpload(object):
"""FileUploads are a special case in the API. They allow you to add
file resources to a number of objects on the JSS.
To use, instantiate a new FileUpload object, then use the save()
method to upload.
Once the upload has been posted you may only interact with it
through the web interface. You cannot list/get it or delete it
through the API.
However, you can reuse the FileUpload object if you wish, by
changing the parameters, and issuing another save().
"""
_endpoint_path = "fileuploads"
allowed_kwargs = ('subset',)
def __init__(self, j, resource_type, id_type, _id, resource):
"""Prepare a new FileUpload.
Args:
j: A JSS object to POST the upload to.
resource_type:
String. Acceptable Values:
Attachments:
computers
mobiledevices
enrollmentprofiles
peripherals
mobiledeviceenrollmentprofiles
Icons:
policies
ebooks
mobiledeviceapplicationsicon
Mobile Device Application:
mobiledeviceapplicationsipa
Disk Encryption
diskencryptionconfigurations
diskencryptions (synonymous)
PPD
printers
id_type:
String of desired ID type:
id
name
_id: Int or String referencing the identity value of the
resource to add the FileUpload to.
resource: String path to the file to upload.
"""
resource_types = ["computers", "mobiledevices", "enrollmentprofiles",
"peripherals", "mobiledeviceenrollmentprofiles",
"policies", "ebooks", "mobiledeviceapplicationsicon",
"mobiledeviceapplicationsipa",
"diskencryptionconfigurations", "printers"]
id_types = ["id", "name"]
self.jss = j
# Do some basic error checking on parameters.
if resource_type in resource_types:
self.resource_type = resource_type
else:
raise TypeError(
"resource_type must be one of: %s" % ', '.join(resource_types))
if id_type in id_types:
self.id_type = id_type
else:
raise TypeError("id_type must be one of: %s" % ', '.join(id_types))
self._id = str(_id)
basename = os.path.basename(resource)
content_type = mimetypes.guess_type(basename)[0]
self.resource = {"name": (basename, open(resource, "rb"),
content_type)}
self._set_upload_url()
def _set_upload_url(self):
"""Generate the full URL for a POST."""
# pylint: disable=protected-access
self._upload_url = "/".join([
            self.jss._url, self._endpoint_path, self.resource_type,
self.id_type, str(self._id)])
# pylint: enable=protected-access
def save(self):
"""POST the object to the JSS."""
try:
response = self.jss.session.post(
self._upload_url, files=self.resource)
except PostError as error:
if error.status_code == 409:
raise PostError(error)
            else:
raise MethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print("POST: Success")
print(response.content)
elif response.status_code >= 400:
error_handler(PostError, response)
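# A minimal usage sketch (hypothetical JSS connection `jss`, policy ID and
# icon path; not part of the original module):
#
#     upload = FileUpload(jss, "policies", "id", 42, "/tmp/icon.png")
#     upload.save()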
class LogFlush(object):
_endpoint_path = "logflush"
def __init__(self, jss):
"""Initialize a new LogFlush
Args:
jss: JSS object.
"""
self.jss = jss
@property
def url(self):
"""Return the path subcomponent of the url to this object."""
return self._endpoint_path
def log_flush_with_xml(self, data):
"""Flush logs for devices with a supplied xml string.
From the Casper API docs:
log, log_id, interval, and devices specified in an XML file.
Sample file:
<logflush>
<log>policy</log>
<log_id>2</log_id>
<interval>THREE MONTHS</interval>
<computers>
<computer>
<id>1</id>
</computer>
<computer>
<id>2</id>
</computer>
</computers>
</logflush>
Args:
data (string): XML string following the above structure or
an ElementTree/Element.
Elements:
logflush (root)
log (Unknown; "policy" is the only one listed in
docs).
log_id: Log ID value.
interval: Combination of "Zero", "One", "Two",
"Three", "Six", and "Day", "Week", "Month",
"Year". e.g. ("Three+Months")
Please note: The documentation for this
specifies the singular form (e.g. "Month"),
and plural ("Months") at different times, and
further the construction is listed as
"THREE MONTHS" elsewhere. Limited testing
indicates that pluralization does not matter,
nor does capitalization. The "+" seems optional
as well.
Please test!
Device Arrays:
Again, acceptable values are not listed in the
docs, aside from the example ("computers").
Presumably "mobiledevices", and possibly
"computergroups" and "mobiledevicegroups" work.
Raises:
DeleteError if provid |
from landscape.client.tests.helpers import LandscapeTest
from landscape.client.patch import UpgradeManager
from landscape.client.upgraders import monitor
class TestMonitorUpgraders(LandscapeTest):
def test_monitor_upgrade_manager(self):
self.assertEqual(type(monitor.upgrade_manager), UpgradeManager)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
##############################################################################
# Configuration parameters for Google App Engine
##############################################################################
KEEP_CACHED = False # request a dummy url every 10secs to force caching app
LOG_STATS = False # web2py level log statistics
APPSTATS = True # GAE level usage statistics and profiling
DEBUG = False # debug mode
AUTO_RETRY = True # force gae to retry commit on failure
#
# Read more about APPSTATS here
# http://googleappengine.blogspot.com/2010/03/easy-performance-profiling-with.html
# can be accessed from:
# http://localhost:8080/_ah/stats
##############################################################################
# All tricks in this file developed by Robin Bhattacharyya
##############################################################################
import time
import os
import sys
import logging
import cPickle
import pickle
import wsgiref.handlers
import datetime
path = os.path.dirname(os.path.abspath(__file__))
sys.path = [path]+[p for p in sys.path if not p==path]
sys.modules['cPickle'] = sys.modules['pickle']
from gluon.settings import global_settings
from google.appengine.api.labs import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
global_settings.web2py_runtime_gae = True
global_settings.db_sessions = True
if os.environ.get('SERVER_SOFTWARE', '').startswith('Devel'):
(global_settings.web2py_runtime, DEBUG) = \
('gae:development', True)
else:
(global_settings.web2py_runtime, DEBUG) = \
('gae:production', False)
import gluon.main
def log_stats(fun):
"""Function that will act as a decorator to make logging"""
def newfun(env, res):
"""Log the execution time of the passed function"""
timer = lambda t: (t.time(), t.clock())
(t0, c0) = timer(time)
executed_function = fun(env, res)
(t1, c1) = timer(time)
log_info = """**** Request: %.2fms/%.2fms (real time/cpu time)"""
log_info = log_info % ((t1 - t0) * 1000, (c1 - c0) * 1000)
logging.info(log_info)
return executed_function
return newfun
logging.basicConfig(level=logging.INFO)
def wsgiapp(env, res):
"""Return the wsgiapp"""
if env['PATH_INFO'] == '/_ah/queue/default':
if KEEP_CACHED:
delta = datetime.timedelta(seconds=10)
taskqueue.add(eta=datetime.datetime.now() + delta)
res('200 OK',[('Content-Type','text/plain')])
return ['']
env['PATH_INFO'] = env['PATH_INFO'].encode('utf8')
return gluon.main.wsgibase(env, res)
if LOG_STATS or DEBUG:
wsgiapp = log_stats(wsgiapp)
if AUTO_RETRY:
from gluon.contrib.gae_retry import autoretry_datastore_timeouts
autoretry_datastore_timeouts()
def main():
"""Run the wsgi app"""
if APPSTATS:
run_wsgi_app(wsgiapp)
else:
wsgiref.handlers.CGIHandler().run(wsgiapp)
if __name__ == '__main__':
main()
|
from typing import (Tuple,
List)
import matplotlib
# More info at
# http://matplotlib.org/faq/usage_faq.html#what-is-a-backend for details
# TODO: use this: https://stackoverflow.com/a/37605654/7851470
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.axes import Axes
import numpy as np
import pandas as pd
from .utils import to_cartesian_from_equatorial
# Kinematic properties of the thin disk taken from the paper of
# N.Rowell and N.C.Hambly (mean motions are relative to the Sun):
# "White dwarfs in the SuperCOSMOS Sky Survey: the thin disc,
# thick disc and spheroid luminosity functions"
# Mon. Not. R. Astron. Soc. 417, 93–113 (2011)
# doi:10.1111/j.1365-2966.2011.18976.x
AVERAGE_POPULATION_VELOCITY_U = -8.62
AVERAGE_POPULATION_VELOCITY_V = -20.04
AVERAGE_POPULATION_VELOCITY_W = -7.1
STD_POPULATION_U = 32.4
STD_POPULATION_V = 23
STD_POPULATION_W = 18.1
def plot(stars: pd.DataFrame,
*,
filename: str = 'velocity_clouds.ps',
figure_size: Tuple[float, float] = (8, 12),
spacing: float = 0.25,
u_label: str = '$U(km/s)$',
v_label: str = '$V(km/s)$',
w_label: str = '$W(km/s)$',
u_limits: Tuple[float, float] = (-150, 150),
v_limits: Tuple[float, float] = (-150, 150),
w_limits: Tuple[float, float] = (-150, 150)) -> None:
figure, (uv_subplot,
uw_subplot,
vw_subplot) = plt.subplots(nrows=3,
figsize=figure_size)
draw_subplot(subplot=uv_subplot,
xlabel=u_label,
ylabel=v_label,
xlim=u_limits,
ylim=v_limits,
x=stars['u_velocity'],
y=stars['v_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_V,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_V)
draw_subplot(subplot=uw_subplot,
xlabel=u_label,
ylabel=w_label,
xlim=u_limits,
ylim=w_limits,
x=stars['u_velocity'],
y=stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_W)
draw_subplot(subplot=vw_subplot,
xlabel=v_label,
ylabel=w_label,
xlim=v_limits,
ylim=w_limits,
x=stars['v_velocity'],
y=stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_V,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_V,
y_std=STD_POPULATION_W)
figure.subplots_adjust(hspace=spacing)
plt.savefig(filename)
def plot_lepine_case(stars: pd.DataFrame,
*,
filename: str = 'velocity_clouds.ps',
figure_size: Tuple[float, float] = (8, 12),
spacing: float = 0.25,
u_label: str = '$U(km/s)$',
v_label: str = '$V(km/s)$',
w_label: str = '$W(km/s)$',
u_limits: Tuple[float, float] = (-150, 150),
v_limits: Tuple[float, float] = (-150, 150),
w_limits: Tuple[float, float] = (-150, 150)) -> None:
x_coordinates, y_coordinates, z_coordinates = to_cartesian_from_equatorial(
stars)
highest_coordinates = np.maximum.reduce([np.abs(x_coordinates),
np.abs(y_coordinates),
np.abs(z_coordinates)])
uv_cloud_stars = stars[(highest_coordinates == z_coordinates)]
uw_cloud_stars = stars[(highest_coordinates == y_coordinates)]
vw_cloud_stars = stars[(highest_coordinates == x_coordinates)]
figure, (uv_subplot,
uw_subplot,
vw_subplot) = plt.subplots(nrows=3,
figsize=figure_size)
draw_subplot(subplot=uv_subplot,
xlabel=u_label,
ylabel=v_label,
xlim=u_limits,
ylim=v_limits,
x=uv_cloud_stars['u_velocity'],
y=uv_cloud_stars['v_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_V,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_V)
draw_subplot(subplot=uw_subplot,
xlabel=u_label,
ylabel=w_label,
xlim=u_limits,
ylim=w_limits,
                 x=uw_cloud_stars['u_velocity'],
y=uw_cloud_stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_W)
draw_subplot(subplot=vw_subplot,
xlabel=v_label,
ylabel=w_label,
xlim=v_limits,
ylim=w_limits,
x=vw_cloud_stars['v_velocity'],
                 y=vw_cloud_stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_V,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_V,
y_std=STD_POPULATION_W)
figure.subplots_adjust(hspace=spacing)
plt.savefig(filename)
def draw_subplot(*,
subplot: Axes,
xlabel: str,
ylabel: str,
xlim: Tuple[float, float],
ylim: Tuple[float, float],
x: List[float],
y: List[float],
cloud_color: str = 'k',
point_size: float = 0.5,
x_avg: float,
y_avg: float,
x_std: float,
y_std: float,
ratio: float = 10 / 13) -> None:
subplot.set(xlabel=xlabel,
ylabel=ylabel,
xlim=xlim,
ylim=ylim)
subplot.scatter(x=x,
y=y,
color=cloud_color,
s=point_size)
plot_ellipses(subplot=subplot,
x_avg=x_avg,
y_avg=y_avg,
x_std=x_std,
y_std=y_std)
subplot.minorticks_on()
subplot.xaxis.set_ticks_position('both')
subplot.yaxis.set_ticks_position('both')
subplot.set_aspect(ratio / subplot.get_data_ratio())
def plot_ellipses(subplot: Axes,
x_avg: float,
y_avg: float,
x_std: float,
y_std: float,
ellipse_color: str = 'b') -> None:
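    # The dashed ellipse spans one standard deviation on each axis (total
    # width/height of 2 * std); the solid one spans two (4 * std).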
std_ellipse = Ellipse(xy=(x_avg, y_avg),
width=x_std * 2,
height=y_std * 2,
fill=False,
edgecolor=ellipse_color,
linestyle='dashed')
double_std_ellipse = Ellipse(xy=(x_avg, y_avg),
width=x_std * 4,
height=y_std * 4,
fill=False,
edgecolor=ellipse_color)
subplot.add_artist(std_ellipse)
subplot.add_artist(double_std_ellipse)
|
#!/usr/bin/python
from typing import List, Optional
"""
16. 3Sum Closest
https://leetcode.com/problems/3sum-closest/
"""
def bsearch(nums, left, right, res, i, j, target):
while left <= right:
        middle = (left + right) // 2
candidate = nums[i] + nums[j] + nums[middle]
        if res is None or abs(candidate - target) < abs(res - target):
res = candidate
if candidate == target:
return res
elif candidate > target:
right = middle - 1
else:
left = middle + 1
return res
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> Optional[int]:
res = None
nums = sorted(nums)
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
res = bsearch(nums, j + 1, len(nums) - 1, res, i, j, target)
return res
def main():
sol = Solution()
print(sol.threeSumClosest([-111, -111, 3, 6, 7, 16, 17, 18, 19], 13))
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
"""Generate test data for IDTxl network comparison unit and system tests.
Generate test data for IDTxl network comparison unit and system tests. Simulate
discrete and continuous data from three correlated Gaussian data sets. Perform
network inference using bivariate/multivariate mutual information (MI)/transfer
entropy (TE) analysis. Results are saved and used for unit and system testing of
network comparison (systemtest_network_comparison.py).
A coupling is simulated as a lagged, linear correlation between three Gaussian
variables and looks like this:
1 -> 2 -> 3 with a delay of 1 sample for each coupling
"""
import pickle
import numpy as np
from idtxl.multivariate_te import MultivariateTE
from idtxl.bivariate_te import BivariateTE
from idtxl.multivariate_mi import MultivariateMI
from idtxl.bivariate_mi import BivariateMI
from idtxl.estimators_jidt import JidtDiscreteCMI
from idtxl.data import Data
# path = os.path.join(os.path.dirname(__file__) + '/data/')
path = 'data/'
def analyse_mute_te_data():
    # Generate example data: the following was run once to generate example
# data, which is now in the data sub-folder of the test-folder.
data = Data()
data.generate_mute_data(100, 5)
# analysis settings
settings = {
'cmi_estimator': 'JidtKraskovCMI',
'n_perm_max_stat': 50,
'n_perm_min_stat': 50,
'n_perm_omnibus': 200,
'n_perm_max_seq': 50,
'max_lag_target': 5,
'max_lag_sources': 5,
'min_lag_sources': 1,
'permute_in_time': True
}
# network inference for individual data sets
nw_0 = MultivariateTE()
res_0 = nw_0.analyse_network(
settings, data, targets=[0, 1], sources='all')
pickle.dump(res_0, open(path + 'mute_results_0.p', 'wb'))
res_1 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_1, open(path + 'mute_results_1.p', 'wb'))
res_2 = nw_0.analyse_network(
settings, data, targets=[0, 2], sources='all')
pickle.dump(res_2, open(path + 'mute_results_2.p', 'wb'))
res_3 = nw_0.analyse_network(
settings, data, targets=[0, 1, 2], sources='all')
pickle.dump(res_3, open(path + 'mute_results_3.p', 'wb'))
res_4 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_4, open(path + 'mute_results_4.p', 'wb'))
res_5 = nw_0.analyse_network(settings, data)
pickle.dump(res_5, open(path + 'mute_results_full.p', 'wb'))
def generate_discrete_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=True)
data = Data(d, dim_order='psr', normalise=False)
return data
def generate_continuous_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=False)
data = Data(d, dim_order='psr', normalise=True)
return data
def generate_gauss_data(n_replications=1, discrete=False):
settings = {'discretise_method': 'equal',
'n_discrete_bins': 5}
est = JidtDiscreteCMI(settings)
covariance_1 = 0.4
covariance_2 = 0.3
n = 10000
delay = 1
if discrete:
d = np.zeros((3, n - 2*delay, n_replications), dtype=int)
else:
d = np.zeros((3, n - 2*delay, n_replications))
for r in range(n_replications):
proc_1 = np.random.normal(0, 1, size=n)
proc_2 = (covariance_1 * proc_1 + (1 - covariance_1) *
np.random.normal(0, 1, size=n))
proc_3 = (covariance_2 * proc_2 + (1 - covariance_2) *
np.random.normal(0, 1, size=n))
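        # The instantaneous mixing above becomes a 1-sample lagged coupling
        # (1 -> 2 -> 3) once the three series are shifted against each other
        # below: proc_1 is trimmed at the start, proc_3 at the end, proc_2
        # on both sides.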
proc_1 = proc_1[(2*delay):]
proc_2 = proc_2[delay:-delay]
proc_3 = proc_3[:-(2*delay)]
if discrete: # discretise data
proc_1_dis, proc_2_dis = est._discretise_vars(
var1=proc_1, var2=proc_2)
proc_1_dis, proc_3_dis = est._discretise_vars(
var1=proc_1, var2=proc_3)
d[0, :, r] = proc_1_dis
d[1, :, r] = proc_2_dis
d[2, :, r] = proc_3_dis
else:
d[0, :, r] = proc_1
d[1, :, r] = proc_2
d[2, :, r] = proc_3
return d
def analyse_discrete_data():
"""Run network inference on discrete data."""
data = generate_discrete_data()
settings = {
'cmi_estimator': 'JidtDiscreteCMI',
'discretise_method': 'none',
'n_discrete_bins': 5, # alphabet size of the variables analysed
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = MultivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
def analyse_continuous_data():
"""Run network inference on continuous data."""
data = generate_continuous_data()
settings = {
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
        pickle.dump(res, open('{0}continuous_results_mte_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
        res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bte_{1}.p'.format(
path, estimator), 'wb'))
nw = MultivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_mmi_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bmi_{1}.p'.format(
path, estimator), 'wb'))
def assert_results():
for algo in ['mmi', 'mte', 'bmi', 'bte']:
# Test continuous data:
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
res = pickle.load(open(
'data/continuous_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
# Test discrete data:
estimator = 'JidtDiscreteCMI'
res = pickle.load(open(
'data/discrete_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
def _print_result(res):
res.adjacency_matrix.print_matrix()
tp = 0
fp = 0
if res.adjacency_matrix._edge_matrix[0, 1] == True: tp += 1
if res.adjacency_matrix._edge_matrix[1, 2] == True: tp += 1
if res.adjacency_matrix._edge_matrix[0, 2] == True: fp += 1
fn = 2 - tp
print('TP: {0}, FP: {1}, FN: {2}'.format(tp, fp, fn))
if __name__ == '__main__':
analyse_discrete_data()
analyse_mute_te_data()
analyse_continuous_data()
    assert_results()
import sublime
from . import SblmCmmnFnctns
class Spinner:
SYMBOLS_ROW = u'←↑→↓'
SYMBOLS_BOX = u'⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
def __init__(self, symbols, view, startStr, endStr):
self.symbols = symbols
self.length = len(symbols)
self.position = 0
self.stopFlag = False
self.view = view
self.startStr = startStr
self.endStr = endStr
def __next__(self):
self.position = self.position + 1
        return self.startStr + self.symbols[self.position % self.length] + self.endStr
def start(self):
if not self.stopFlag:
self.view.set_status(SblmCmmnFnctns.SUBLIME_STATUS_SPINNER, self.__next__())
sublime.set_timeout(lambda: self.start(), 300)
def stop(self):
self.view.erase_status(SblmCmmnFnctns.SUBLIME_STATUS_SPINNER)
self.stopFlag = True
|
right=u"0cm", text_indent=u"0cm",
**{'style:auto-text-indent': u"false"})
doc.insert_style(definition_style, automatic=False)
styles['definition'] = definition_style
return definition_style
def convert_definition_list(node, context):
"""Convert a list of term/definition pairs to styled paragraphs.
    The "Definition List Term" style is used for term paragraphs, and the
    "Definition List Definition" style is used for definition paragraphs.
"""
styles = context['styles']
term_style = _get_term_style(context).get_style_name()
definition_style = _get_definition_style(context).get_style_name()
for item in node:
if item.tagname != "definition_list_item":
printwarn('node "%s" not supported in definition_list' % (
item.tagname))
continue
for child in item:
tagname = child.tagname
if tagname == "term":
paragraph = odf_create_paragraph(text=child.astext(),
style=term_style)
context["top"].append(paragraph)
elif tagname == "definition":
# Push a style on the stack for next paragraphs to use
styles['paragraph'] = definition_style
for subchildren in child:
convert_node(subchildren, context)
# Pop the paragraph style
del styles['paragraph']
else:
printwarn('node "%s" not supported in definition_list_item' %
tagname)
def convert_block_quote(node, context):
# TODO Add the style
for child in node:
convert_node(child, context)
def _get_caption_style(context):
styles = context['styles']
caption_style = styles.get('caption')
if caption_style is not None:
return caption_style
caption_style = odf_create_style('graphic', parent=u"Frame",
**{'style:wrap': u"none", 'style:vertical-pos': u"top",
'style:vertical-rel': u"paragraph-content",
'style:horizontal-pos': u"center",
'style:horizontal-rel': u"paragraph-content",
'fo:padding': u"0.25cm", 'fo:border': u"0cm solid #000000"})
context['doc'].insert_style(caption_style, automatic=True)
styles['caption'] = caption_style
return caption_style
def _get_image_style(context):
styles = context['styles']
image_style = styles.get('image')
if image_style is not None:
return image_style
image_style = odf_create_style('graphic', parent="Graphics",
**{'style:horizontal-pos': u"center",
'style:horizontal-rel': u"paragraph"})
context['doc'].insert_style(image_style, automatic=True)
styles['image'] = image_style
return image_style
def _add_image(image, caption, context, width=None, height=None):
# Load the image to find its size
encoding = stdout.encoding if stdout.encoding is not None else "utf-8"
try:
image_file = open(image.encode(encoding), 'rb')
image_object = Image.open(image_file)
except (UnicodeEncodeError, IOError, OverflowError), e:
printwarn('unable to insert the image "%s": %s' % (image, e))
return
size = image_object.size
# Convert pixels to inches
if width:
try:
width = int(width.replace('px', ''))
except ValueError:
raise NotImplementedError, 'only pixel units supported'
if height:
try:
height = int(height)
except ValueError:
raise NotImplementedError, 'only pixel units supported'
else:
height = int(width / (float(size[0]) / float(size[1])))
size = (width, height)
elif height:
try:
height = int(height.replace('px', ''))
except ValueError:
raise NotImplementedError, 'only pixel units supported'
width = int(height * (float(size[0]) / float(size[1])))
size = (width, height)
size = ("%sin" % (float(size[0]) / DPI), "%sin" % (float(size[1]) / DPI))
# Add the image
local_uri = context["doc"].add_file(image)
# Frame style for the caption frame
caption_style = _get_caption_style(context).get_style_name()
# Frame style for the image frame
image_style = _get_image_style(context).get_style_name()
# In text application, image must be inserted in a paragraph
if context["top"].get_tag() == "office:text":
container = odf_create_paragraph()
context["top"].append(container)
else:
container = context["top"]
if caption:
paragraph = odf_create_paragraph()
image_frame = odf_create_image_frame(local_uri, size=size,
style=image_style)
paragraph.append(image_frame)
paragraph.append(caption)
# A new frame, we fix only the width
text_frame = odf_create_text_frame(paragraph, size=(size[0], None),
style=caption_style)
container.append(text_frame)
else:
image_frame = odf_create_image_frame(local_uri, size=size,
style=image_style)
container.append(image_frame)
def convert_image(node, context):
image = node.get("uri")
width = node.get('width')
height = node.get('height')
_add_image(image, None, context, width=width, height=height)
def convert_figure(node, context):
image = None
caption = None
width = None
height = None
for child in node:
tagname = child.tagname
if tagname == "image":
if image is not None:
printwarn("unexpected duplicate image in a figure")
continue
image = child.get("uri")
width = child.get('width')
height = child.get('height')
elif tagname == "caption":
if caption is not None:
printwarn("unexpected duplicate caption in a figure")
continue
caption = child.astext()
_add_image(image, caption, context, width=width, height=height)
def _convert_table_rows(container, node, context, cell_style=None):
for row in node:
if row.tagname != "row":
printwarn('node "%s" not supported in thead/tbody' % row.tagname)
continue
odf_row = odf_create_row()
container.append(odf_row)
for entry in row:
if entry.tagname != "entry":
printwarn('node "%s" not supported in row' % entry.tagname)
continue
            # Create a new odf_cell
            odf_cell = odf_create_cell(cell_type="string", style=cell_style)
odf_row.append(odf_cell)
# XXX We don't add table:covered-table-cell !
# It's bad but OO can nevertheless load the file
morecols = entry.get("morecols")
if morecols is not None:
morecols = int(morecols) + 1
odf_cell.set_attribute('table:number-columns-spanned',
str(morecols))
morerows = entry.get("morerows")
if morerows is not None:
morerows = int(morerows) + 1
odf_cell.set_attribute('table:number-rows-spanned',
str(morerows))
# Save the current top
old_top = context["top"]
# Convert
context["top"] = odf_cell
for child in entry:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def _get_cell_style(context):
styles = context['styles']
cell_style = styles.get('cell')
if cell_style is not None:
return cell_style
# Give borders to cells
cell_style = odf_create_style('table-cell', u"odf_table.A1",
padding=u"0.049cm", border=u"0.002cm solid #000000")
context['doc'].insert_style(cell_style, automatic=True)
styles['cell'] = cell_style
return cell_style
def convert_table(node, context):
    cell_style = _get_cell_style(
,
"MAF",
"MDG",
"MHL",
"MKD",
"MLI",
"MMR",
"MNG",
"MAC",
"MNP",
"MTQ",
"MRT",
"MSR",
"MLT",
"MUS",
"MDV",
"MWI",
"MEX",
"MYS",
"MOZ",
"NAM",
"NCL",
"NER",
"NFK",
"NGA",
"NIC",
"NLD",
"NOR",
"NPL",
"NRU",
"NIU",
"NZL",
"OMN",
"PAN",
"PER",
"PYF",
"PNG",
"PHL",
"PAK",
"POL",
"SPM",
"PCN",
"PRI",
"PSE",
"PRT",
"PLW",
"PRY",
"QAT",
"REU",
"ROU",
"SRB",
"RUS",
"RWA",
"SAU",
"SLB",
"SYC",
"SDN",
"SWE",
"SGP",
"SHN",
"SVN",
"SJM",
"SVK",
"SLE",
"SMR",
"SEN",
"SOM",
"SUR",
"SSD",
"STP",
"SLV",
"SYR",
"SWZ",
"TCA",
"TCD",
"ATF",
"TGO",
"THA",
"TJK",
"TKL",
"TLS",
"TKM",
"TUN",
"TON",
"TUR",
"TTO",
"TUV",
"TWN",
"TZA",
"UKR",
"UGA",
"UMI",
"USA",
"URY",
"UZB",
"VAT",
"VCT",
"VEN",
"VGB",
"VIR",
"VNM",
"VUT",
"WLF",
"WSM",
"YEM",
"MYT",
"ZAF",
"ZMB",
"ZWE",
],
"fifa": [
"AFG",
"AIA",
"ALB",
"ALG",
"AND",
"ANG",
"ARG",
"ARM",
"ARU",
"ARU",
"ASA",
"ATG",
"AUT",
"AZE",
"BAH",
"BAN",
"BDI",
"BEL",
"BEN",
"BER",
"BFA",
"BHR",
"BHU",
"BIH",
"BLR",
"BLZ",
"BOE",
"BOL",
"BOT",
"BRA",
"BRB",
"BRU",
"BUL",
"CAM",
"CAN",
"CAY",
"CGO",
"CHA",
"CHI",
"CHN",
"CIV",
"CMR",
"COD",
"COK",
"COL",
"COM",
"CPV",
"CRC",
"CRO",
"CTA",
"CUB",
"CUW",
"CYP",
"CZE",
"DEN",
"DJI",
"DMA",
"DOM",
"ECU",
"EGY",
"ENG",
"EQG",
"ERI",
"ESP",
"EST",
"ETH",
"FIJ",
"FIN",
"FRA",
"FRO",
"GAB",
"GAM",
"GEO",
"GER",
"GHA",
"GIB",
"GNB",
"GPE",
"GRE",
"GRN",
"GUA",
"GUI",
"GUM",
"GUY",
"GYF",
"HAI",
"HKG",
"HON",
"HUN",
"IDN",
"IND",
"IRL",
"IRN",
"IRQ",
"ISL",
"ISR",
"ITA",
"JAM",
"JOR",
"JPN",
"KAZ",
"KEN",
"KGZ",
"KIR",
"KOR",
"KSA",
"KUW",
"LAO",
"LBR",
"LBY",
"LCA",
"LES",
"LIB",
"LIE",
"LTU",
"LUX",
"LVA",
"MAC",
"MAD",
"MAR",
"MAS",
"MDA",
"MDV",
"MEX",
"MKD",
"MLI",
"MLT",
"MNE",
"MNG",
"MOZ",
"MRI",
"MSR",
"MTN",
"MTQ",
"MWI",
"MYA",
"NAM",
"NCA",
"NCL",
"NED",
"NEP",
"NGA",
"NIG",
"NIR",
"NIU",
"NMI",
"NOR",
"NZL",
"OMA",
"PAK",
"PAN",
"PAR",
"PER",
"PHI",
"PLE",
"PNG",
"POL",
"POR",
"PRK",
"PUR",
"QAT",
"REU",
"ROU",
"RSA",
"RUS",
"RWA",
"SAM",
"SCO",
"SDN",
"SEN",
"SEY",
"SIN",
"SKN",
"SLE",
"SLV",
"SMR",
"SMT",
"SOL",
"SOM",
"SRB",
"SRI",
"SSD",
"STP",
"SUI",
"SUR",
"SVK",
"SVN",
"SWE",
"SWZ",
"SXM",
"SYR",
"TAH",
"TAN",
"TCA",
"TGA",
"THA",
"TJK",
"TKM",
"TLS",
"TOG",
"TPE",
"TRI",
"TUN",
"TUR",
"TUV",
"UAE",
"UGA",
"UKR",
"URU",
"USA",
"UZB",
"VAN",
"VEN",
"VGB",
"VIE",
"VIN",
"VIR",
"WAL",
"YEM",
"ZAM",
"ZAN",
"ZIM",
],
"ioc": [
"AFG",
"ALB",
"ALG",
"AND",
"ANG",
"ANT",
"ARG",
"ARM",
"ARU",
"ASA",
"AUS",
"AUT",
"AZE",
"BAH",
"BAN",
"BAR",
"BDI",
"BEL",
"BEN",
"BER",
"BHU",
"BIH",
"BIZ",
"BLR",
"BOL",
"BOT",
"BRA",
"BRN",
"BRU",
"BUL",
"BUR",
"CAF",
"CAM",
"CAN",
"CAY",
"CGO",
"CHA",
"CHI",
"CHN",
"CIV",
"CMR",
"COD",
"COK",
"COL",
"COM",
"CPV",
"CRC",
"CRO",
"CUB",
"CYP",
"CZE",
"DEN",
"DJI",
"DMA",
"DOM",
"ECU",
"EGY",
"ERI",
"ESA",
"ESP",
"EST",
"ETH",
"FIJ",
"FIN",
"FRA",
"FSM",
"GAB",
"GAM",
"GBR",
"GBS",
"GEO",
"GEQ",
"GER",
"GHA",
"GRE",
"GRN",
"GUA",
"GUI",
"GUM",
"GUY",
"HAI",
"HKG",
"HON",
"HUN",
"INA",
"IND",
"IRI",
"IRL",
"IRQ",
"ISL",
"ISR",
"ISV",
"ITA",
"IVB",
"JAM",
"JOR",
"JPN",
"KAZ",
"KEN",
"KGZ",
"KIR",
"KOR",
"KSA",
" | KUW",
"LAO",
"LAT",
"LBA",
"LBR",
"LCA",
"LES",
"LIB",
"LIE",
"LTU",
"LUX",
"MAD",
"MAR",
"MAS",
"MAW",
"MDA",
"MDV",
"MEX",
"MGL",
| "MHL",
"MKD",
"MLI",
"MLT",
"MNE",
"MON",
"MOZ",
"MRI",
"MTN",
"MYA",
"NAM",
"NCA",
"NED",
"NEP",
"NGR",
"NIG",
"NOR",
"NRU",
"NZL",
"OMA",
"PAK",
"PAN",
"PAR",
"PER",
"PHI",
"PLE",
"PLW",
"PNG",
"POL",
"POR",
"PRK",
"PUR",
"QAT",
"ROU",
"RSA",
"RUS",
"RWA",
"SAM",
"SEN",
"SEY",
"SIN",
"SKN",
"SLE",
"SLO",
"SMR",
"SOL",
"SOM",
"SRB",
"SRI",
"STP",
"SUD",
"SUI",
"SUR",
"SVK",
"SWE",
"SWZ",
"SYR",
"TAN",
"TGA",
"THA",
"TJK",
"TKM",
"TLS",
"TOG",
"TPE",
"TTO",
"TUN",
"TUR",
"TUV",
"UAE",
"UGA",
"UKR",
"URU",
"USA",
"UZB",
"VAN",
"VEN",
"VIE",
"VIN",
"YEM",
"ZAM",
"ZIM",
],
"numeric": [
"020",
"784",
"004",
"028",
"660",
"008",
"051" |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import argparse
import asyncio
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
python3 select_get_poetry3.py port1 port2 port3 ...
"""
parser = argparse.ArgumentParser(usage)
parser.add_argument('port', nargs='+')
args = vars(parser.parse_args())
addresses = args['port']
if not addresses:
print(parser.format_help())
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses)
class PoetryClientProtocol(asyncio.Protocol):
def __init__(self, infile):
self.infile = infile
def connection_made(self, transport):
print(transport.get_extra_info('peername'))
self.transport = transport
self.transport.write(b'poems')
def data_received(self, data):
if data:
print(data)
            print('writing to {}'.format(self.infile.name))
self.infile.write(data)
            self.transport.write(b'poems')
def eof_received(self):
print('end of writing')
self.infile.close()
def main():
addresses = parse_args()
eventloop = asyncio.get_event_loop()
for address in addresses:
host, port = address
filename = str(port) + '.txt'
infile = open(filename, 'wb')
coro = eventloop.create_connection(
lambda: PoetryClientProtocol(infile), host, port)
t, p = eventloop.run_until_complete(coro)
print(t, p)
try:
eventloop.run_forever()
finally:
eventloop.close()
if __name__ == '__main__':
main()
|
from .design_inputs import *
|
#!/usr/bin/env python
# A bag contains one red disc and one blue disc. In a game of chance a player
# takes a disc at random and its colour is noted. After each turn the disc is
# returned to the bag, an extra red disc is added, and another disc is
# taken at random.
# The player... wins if they have taken more blue discs than red discs at
# the end of the game.
# ------------------------------------------------------------------------
# P_n = prob(disc n is blue) = 1/(n + 1)
# For n discs, let C_1-C_2-...-C_n be the colors drawn, let i_1,...,i_k be the
# indices j such that disk i_j was drawn red. The probability of this event
# is (i_1 * ... * i_k)/factorial(n + 1)
# We can enumeratively define n_{j,k} to be the aggregate numerator
# of all possible draws with j blues drawn out of k draws
#
# The initial conditions are n_{0,1} = 1, n_{1,1} = 1
# The recurrence is defined by the fact that the n_{j + 1,k + 1} is
# can only have the (k + 1)'st element be blue or red, hence
# n_{j + 1,k + 1} = numer(blue)*n_{j,k} + numer(red)*n_{j + 1,k}
# = n_{j,k} + (k + 1)*n_{j + 1,k}
# except for the cases j = k, where n_{j,k} = numer(all blue) = 1
# except for the cases j = 0, where n_{0,k} = k!
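# A quick sanity check of the recurrence for n = 2 draws:
#   n_{0,1} = 1, n_{1,1} = 1
#   n_{0,2} = 2! = 2, n_{2,2} = 1, n_{1,2} = n_{0,1} + 2*n_{1,1} = 3
# and 2 + 3 + 1 = 3! = 6, the total numerator mass over all outcomes.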
from math import factorial
from python.decorators import euler_timer
def iterative_numerator(n):
numerators = {}
for k in range(1, n + 1):
for j in range(k + 1):
if j == 0:
numerators[(j, k)] = factorial(k)
elif j == k:
numerators[(j, k)] = 1
else:
numerators[(j, k)] = (numerators[(j - 1, k - 1)] +
k * numerators[(j, k - 1)])
min_blue = (n / 2) + 1
count = 0
for blue in range(min_blue, n + 1):
count += numerators[(blue, n)]
return count
def max_payout(n):
# Integer division precludes floor operation
return factorial(n + 1) / iterative_numerator(n)
def main(verbose=False):
return max_payout(15)
if __name__ == '__main__':
print euler_timer(121)(main)(verbose=True)
|
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Nicolas Bornand
#
# The licence is in the file __manifest__.py
#
##############################################################################
from mock import patch
from .onramp_base_test import TestOnramp
mock_oauth = (
"odoo.addons.message_center_compassion.models.ir_http.IrHTTP._oauth_validation"
)
class TestOnRampController(TestOnramp):
def setUp(self):
super().setUp()
def test_no_token(self):
""" Check we have an access denied if token is not provided
"""
del self.opener.headers["Authorization"]
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
error = response.json()
self.assertEqual(error["ErrorMethod"], "ValidateToken")
def test_bad_token(self):
""" Check we have an access denied if token is not valid
"""
self.opener.headers["Authorization"] = "Bearer notrealtoken"
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
@patch(mock_oauth)
def test_wrong_client_id(self, oauth_patch):
""" Check that if we get a token with unrecognized client_id,
access is denied. """
oauth_patch.return_value = "wrong_user"
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
@patch(mock_oauth)
    def test_good_client_id(self, oauth_patch):
""" Check that if we connect with admin as client_id,
access is granted. """
        oauth_patch.return_value = "admin"
response = self._send_post({"nothing": "nothing"})
json_result = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(
json_result["Message"], "Unknown message type - not processed."
)
|
########################################################################
# #
# Anomalous Diffusion #
# #
########################################################################
import steps.interface
########################################################################
# Create Model
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
from steps.saving import *
from steps.visual import *
import time
mdl = Model()
r = ReactionManager()
with mdl:
X = Species.Create()
vsys = VolumeSystem.Create()
with vsys:
dif_X = Diffusion.Create(X, 2e-09)
########################################################################
# Create Geometry
tetmesh = TetMesh.LoadAbaqus('2_20_0.7.inp', scale=1e-06, ebs=None, shadow_mesh="2_20_0.7_conf")
########################################################################
# Create Random number generator
rng = RNG('mt19937', 512, int(time.time()%4294967295))
########################################################################
# Initialize simulation
sim = Simulation('Tetexact', mdl, tetmesh, rng)
sim.injection.X.Count = 2000
########################################################################
# Visualization
rs = ResultSelector(sim)
# Create control
sc = SimControl(end_time = 1.0, upd_interval = 0.00001)
with sc:
with SimDisplay('Show Spine Species'):
# Static mesh element
ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
# Dynamic element
ElementDisplay(rs.LIST('dend', 'shaft').X, color=[1.0, 0.0, 0.0, 1.0], spec_size=0.1)
with SimDisplay('Hide Spine Species'):
ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
ElementDisplay(rs.shaft.X, color=[1.0, 0.0, 0.0, 1.0], spec_size=0.1)
with PlotDisplay('Plots'):
SpatialPlot(rs.TETS(tetmesh.shaft.tets).X.Count, axis=[0, 0, 1], nbins=100)
# Enter visualization loop
sc.run()
|
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
import plugin.databaseConnect as database
from datetime import datetime
class sendMessageUI(QMainWindow):
def __init__(self, id = None, bulk = None, parent = None):
QMainWindow.__init__(self,None)
self.setMinimumSize(626,380)
        self.setWindowTitle("Message")
self.parent = parent
self.id = id
self.bulk = bulk
self.UIinit()
def UIinit(self):
loader = QUiLoader()
form = loader.load("resources/UI/sendMessage.ui",None)
self.setCentralWidget(form)
#QPushButton
self.send_button = form.findChild(QPushButton,"sendButton")
self.close_button = form.findChild(QPushButton,"closeButton")
        #LineEdit
self.to_user = form.findChild(QLineEdit,"to")
self.message = form.findChild(QTextEdit,"message")
#Connect
self.send_button.clicked.connect(self.sendMes)
self.close_button.clicked.connect(self.closeWindow)
if(self.id != None):
self.to_user.setText(self.id)
def closeWindow(self):
self.close()
##Create message and send it to other user##
def sendMes(self):
db = database.databaseMessage()
toUser = self.to_user.text()
message = self.message.toPlainText()
time = datetime.now()
if(self.bulk == None):
data = self.parent.getCurrentUser()
fromUser = data.getID()
if(db.sendMessage(toUser, fromUser, message, time)):
db.disconnect()
self.parent.showOK("Message Sent", "The message has been sent to the user!")
self.closeWindow()
else:
                self.parent.showERROR("UserID Not Found", "The UserID you entered does not exist.")
else:
data = self.parent.parent.getCurrentUser()
fromUser = data.getID()
val = 0
for id in self.bulk:
val = db.sendMessage(id, fromUser, message, time)
if (val):
db.disconnect()
                self.parent.parent.showOK("All Messages Sent", "The message has been sent to all users!")
self.closeWindow()
else:
self.parent.parent.showERROR("ERROR!", "Some Messages are not delivered.")
|
        prev_break = j
while prev_break == len(cps) or prev_break != 0 and not cps[prev_break][1]:
prev_break -= 1
next_break = min(j + 1, len(cps))
while next_break != len(cps) and not cps[next_break][1]:
next_break += 1
break_tests += '''\
    EXPECT_EQ(boost::text::prev_{4}{3}_break(cps.begin(), cps.begin() + {0}, cps.end()){5} - cps.begin(), {1});
EXPECT_EQ(boost::text::next_{4}{3}_break(cps.begin() + {1}, cps.end()){5} - cps.begin(), {2});
'''.format(j, prev_break, next_break, prop_, prop_prefix, call_suffix)
break_tests += ' }\n\n'
        cpp_file = open('{}_break_{:02}.cpp'.format(prop_, i), 'w')
cpp_file.write(break_test_form.format(prop_, break_tests, i))
def contains_surrogate(cps):
for cp in cps:
if int(cp[0], 16) == 0xD800:
return True
return False
def generate_iterator_tests(cps_and_breaks, prop_):
for i in range(len(cps_and_breaks)):
iterator_tests = ''
chunk = cps_and_breaks[i]
elem_index = -1
for elem in chunk:
elem_index += 1
(cps, line, comment) = elem
comment_fields = comment.split(' ')
break_cp_indices = []
for j in range(len(cps)):
if cps[j][1]: # if break
break_cp_indices.append(j)
graphemes_and_end = []
code_unit_graphemes_and_end = []
for j in range(len(break_cp_indices)):
last_cp = j == len(break_cp_indices) - 1
first = break_cp_indices[j]
last = last_cp and len(cps) or break_cp_indices[j + 1]
graphemes_and_end.append('''\
EXPECT_EQ(it.base(), cps + {0});
EXPECT_EQ((*it).begin(), cps + {0});
EXPECT_EQ((*it).end(), cps + {1});'''.format(first, last))
code_unit_grapheme = '''\
EXPECT_EQ(*it.base(), cps[{0}]);
EXPECT_EQ(*it->begin(), cps[{0}]);'''.format(first)
if not last_cp:
code_unit_grapheme += '''
EXPECT_EQ(*it->end(), cps[{0}]);'''.format(last)
code_unit_grapheme += '''
EXPECT_EQ(it.base().base(), cus + cp_indices[{0}]);
EXPECT_EQ(it->begin().base(), cus + cp_indices[{0}]);
EXPECT_EQ(it->end().base(), cus + cp_indices[{1}]);'''.format(first, last)
code_unit_graphemes_and_end.append(code_unit_grapheme)
graphemes_and_end.append('''\
EXPECT_EQ(it.base(), cps + {});
EXPECT_EQ((*it).begin(), (*it).end());'''.format(len(cps)))
code_unit_graphemes_and_end.append('''\
EXPECT_EQ(it.base().base(), cus + cp_indices[{}]);
EXPECT_EQ(it->begin(), (*it).end());'''.format(len(cps)))
# forward
iterator_tests += '''
TEST({3}, iterator_{5:02}_{6}_fwd)
{{
// {0}
// {1}
{{
uint32_t const cps[] = {{ {2} }};
boost::text::{3}_iterator<uint32_t const *> it(cps, cps, cps + {4});
'''.format(line, comment, '0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
iterator_tests += '\n\n ++it;\n\n'.join(graphemes_and_end)
iterator_tests += '\n }\n}\n'
# reverse
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_rev)
{{
{{
// reverse
uint32_t const cps[] = {{ {0} }};
boost::text::{1}_iterator<uint32_t const *> it(cps, cps + {2}, cps + {2});
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
iterator_tests += '\n\n --it;\n\n'.join(reversed(graphemes_and_end))
iterator_tests += '\n }\n}\n'
# forth and back
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_fab)
{{
{{
// forth and back
uint32_t const cps[] = {{ {0} }};
boost::text::{1}_iterator<uint32_t const *> it(cps, cps, cps + {2});
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
idx = 0
iterator_tests += graphemes_and_end[idx]
for j in range(len(graphemes_and_end)):
for k in range(j):
iterator_tests += '\n\n ++it;\n\n'
idx += 1
iterator_tests += graphemes_and_end[idx]
for k in range(j):
iterator_tests += '\n\n --it;\n\n'
idx -= 1
iterator_tests += graphemes_and_end[idx]
iterator_tests += '\n }\n}\n'
# back and forth
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_baf)
{{
{{
// back and forth
uint32_t const cps[] = {{ {0} }};
boost::text::{1}_iterator<uint32_t const *> it(cps, cps + {2}, cps + {2});
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
idx = len(graphemes_and_end) - 1
iterator_tests += graphemes_and_end[idx]
for j in range(len(graphemes_and_end)):
for k in range(j):
iterator_tests += '\n\n --it;\n\n'
idx -= 1
iterator_tests += graphemes_and_end[idx]
for k in range(j):
iterator_tests += '\n\n ++it;\n\n'
idx += 1
iterator_tests += graphemes_and_end[idx]
iterator_tests += '\n }\n}\n'
# from UTF8
if contains_surrogate(cps):
iterator_tests += \
'// Skipping from-utf8 test due to presence of surrogate code point.\n'
else:
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_utf8)
{{
{{
// from UTF8
uint32_t const cps[] = {{ {0} }};
char cus[1024] = {{ 0 }};
int cp_indices[1024] = {{ 0 }};
std::copy(
boost::text::utf_32_to_8_iterator<uint32_t const *>(cps, cps, cps + {2}),
boost::text::utf_32_to_8_iterator<uint32_t const *>(cps, cps + {2}, cps + {2}),
cus);
boost::text::null_sentinel sentinel;
int * index_it = cp_indices;
for (boost::text::utf_8_to_32_iterator<char const *, boost::text::null_sentinel> it(cus, cus, boost::text::null_sentinel{{}}); ; ++it) {{
*index_it++ = it.base() - cus;
if (it == sentinel)
break;
}}
using iter_t = boost::text::utf_8_to_32_iterator<char const *, boost::text::null_sentinel>;
boost::text::{1}_iterator<iter_t, boost::text::null_sentinel> it(
iter_t{{cus, cus, boost::text::null_sentinel{{}}}}, iter_t{{cus, cus, boost::text::null_sentinel{{}}}}, sentinel);
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
iterator_tests += '\n\n ++it;\n\n'.join(code_unit_graphemes_and_end)
iterator_tests += '\n }\n}\n'
cpp_file = open('{}_iterator_{:02}.cpp'.format(prop_, i), 'w')
cpp_file.write(grapheme_iterator_test_form.format(iterator_tests, i))
bidi_property_cps = {
'L': '0x0041',
'R': '0x05BE',
'EN': '0x0030',
'ES': '0x002B',
'ET': '0x0023',
'AN': '0x0660',
'CS': '0x002C',
'B': '0x2029',
'S': '0x0009',
'WS': '0x0020',
'ON': '0x0021',
'BN': '0x00AD',
'NSM': '0x0300',
'AL': '0x0608',
'LRO': '0x202D',
'RLO': '0x202E',
'LRE': '0x202A',
'RLE': '0x202B',
'PDF': '0x202C',
'LRI': '0x2066',
'RLI': '0x2067',
'FSI': '0x2068',
'PDI': '0x2069'
}
def generate_bidi_tests(filename, batch_size):
current_batch = []
test_data = []
lines = open(filename, 'r').readlines()
num_lines = 0
curr_levels = []
curr_reorder = []
line_number = 0
for line in lines:
line_number += 1
if num_lines == batch_size:
test_data.append(current_batch)
current_batch = []
num_lines = 0
line = line[:-1]
|
#!/usr/bin/env python
path="/var/lib/gpu/gpu_locked.txt"
import os,sys
import ast
import socket
def getHost():
return socket.gethostname()
def getlocked():
hostname=getHost()
#print path
fp=open(path, "r")
info=fp.read()
#print info
    d=ast.literal_eval(info)
#print len(d)
print "%s,nvidia0,%d" % (hostname, (9999 - d['nvidia0']['available_count']))
print "%s,nvidia1,%d" % (hostname, (9999 - d['nvidia1']['available_count']))
print "%s,nvidia2,%d" % (hostname, (9999 - d['nvidia2']['available_count']))
print "%s,nvidia3,%d" % (hostname, (9999 - d['nvidia3']['available_count']))
fp.close()
if __name__ == "__main__":
getlocked()
|
#!/usr/bin/python
__author__ = 'anson'
import optparse
import re
import sys
from utils.utils_cmd import execute_sys_cmd
from lib_monitor.monitor_default_format import nagios_state_to_id
class messages_check():
def __init__(self, rex, config, type):
self.rex = rex
self.config = config
self.type = type
def run(self):
        result, infos = execute_sys_cmd('/usr/local/nagios/libexec/check_logfiles -f ' + self.config)
v_protocol = None
exit_state = 3
if len(infos) > 0:
state = infos[0].split()[0]
            if state not in nagios_state_to_id.keys():
print infos
sys.exit(exit_state)
exit_state = nagios_state_to_id[state]
if nagios_state_to_id[state] > 0:
m_protocol = re.search(r'\(\d+ errors in ([^ ]+)\)', infos[0])
v_protocol = m_protocol.group(1) if m_protocol else None
else:
sys.exit(exit_state)
if v_protocol is not None:
rex_dict = []
with open(self.rex, buffering=2000000) as rex_all:
for rex_split in rex_all:
rex_dict.append(rex_split)
with open('/tmp/' + v_protocol, buffering=2000000) as file_to_check:
for part in file_to_check:
for rex_rule in rex_dict:
m_iface = re.search(rex_rule, part)
v_dev = m_iface.group(1) if m_iface else 'none'
print v_dev
sys.exit(exit_state)
def main():
"""
messages_monitor.py
unit test example
python messages_monitor.py
"""
parser = optparse.OptionParser(
usage="%prog [options] [--parameter]",
description="To monitor system log file."
)
parser.add_option("--config",
dest="config",
help="Config file for error extraction",
type="string",
default="/usr/local/nagios/libexec/check_log.log"
)
parser.add_option("--type",
dest="type",
help="Event type",
type="string",
default="disk"
)
parser.add_option("--rex",
dest="rex",
help="Regular Expression",
type="string",
default="/usr/local/nagios/libexec/rule.conf"
)
(options, args) = parser.parse_args()
check = messages_check(options.rex, options.config, options.type)
check.run()
if __name__ == '__main__':
    main()
#!/usr/bin/env python2
"""
COSMO TECHNICAL TESTSUITE
General purpose script to compare two files containing tables
Only lines with given table pattern are considered
"""
# built-in modules
import os, sys, string
# information
__author__ = "Xavier Lapillonne"
__maintainer__ = "xavier.lapillonne@meteoswiss.ch"
def cmp_table(file1,file2,colpattern,minval,threshold,verbose=1,maxcompline=-1):
# General purpose script to compare two files containing tables
    # Only lines matching the given table column pattern are considered. Columns to be compared are marked with c,
    # columns to discard with x
#init
ncomp=0
nerror=0
lerror=False
epsilon=1e-16 #used to avoid division by zero in case minval is zero
# check file existence
if not(os.path.exists(file1)):
print('File %s does not exist' %(file1))
return -1
elif not(os.path.exists(file2)):
print('File %s does not exist' %(file2))
return -1
# convert input
colpattern=[x=='c' for x in list(colpattern)]
threshold=float(threshold)
minval=float(minval)
# open file
data1=open(file1).readlines()
data2=open(file2).readlines()
# get max record
nd1=len(data1)
nd2=len(data2)
# check that files are not empty
if nd1==0:
print('file %s is empty!' %(file1))
return -1
if nd2==0:
print('file %s is empty!' %(file2))
return -1
if nd1!=nd2 and verbose>1:
        print('Warning: %s and %s have different size, comparing common set only \n' %(file1,file2))
ncdata=min(nd1,nd2)
if (maxcompline>0):
ncdata=min(ncdata,maxcompline)
# Iterates through the lines
for il in range(ncdata):
l1=data1[il].split()
l2=data2[il].split()
l1match=matchColPattern(l1,colpattern)
l2match=matchColPattern(l2,colpattern)
# compare values if both lines are compatible
if l1match and l2match:
for ic in range(len(colpattern)):
if colpattern[ic]:
v1=float(l1[ic])
v2=float(l2[ic])
val_abs_max=max(abs(v1),abs(v2))
if val_abs_max > minval:
ncomp+=1
diff=abs(v1-v2)/(val_abs_max+epsilon)
if diff>threshold:
nerror+=1
# Print error
if verbose>1:
                                print('Error %2.2e above %2.2e threshold at line %i, col %i' %(diff,threshold,il+1,ic+1))
print('> %s' %(file1))
print(data1[il])
print('< %s' %(file2))
print(data2[il])
#save line for first error
if not lerror:
differ=diff
linerr=il+1
colerr=ic+1
linerr1=data1[il]
linerr2=data2[il]
lerror=True
if ncomp==0:
        print('Warning: no line to compare')
nerror=-2
if lerror and verbose>0:
print('Compared values: %i, errors above threshold: %i ; %i %% ' %(ncomp,nerror,nerror*100./ncomp))
if verbose==1:
            print('First error %2.2e above %2.2e threshold at line %i, col %i' %(differ,threshold,linerr,colerr))
print('> %s' %(file1))
print(linerr1)
print('< %s' %(file2))
print(linerr2)
return nerror
#----------------------------------------------------------------------------
# Local functions
def matchColPattern(line,colpattern):
if len(line)!=len(colpattern):
        return False
try:
for i in range(len(colpattern)):
if colpattern[i]: f=float(line[i])
except ValueError:
return False
return True
#-----------------------------------
#execute as a script
if __name__ == "__main__":
if len(sys.argv)==6:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5])
elif len(sys.argv)==7:
        cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5],sys.argv[6])
elif len(sys.argv)==8:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5],sys.argv[6],sys.argv[7])
else:
print('''USAGE : ./comp_table file1 file2 colpattern minval threshold [verbose maxcompline]
General purpose script to compare two files containing tables
    Only lines matching the given table column pattern are considered. Columns to be compared must be numbers and are marked with c,
    columns to discard with x.
    colpattern: c for compare or x for ignore, e.g. xccx discards the first and last column of a 4-column table
''')
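# A hedged usage sketch of cmp_table: with colpattern 'xccx' only the two middle columns
# of a four-column table are compared; values whose magnitude stays below minval are
# skipped, and relative differences above threshold count as errors. The file names here
# are hypothetical.
def example_comparison():
    nerr = cmp_table('reference.txt', 'current_run.txt', 'xccx', 1e-10, 1e-6, verbose=2)
    print('errors above threshold: %s' % nerr)
    return nerr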
end([d.name for d in get_child_nodes('Item Group', d.item_group)])
if args_list:
cond = "and i.item_group in (%s)" % (', '.join(['%s'] * len(args_list)))
return frappe.db.sql("""
select
i.name, i.item_code, i.item_name, i.description, i.item_group, i.has_batch_no,
i.has_serial_no, i.is_stock_item, i.brand, i.stock_uom, i.image,
id.expense_account, id.selling_cost_center, id.default_warehouse,
i.sales_uom, c.conversion_factor, it.item_tax_template, it.valid_from
from
`tabItem` i
left join `tabItem Default` id on id.parent = i.name and id.company = %s
left join `tabItem Tax` it on it.parent = i.name
left join `tabUOM Conversion Detail` c on i.name = c.parent and i.sales_uom = c.uom
where
i.disabled = 0 and i.has_variants = 0 and i.is_sales_item = 1
{cond}
group by i.item_code
""".format(cond=cond), tuple([company] + args_list), as_dict=1)
def get_item_groups(pos_profile):
item_group_dict = {}
item_groups = frappe.db.sql("""Select name,
lft, rgt from `tabItem Group` order by lft""", as_dict=1)
for data in item_groups:
item_group_dict[data.name] = [data.lft, data.rgt]
return item_group_dict
def get_customers_list(pos_profile={}):
cond = "1=1"
customer_groups = []
if pos_profile.get('customer_groups'):
# Get customers based on the customer groups defined in the POS profile
for d in pos_profile.get('customer_groups'):
customer_groups.extend([d.get('name') for d in get_child_nodes('Customer Group', d.get('customer_group'))])
cond = "customer_group in (%s)" % (', '.join(['%s'] * len(customer_groups)))
return frappe.db.sql(""" select name, customer_name, customer_group,
		territory, customer_pos_id from tabCustomer where disabled = 0
and {cond}""".format(cond=cond), tuple(customer_groups), as_dict=1) or {}
def get_customers_address(customers):
customer_address = {}
	if isinstance(customers, string_types):
customers = [frappe._dict({'name': customers})]
for data in customers:
address = frappe.db.sql(""" select name, address_line1, address_line2, city, state,
email_id, phone, fax, pincode from `tabAddress` where is_primary_address =1 and name in
(select parent from `tabDynamic Link` where link_doctype = 'Customer' and link_name = %s
and parenttype = 'Address')""", data.name, as_dict=1)
address_data = {}
if address:
address_data = address[0]
address_data.update({'full_name': data.customer_name, 'customer_pos_id': data.customer_pos_id})
customer_address[data.name] = address_data
return customer_address
def get_contacts(customers):
customer_contact = {}
if isinstance(customers, string_types):
customers = [frappe._dict({'name': customers})]
for data in customers:
contact = frappe.db.sql(""" select email_id, phone, mobile_no from `tabContact`
where is_primary_contact=1 and name in
(select parent from `tabDynamic Link` where link_doctype = 'Customer' and link_name = %s
and parenttype = 'Contact')""", data.name, as_dict=1)
if contact:
customer_contact[data.name] = contact[0]
return customer_contact
def get_child_nodes(group_type, root):
lft, rgt = frappe.db.get_value(group_type, root, ["lft", "rgt"])
return frappe.db.sql(""" Select name, lft, rgt from `tab{tab}` where
lft >= {lft} and rgt <= {rgt} order by lft""".format(tab=group_type, lft=lft, rgt=rgt), as_dict=1)
def get_serial_no_data(pos_profile, company):
# get itemwise serial no data
# example {'Nokia Lumia 1020': {'SN0001': 'Pune'}}
# where Nokia Lumia 1020 is item code, SN0001 is serial no and Pune is warehouse
cond = "1=1"
if pos_profile.get('update_stock') and pos_profile.get('warehouse'):
cond = "warehouse = %(warehouse)s"
serial_nos = frappe.db.sql("""select name, warehouse, item_code
from `tabSerial No` where {0} and company = %(company)s """.format(cond),{
'company': company, 'warehouse': frappe.db.escape(pos_profile.get('warehouse'))
}, as_dict=1)
itemwise_serial_no = {}
for sn in serial_nos:
if sn.item_code not in itemwise_serial_no:
itemwise_serial_no.setdefault(sn.item_code, {})
itemwise_serial_no[sn.item_code][sn.name] = sn.warehouse
return itemwise_serial_no
def get_batch_no_data():
# get itemwise batch no data
	# example: {'LED-GRE': ['Batch001', 'Batch002']}
	# where LED-GRE is the item code and Batch001, Batch002 are its batch numbers
itemwise_batch = {}
batches = frappe.db.sql("""select name, item from `tabBatch`
where ifnull(expiry_date, '4000-10-10') >= curdate()""", as_dict=1)
for batch in batches:
if batch.item not in itemwise_batch:
itemwise_batch.setdefault(batch.item, [])
itemwise_batch[batch.item].append(batch.name)
return itemwise_batch
def get_barcode_data(items_list):
	# get itemwise barcode data
	# example: {'LED-GRE': ['barcode1', 'barcode2']}
	# where LED-GRE is the item code and the list holds its barcodes
itemwise_barcode = {}
for item in items_list:
barcodes = frappe.db.sql("""
select barcode from `tabItem Barcode` where parent = %s
""", item.item_code, as_dict=1)
for barcode in barcodes:
if item.item_code not in itemwise_barcode:
itemwise_barcode.setdefault(item.item_code, [])
itemwise_barcode[item.item_code].append(barcode.get("barcode"))
return itemwise_barcode
def get_item_tax_data():
# get default tax of an item
# example: {'Consulting Services': {'Excise 12 - TS': '12.000'}}
itemwise_tax = {}
taxes = frappe.db.sql(""" select parent, tax_type, tax_rate from `tabItem Tax Template Detail`""", as_dict=1)
for tax in taxes:
if tax.parent not in itemwise_tax:
itemwise_tax.setdefault(tax.parent, {})
itemwise_tax[tax.parent][tax.tax_type] = tax.tax_rate
return itemwise_tax
def get_price_list_data(selling_price_list, conversion_rate):
itemwise_price_list = {}
price_lists = frappe.db.sql("""Select ifnull(price_list_rate, 0) as price_list_rate,
item_code from `tabItem Price` ip where price_list = %(price_list)s""",
{'price_list': selling_price_list}, as_dict=1)
for item in price_lists:
itemwise_price_list[item.item_code] = item.price_list_rate * conversion_rate
return itemwise_price_list
def get_customer_wise_price_list():
customer_wise_price = {}
customer_price_list_mapping = frappe._dict(frappe.get_all('Customer',fields = ['default_price_list', 'name'], as_list=1))
price_lists = frappe.db.sql(""" Select ifnull(price_list_rate, 0) as price_list_rate,
item_code, price_list from `tabItem Price` """, as_dict=1)
for item in price_lists:
if item.price_list and customer_price_list_mapping.get(item.price_list):
customer_wise_price.setdefault(customer_price_list_mapping.get(item.price_list),{}).setdefault(
item.item_code, item.price_list_rate
)
return customer_wise_price
def get_bin_data(pos_profile):
itemwise_bin_data = {}
filters = { 'actual_qty': ['>', 0] }
if pos_profile.get('warehouse'):
filters.update({ 'warehouse': pos_profile.get('warehouse') })
bin_data = frappe.db.get_all('Bin', fields = ['item_code', 'warehouse', 'actual_qty'], filters=filters)
for bins in bin_data:
if bins.item_code not in itemwise_bin_data:
itemwise_bin_data.setdefault(bins.item_code, {})
itemwise_bin_data[bins.item_code][bins.warehouse] = bins.actual_qty
return itemwise_bin_data
def get_pricing_rule_data(doc):
pricing_rules = ""
if doc.ignore_pricing_rule == 0:
pricing_rules = frappe.db.sql(""" Select * from `tabPricing Rule` where docstatus < 2
and ifnull(for_price_list, '') in (%(price_list)s, '') and selling = 1
and ifnull(company, '') in (%(company)s, '') and disable = 0 and %(date)s
between ifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31')
order by priority desc, name desc""",
{'company': doc.company, 'price_list': doc.selling_price_list, 'date': nowdate()}, as_dict=1)
return pricing_rules
@frappe.whitelist()
def make_invoice(pos_profile, doc_list={}, email_queue_list={}, customers_list={}):
import json
if isinstance(doc_list, string_types):
doc_list = json.loads(doc_list)
if isinstance(email_queue_list, string_types):
email_queue_list = json.loads(email_queue_list)
	if isinstance(customers_list, string_types):
		customers_list = json.loads(customers_list)
return
self._delete_whitespace()
prev_prev_index = self._lines.index(self._prev_prev_item)
if (
isinstance(self._lines[prev_prev_index - 1], self._Indent) or
self.fits_on_current_line(item.size + 1)
):
# The default initializer is already the only item on this line.
# Don't insert a newline here.
return
# Replace the space with a newline/indent combo.
if isinstance(self._lines[prev_prev_index - 1], self._Space):
del self._lines[prev_prev_index - 1]
self.add_line_break_at(self._lines.index(self._prev_prev_item),
indent_amt)
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for item in reversed(self._lines):
            if (
last_space and
(not isinstance(item, Atom) or not item.is_colon)
            ):
break
else:
last_space = None
if isinstance(item, self._Space):
last_space = item
if isinstance(item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt)
def _enforce_space(self, item):
"""Enforce a space in certain situations.
There are cases where we will want a space where normally we
wouldn't put one. This just enforces the addition of a space.
"""
if isinstance(self._lines[-1],
(self._Space, self._LineBreak, self._Indent)):
return
if not self._prev_item:
return
item_text = unicode(item)
prev_text = unicode(self._prev_item)
# Prefer a space around a '.' in an import statement, and between the
# 'import' and '('.
if (
(item_text == '.' and prev_text == 'from') or
(item_text == 'import' and prev_text == '.') or
(item_text == '(' and prev_text == 'import')
):
self._lines.append(self._Space())
def _delete_whitespace(self):
"""Delete all whitespace from the end of the line."""
while isinstance(self._lines[-1], (self._Space, self._LineBreak,
self._Indent)):
del self._lines[-1]
class Atom(object):
"""The smallest unbreakable unit that can be reflowed."""
def __init__(self, atom):
self._atom = atom
def __repr__(self):
return self._atom.token_string
def __len__(self):
return self.size
def reflow(
self, reflowed_lines, continued_indent, extent,
break_after_open_bracket=False,
is_list_comp_or_if_expr=False,
next_is_dot=False
):
if self._atom.token_type == tokenize.COMMENT:
reflowed_lines.add_comment(self)
return
total_size = extent if extent else self.size
if self._atom.token_string not in ',:([{}])':
# Some atoms will need an extra 1-sized space token after them.
total_size += 1
prev_item = reflowed_lines.previous_item()
if (
not is_list_comp_or_if_expr and
not reflowed_lines.fits_on_current_line(total_size) and
not (next_is_dot and
reflowed_lines.fits_on_current_line(self.size + 1)) and
not reflowed_lines.line_empty() and
not self.is_colon and
not (prev_item and prev_item.is_name and
unicode(self) == '(')
):
# Start a new line if there is already something on the line and
# adding this atom would make it go over the max line length.
reflowed_lines.add_line_break(continued_indent)
else:
reflowed_lines.add_space_if_needed(unicode(self))
reflowed_lines.add(self, len(continued_indent),
break_after_open_bracket)
def emit(self):
return self.__repr__()
@property
def is_keyword(self):
return keyword.iskeyword(self._atom.token_string)
@property
def is_string(self):
return self._atom.token_type == tokenize.STRING
@property
def is_name(self):
return self._atom.token_type == tokenize.NAME
@property
def is_number(self):
return self._atom.token_type == tokenize.NUMBER
@property
def is_comma(self):
return self._atom.token_string == ','
@property
def is_colon(self):
return self._atom.token_string == ':'
@property
def size(self):
return len(self._atom.token_string)
class Container(object):
"""Base class for all container types."""
def __init__(self, items):
self._items = items
def __repr__(self):
string = ''
last_was_keyword = False
for item in self._items:
if item.is_comma:
string += ', '
elif item.is_colon:
string += ': '
else:
item_string = unicode(item)
if (
string and
(last_was_keyword or
(not string.endswith(tuple('([{,.:}]) ')) and
not item_string.startswith(tuple('([{,.:}])'))))
):
string += ' '
string += item_string
last_was_keyword = item.is_keyword
return string
def __iter__(self):
for element in self._items:
yield element
def __getitem__(self, idx):
return self._items[idx]
def reflow(self, reflowed_lines, continued_indent,
break_after_open_bracket=False):
last_was_container = False
for (index, item) in enumerate(self._items):
next_item = get_item(self._items, index + 1)
if isinstance(item, Atom):
is_list_comp_or_if_expr = (
isinstance(self, (ListComprehension, IfExpression)))
item.reflow(reflowed_lines, continued_indent,
self._get_extent(index),
is_list_comp_or_if_expr=is_list_comp_or_if_expr,
next_is_dot=(next_item and
unicode(next_item) == '.'))
if last_was_container and item.is_comma:
reflowed_lines.add_line_break(continued_indent)
last_was_container = False
else: # isinstance(item, Container)
reflowed_lines.add(item, len(continued_indent),
break_after_open_bracket)
last_was_container = not isinstance(item, (ListComprehension,
IfExpression))
if (
break_after_open_bracket and index == 0 and
# Prefer to keep empty containers together instead of
# separating them.
unicode(item) == self.open_bracket and
(not next_item or unicode(next_item) != self.close_bracket) and
(len(self._items) != 3 or not isinstance(next_item, Atom))
):
reflowed_lines.add_line_break(continued_indent)
break_after_open_bracket = False
else:
next_next_item = get_item(self._items, index + 2)
if (
unicode(item) not in ['.', '%', 'in'] and
next_item and not isinstance(next_item, Container) and
unicode(next_item) != ':' and
next_next_item and (not isinstance(next_next_item, Atom) or
unicode(next_item) == 'not') and
not reflowed_lines.line_empty() and
not reflowed_lines.fits_on_current_line( |
        # Some RDS resources take much longer than others to be ready. Check
# less aggressively for slow ones to avoid throttling.
if time.time() > start_time + 90:
check_interval = 20
return resource
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group', 'port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if has_rds2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException as e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception as e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', |
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
    new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException as e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if result.get_data().get('replication_source'):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
else:
changed = False
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
        from sqlalchemy.orm.properties import \\
ColumnProperty,\\
                    CompositeProperty,\\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
            return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parentmapper = parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parentmapper, adapt_to_entity)
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@util.memoized_property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
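    # A hedged illustration of the any()/has() operations documented above, assuming a
    # typical one-to-many mapping (User.addresses -> Address); the model names are
    # hypothetical and not part of this module:
    #
    #     session.query(User).filter(
    #         User.addresses.any(Address.email_address == 'foo@example.com'))
    #
    #     session.query(Address).filter(
    #         Address.user.has(User.name == 'ed'))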
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
"""
strategy_wildcard_key = None
@util.memoized_property
def _wildcard_path(self):
if self.strategy_wildcard_key:
return ('loaderstrategy', (self.strategy_wildcard_key,))
else:
return None
def _get_context_strategy(self, context, path):
strategy_cls = path._inlined_get_for(self, context, 'loaderstrategy')
if not strategy_cls:
wc_key = self._wildcard_path
if wc_key and wc_key in context.attributes:
strategy_cls = context.attributes[wc_key]
if strategy_cls:
try:
return self._strategies[strategy_cls]
except KeyError:
return self.__init_strategy(strategy_cls)
return self.strategy
def _get_strategy(self, cls):
try:
return self._strategies[cls]
except KeyError:
return self.__init_strategy(cls)
def __init_strategy(self, cls):
self._strategies[cls] = strategy = cls(self)
return strategy
def setup(self, context, entity, path, adapter, **kwargs):
self._get_context_strategy(context, path).\
setup_query(context, entity, path,
adapter, **kwargs)
def create_row_processor(self, context, path, mapper, row, adapter):
return self._get_context_strategy(context, path).\
create_row_processor(context, path,
mapper, row, adapter)
def do_init(self):
self._strategies = {}
self.strategy = self.__init_strategy(self.strategy_class)
def post_instrument_class(self, mapper):
if self.is_primary() and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
Query object generated by scalar or object lazy loaders.
"""
def process_query(self, query):
pass
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
Used when secondary loaders resend existing options to a new
Query."""
self.process_query(query)
class PropertyOption(MapperOption):
"""A MapperOption that is applied to a property off the mapper or
one of its child mappers, identified by a dot-separated key
or list of class-bound attributes. """
def __init__(self, key, mapper=None):
self.key = key
self.mapper = mapper
def process_query(self, query):
self._process(query, True)
def process_qu |
#!/usr/bin/env python
import os
import shutil
import logging
from unicode_helper import p
__all__ = ["Renamer"]
def log():
"""Returns the logger for current file
"""
return logging.getLogger(__name__)
def same_partition(f1, f2):
"""Returns True if both files or directories are on the same partition
"""
return os.stat(f1).st_dev == os.stat(f2).st_dev
def delete_file(fpath):
"""On OS X: Trashes a path using the Finder, via OS X's Scripting Bridge.
On other platforms: unlinks file.
"""
try:
from AppKit import NSURL
from ScriptingBridge import SBApplication
except ImportError:
p("Deleting %s" % fpath)
log().debug("Deleting %r" % fpath)
os.unlink(fpath)
else:
p("Trashing %s" % fpath)
log().debug("Trashing %r" % fpath)
targetfile = NSURL.fileURLWithPath_(fpath)
finder = SBApplication.applicationWithBundleIdentifier_("com.apple.Finder")
items = finder.items().objectAtLocation_(targetfile)
items.delete()
def rename_file(old, new):
"""Rename 'old' file to 'new'. Both files must be on the same partition.
Preserves access and modification time.
"""
p("Renaming %s to %s" % (old, new))
log().debug("Renaming %r to %r" % (old, new))
stat = os.stat(old)
os.rename(old, new)
os.utime(new, (stat.st_atime, stat.st_mtime))
def copy_file(old, new):
"""Copy 'old' file to 'new'.
"""
p("Copying %s to %s" % (old, new))
log().debug("Copying %r to %r" % (old, new))
shutil.copyfile(old, new)
shutil.copystat(old, new)
def symlink_file(target, name):
"""Create symbolic link named 'name' pointing to 'target'.
"""
p("Creating symlink %s to %s" % (name, target))
log().debug("Creating symlink %r to %r" % (name, target))
os.symlink(target, name)
class Renamer(object):
"""Deals with renaming of files
"""
def __init__(self, filename):
self.filename = os.path.abspath(filename)
def rename(self, new_fullpath, force=False, always_copy=False, always_move=False, leave_symlink=False, create_dirs=True):
"""Moves the file to a new path.
If it is on the same partition, it will be moved (unless always_copy is True)
If it is on a different partition, it will be copied, and the original
only deleted if always_move is True.
If the target file already exists, it will raise OSError unless force is True.
If it was moved, a symlink will be left behind with the original name
pointing to the file's new destination if leave_symlink is True.
"""
new_dir = os.path.dirname(new_fullpath)
if create_dirs:
p("Creating directory %s" % new_dir)
try:
os.makedirs(new_dir)
except OSError, e:
if e.errno != 17:
raise
if os.path.exists(new_fullpath):
# If the destination exists, raise exception unless force is True
if not force:
raise OSError("File %s already exists, not forcefully moving %s" % (
new_fullpath, self.filename))
if same_partition(self.filename, new_dir):
if always_copy:
# Same partition, but forced to copy
copy_file(self.filename, new_fullpath)
else:
# Same partition, just rename the file to move it
rename_file(self.filename, new_fullpath)
# Leave a symlink behind if configured to do so
if leave_symlink:
symlink_file(new_fullpath, self.filename)
else:
# File is on different partition (different disc), copy it
copy_file(self.filename, new_fullpath)
if always_move:
# Forced to move file, we just trash old file
delete_file(self.filename)
                # Leave a symlink behind if configured to do so
if leave_symlink:
                    symlink_file(new_fullpath, self.filename)
self.filename = new_fullpath
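# A hedged usage sketch of Renamer under the semantics documented in rename(): move a
# file into a (possibly new) directory, copy instead when the target sits on another
# partition, and leave a symlink at the old location. The paths are hypothetical.
def example_rename():
    r = Renamer("/downloads/episode.s01e01.mkv")
    r.rename("/library/Show/Season 1/episode.s01e01.mkv",
             force=False, always_copy=False, always_move=True,
             leave_symlink=True, create_dirs=True)
    return r.filename  # updated to the new location after a successful move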
ne)
self.redraw(None)
def showpopupmenu(self,widget,event):
print('button ',event.button)
if event.button == 3:
m = self.builder.get_object("menufunctions")
print(widget, event)
m.show_all()
m.popup(None,None,None,3,0)
def get_time_selection(self,widget,current=True):
print 'name',widget.get_parent().get_name()
sel_ind = []
sel_onset_ind = []
def selection_to_ind(sels,sele,inc):
print 'getting sel'
if sele == sels: #only one point selected
sele = sels+inc
nearest.nearest(self.t,arange(sels,sele,inc))
sel_ind = nearest.nearest(self.t,arange(sels,sele,inc))
return sel_ind
if widget.get_parent().get_name() == 'GtkMenu' and current == True: #call from editor menu
print 'call from right click menu'
try:
self.sel_ind = selection_to_ind(self.selections[-1][0],\
self.selections[-1][1],self.t[1]-self.t[0])
except AttributeError:
print 'no selections yet'
return -1
else: #call from selector
print 'call from selector window'
liststore,iter = self.SelView.get_selection().get_selected_rows()
for i in iter:
j = int(liststore[i][0])
sel_ind.extend(selection_to_ind(self.selections[j][0],\
self.selections[j][1],self.t[1]-self.t[0]))
sel_onset_ind.extend(selection_to_ind(self.selections[j][0],\
self.selections[j][0],self.t[1]-self.t[0]))
self.sel_ind = sel_ind
self.sel_onset_ind = sel_onset_ind
def plot_contour(self,widget):
if size(self.data,1) < 4:
self.builder.get_object("messagedialog1").format_secondary_text\
('Contour Plot Requires at least 4 Channels')
self.builder.get_object("messagedialog1").show()
return -1
print widget.get_parent().get_name()
if self.get_time_selection(widget) == -1: #no selections
self.builder.get_object("messagedialog1").format_secondary_text\
('No Selection Made Yet')
self.builder.get_object("messagedialog1").show()
return -1
try:
print 'state',self.mc.window.get_property('visible')
if self.mc.window.get_property('visible') == False:
#someone closed the window
self.mc.window.show()
print 'done replotting'
except AttributeError: #first call. setup
print 'first plot'
self.mc = contour_gtk.setup_gui()
self.mc.window.show()
self.mc.fig.clf()
self.mc.display(self.data[self.sel_ind,:],self.channels, subplot='on', labels=self.chanlabels)
def generate_testdata(self,widget):
self.quick_load_pdf_script()
#numpts = 100
#self.numchannels = 10
#self.t = arange(0,numpts, .01)
#self.data = zeros((len(self.t),self.numchannels))
#self.scalefact = 1e-9
#for i in arange(0,self.numchannels):
#r = random.randn()
#self.data[:,i] = float32((sin(2*0.32*pi*self.t*r) * \
#sin(2*2.44*pi*self.t*r)))#+ self.space
#self.data[:,0] = random.randn((len(self.t)))
#self.data = self.data * self.scalefact
#self.tstart = 0; self.tstop = len(self.t)
#self.time = copy(self.t[self.tstart:self.tstop])
#print self.tstart,self.tstop
#self.chanind = arange(0,self.numchannels)
#self.chanlabels = arange(0,self.numchannels)
self.data2plot = self.data
self.display_apply(None)
#self.space_data()
#self.redraw(None)
def quick_load_pdf_script(self):
from pdf2py import pdf
datapath = '/home/danc/programming/python/data/'
p = pdf.read(datapath+'test/e,rfhp1.0Hz,ra')
#p = pdf.read(datapath+'0611/0611piez/e,rfhp1.0Hz')
#p = pdf.read(datapath+'data/0611/drawing3/01%01%01@01:01/2/c,rfDC')
p.data.setchannels('meg')
#p.data.setchannellabels(['A1','A69','A130'])#meg')
#p.data.setchannellabels(['A178'])
p.data.getdata(0,p.data.pnts_in_file)
self.numchannels = size(p.data.data_block,1)
self.t = p.data.wintime #eventtime
self.data = p.data.data_block
self.tstart = 0; self.tstop = len(self.t)
self.time = copy(self.t[self.tstart:self.tstop])
self.chanind = arange(self.numchannels)
self.chanlabels = p.data.channels.labellist
self.scalefact = (p.data.data_block.min()+p.data.data_block.max())/2
self.channels = p.data.channels.chanlocs
self.srate = p.hdr.header_data.sample_period
self.data_loaded_setup()
self.curchannel = 0
def hideinsteadofdelete(self,widget, ev=None):
widget.hide()
return True
def load_data(self,widget):
from gui.gtk import filechooser
fn = filechooser.open()
try: #pdf load method
self.data_assist = meg_assistant.setup(path = fn[0], \
callback=self.load_data_callback)
except:
print 'something wrong with load'
return -1
def load_data_callback(self, widget):
print 'DONE!'
p = self.data_assist.pdfdata #4D MEG file format
input_dict = {'data_block':p.data.data_block,'srate':p.data.srate,'wintime':p.data.wintime,'labellist':p.data.channels.labellist,'chanlocs':p.data.channels.chanlocs}
self.data_handler(widget, input_dict)
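    # A hedged example of the input_dict format documented in data_handler() below;
    # every value here is synthetic and only illustrates the expected shapes.
    #
    #     demo_input = {
    #         'data_block': zeros((1000, 2)),            # 2D array: (samples, channels)
    #         'srate': 1000.0,                           # float or int
    #         'wintime': arange(0, 1.0, 0.001),          # length == data_block.shape[0]
    #         'labellist': ['A1', 'A2'],                 # length == data_block.shape[1]
    #         'chanlocs': array([[-.1, .1], [0., 0.]]),  # (2, nchannels), values in [-.5, .5]
    #     }
    #     self.data_handler(widget, demo_input)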
def data_handler(self, widget, input_dict, callback=None):
'''
datahandler(data,srate,wintime,chanlabels,chanlocs)
-
data = 2D array
srate = type(float or int)
        wintime = type(list or array) of same length as first dimension of data
chanlabels = type(list of strings) of same length as
second dimension of data
chanlocs = shape is 2Xnumber of channels, ie, (2,248) and contains page
coordinates for each channel. Position of X and Y is between -.5
and .5
'''
####!!!!!!!
        '''should rewrite the following, as well as the filechooser method, to make it simple and compatible with dictionary-based load and read'''
data = input_dict['data_block']
srate = input_dict['srate']
wintime = input_dict['wintime']
chanlabels = input_dict['labellist']
chanlocs = input_dict['chanlocs']
print type(data),srate,type(wintime),type(chanlabels),type(chanlocs)
print len(chanlabels),size(data,1),len(wintime),size(data,0),\
size(chanlocs,1)
if len(chanlabels) != size(data,1) or len(wintime) != size(data,0):
#or size(chanlocs,1) != size(data,1):
print 'error matching wintime or chlabels or chanlocs with data'
#self.builder.get_object("messagedialog1").format_secondary_text\
#('error matching wintime or chlabels or chanlocs with data')
#self.builder.get_object("messagedialog1").show()
#raise RuntimeError
self.data = data
self.srate = srate
self.chanlabels = chanlabels
self.t = array(wintime)
self.tstart = 0; self.tstop = len(self.t)
self.time = copy(self.t[self.tstart:self.tstop])
self.numchannels = size(data,1)
self.chanind = arange(self.numchannels)
print 'DEBUG',data
self.scalefact = (data.min()+data.max())/2
print 'scalefact', self.scalefact
self.channels = chanlocs
self.curchannel = 0
self.tstart = 0; self.tstop = len(self.t)
self.data_loaded_setup()
self.data2plot = self.data
self.display_apply(None)
try: callback(widget); self.callback = callback
        except (TypeError, NameError): print('no callback')
def offset_correct(self,widget):
print self.get_time_selection(widget)
if self.get_time_selection(widget) == -1: #no selections
###self.builder.get_ |