__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
from androlyze.error.WrapperException import WrapperException
def _create_delete_error_msg(content, destination):
return "Could not delete %s from %s" % (content, destination)
def _create_store_error_msg(content, destination):
return "Could not store result for %s to %s" % (content, destination)
def _create_load_error_msg(content, source):
return "Could not load %s from %s" % (content, source)
class StorageException(WrapperException):
''' Base exception for data storage '''
pass
DB_STORE = "database"
class DatabaseException(StorageException):
pass
class EDatabaseException(DatabaseException):
''' Extended DatabaseException that has the database as parameter as well as content '''
def __init__(self, db, content, caused_by = None, **kwargs):
'''
Parameters
----------
db : object
content : object
The object that couldn't be loaded/stored.
        caused_by: Exception, optional (default is None)
            the exception that caused this one to be raised
'''
DatabaseException.__init__(self, caused_by = caused_by, **kwargs)
self.db = db
self.content = content
class DatabaseDeleteException(EDatabaseException):
def _msg(self):
return _create_delete_error_msg(self.content, self.db)
class DatabaseStoreException(EDatabaseException):
def _msg(self):
return _create_store_error_msg(self.content, self.db)
class DatabaseLoadException(EDatabaseException):
def _msg(self):
return _create_load_error_msg(self.content, self.db)
class DatabaseOpenError(DatabaseException):
def __init__(self, db_name, **kwargs):
super(DatabaseOpenError, self).__init__(**kwargs)
self.db_name = db_name
def _msg(self):
return 'Could not open database: "%s"' % self.db_name
DB_STORE = "database"
class S3StorageException(StorageException):
pass
class ES3StorageException(S3StorageException):
    ''' Extended S3StorageException that has the storage as parameter as well as the content '''
def __init__(self, db, content, caused_by = None, **kwargs):
'''
Parameters
----------
db : object
content : object
The object that couldn't be loaded/stored.
        caused_by: Exception, optional (default is None)
            the exception that caused this one to be raised
'''
S3StorageException.__init__(self, caused_by = caused_by, **kwargs)
self.db = db
self.content = content
class S3StorageDeleteException(ES3StorageException):
def _msg(self):
return _create_delete_error_msg(self.content, self.db)
class S3StorageStoreException(ES3StorageException):
def _msg(self):
return _create_store_error_msg(self.content, self.db)
class S3StorageLoadException(ES3StorageException):
def _msg(self):
return _create_load_error_msg(self.content, self.db)
class S3StorageOpenError(ES3StorageException):
def __init__(self, db_name, **kwargs):
        # Deliberately skip ES3StorageException.__init__, which requires
        # db and content; a bucket open error only carries its name.
        super(ES3StorageException, self).__init__(**kwargs)
self.db_name = db_name
def _msg(self):
return 'Could not open bucket: "%s"' % self.db_name
class FileSysException(StorageException):
def __init__(self, file_path, fs_storage, *args, **kwargs):
'''
Parameters
----------
file_path: str
the path of the file
        fs_storage : FileSysStorage
'''
super(FileSysException, self).__init__(*args, **kwargs)
self.file_path = file_path
self.fs_storage = fs_storage
class FileSysStoreException(FileSysException):
def __init__(self, file_path, content, fs_storage, caused_by = None):
'''
Parameters
----------
file_path: str
the path of the file
content: object
the content which should be stored
        fs_storage : FileSysStorage
        caused_by: Exception, optional (default is None)
            the exception that caused this one to be raised
'''
super(FileSysStoreException, self).__init__(file_path, fs_storage, caused_by = caused_by)
self.content = content
def _msg(self):
return _create_store_error_msg(self.content, self.file_path)
class FileSysCreateStorageStructureException(FileSysException):
def __init__(self, file_path, fs_storage, caused_by = None):
'''
Parameters
----------
file_path: str
the path of the file
        fs_storage : FileSysStorage
        caused_by: Exception, optional (default is None)
            the exception that caused this one to be raised
'''
super(FileSysCreateStorageStructureException, self).__init__(file_path, fs_storage, caused_by = caused_by)
def _msg(self):
return "Could not create the file system structure: %s" % self.file_path
class FileSysLoadException(FileSysException):
def _msg(self):
return _create_load_error_msg(self.file_path, self.fs_storage)
class FileSysDeleteException(FileSysException):
def _msg(self):
return _create_delete_error_msg(self.file_path, self.fs_storage)
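# Hedged usage sketch: how these wrappers are meant to be raised and caught.
# The db/content values are placeholders; WrapperException is assumed to
# build its message via _msg() and to carry `caused_by`.
#
# try:
#     raise DatabaseLoadException("mongodb://localhost", "apk:1234",
#                                 caused_by=IOError("connection reset"))
# except StorageException as e:
#     print e  # -> "Could not load apk:1234 from mongodb://localhost"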
|
import os, sys
import commands
import optparse
import shutil
INSTALL_DIR = ""
BASE_DIR = os.path.dirname(__file__)
SIP_FILE = "poppler-qt4.sip"
BUILD_DIR = "build"
SBF_FILE = "QtPoppler.sbf"
def _cleanup_path(path):
"""
Cleans the path:
- Removes traling / or \
"""
path = path.rstrip('/')
path = path.rstrip('\\')
return path
def pkgconfig(package):
    '''
    Calls pkg-config for the given package
    Returns: - None if the package is not found.
             - {'inc_dirs': [List of -I paths],
                'lib_dirs': [List of -L paths],
                'libs': [List of -l libs]
               }
    '''
code, msg = commands.getstatusoutput("pkg-config --exists %s" % package)
if code != 0:
return None
tokens = commands.getoutput("pkg-config --libs --cflags %s" % package).split()
return {
'inc_dirs': [ token[2:] for token in tokens if token[:2] == '-I'],
'lib_dirs': [ token[2:] for token in tokens if token[:2] == '-L'],
'libs': [ token[2:] for token in tokens if token[:2] == '-l'],
}
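# Worked example (hypothetical output): if `pkg-config --libs --cflags poppler-qt4`
# printed "-I/usr/include/poppler/qt4 -L/usr/lib -lpoppler-qt4 -lpoppler",
# pkgconfig('poppler-qt4') would return
#   {'inc_dirs': ['/usr/include/poppler/qt4'],
#    'lib_dirs': ['/usr/lib'],
#    'libs': ['poppler-qt4', 'poppler']}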
def create_optparser(sipcfg):
    '''Command-line parser'''
def store_abspath(option, opt_str, value, parser):
setattr(parser.values, option.dest, os.path.abspath(value))
def get_default_moddir():
default = sipcfg.default_mod_dir
default = os.path.join(default, INSTALL_DIR)
return default
p = optparse.OptionParser(usage="%prog [options]")
default_moddir = get_default_moddir()
p.add_option("-d", "--destdir", action="callback",
default=default_moddir, type="string",
metavar="DIR",
dest="moddir", callback=store_abspath,
help="Where to install PyPoppler-Qt4 python modules."
"[default: %default]")
p.add_option("-s", "--sipdir", action="callback",
default=os.path.join(sipcfg.default_sip_dir, INSTALL_DIR),
metavar="DIR", dest="sipdir", callback=store_abspath,
type="string", help="Where the .sip files will be installed "
"[default: %default]")
p.add_option("", "--popplerqt-includes-dir", action="callback",
default=None,
metavar="DIR", dest="popplerqt_inc_dirs", callback=store_abspath,
type="string", help="PopplerQt include paths"
"[default: Auto-detected with pkg-config]")
p.add_option("", "--popplerqt-libs-dir", action="callback",
default=None,
metavar="DIR", dest="popplerqt_lib_dirs", callback=store_abspath,
type="string", help="PopplerQt libraries paths"
"[default: Auto-detected with pkg-config]")
return p
def get_pyqt4_config():
try:
import PyQt4.pyqtconfig
return PyQt4.pyqtconfig.Configuration()
except ImportError, e:
print >> sys.stderr, "ERROR: PyQt4 not found."
sys.exit(1)
def get_sip_config():
try:
import sipconfig
return sipconfig.Configuration()
except ImportError, e:
print >> sys.stderr, "ERROR: SIP (sipconfig) not found."
sys.exit(1)
def get_popplerqt_config(opts):
config = pkgconfig('poppler-qt4')
if config is not None:
found_pkgconfig = True
else:
found_pkgconfig = False
config = {'libs': ['poppler-qt4', 'poppler'],
'inc_dirs': None,
'lib_dirs': None}
if opts.popplerqt_inc_dirs is not None:
config['inc_dirs'] = opts.popplerqt_inc_dirs.split(" ")
if opts.popplerqt_lib_dirs is not None:
config['lib_dirs'] = opts.popplerqt_lib_dirs.split(" ")
if config['lib_dirs'] is None or config['inc_dirs'] is None:
print >> sys.stderr, "ERROR: poppler-qt4 not found."
print "Try to define PKG_CONFIG_PATH "
print "or use --popplerqt-libs-dir and --popplerqt-includes-dir options"
sys.exit(1)
config['inc_dirs'] = map(_cleanup_path, config['inc_dirs'])
config['lib_dirs'] = map(_cleanup_path, config['lib_dirs'])
config['sip_dir'] = _cleanup_path(opts.sipdir)
config['mod_dir'] = _cleanup_path(opts.moddir)
print "Using PopplerQt include paths: %s" % config['inc_dirs']
print "Using PopplerQt libraries paths: %s" % config['lib_dirs']
print "Configured to install SIP in %s" % config['sip_dir']
print "Configured to install binaries in %s" % config['mod_dir']
return config
def create_build_dir():
dir = os.path.join(BASE_DIR, BUILD_DIR)
if os.path.exists(dir):
return
try:
os.mkdir(dir)
    except OSError:
print >> sys.stderr, "ERROR: Unable to create the build directory (%s)" % dir
sys.exit(1)
def run_sip(pyqtcfg):
create_build_dir()
cmd = [pyqtcfg.sip_bin,
"-c", os.path.join(BASE_DIR, BUILD_DIR),
"-b", os.path.join(BUILD_DIR, SBF_FILE),
"-I", pyqtcfg.pyqt_sip_dir,
pyqtcfg.pyqt_sip_flags,
os.path.join(BASE_DIR, SIP_FILE)]
os.system( " ".join(cmd) )
def generate_makefiles(pyqtcfg, popplerqtcfg, opts):
from PyQt4 import pyqtconfig
import sipconfig
pypopplerqt4config_file = os.path.join(BASE_DIR, "pypopplerqt4config.py")
    # Creates the Makefile objects for the build directory
makefile_build = pyqtconfig.sipconfig.ModuleMakefile(
configuration=pyqtcfg,
build_file=SBF_FILE,
dir=BUILD_DIR,
install_dir=popplerqtcfg['mod_dir'],
warnings=1,
qt=['QtCore', 'QtGui', 'QtXml']
)
# Add extras dependencies for the compiler and the linker
# Libraries names don't include any platform specific prefixes
# or extensions (e.g. the "lib" prefix on UNIX, or the ".dll" extension on Windows)
makefile_build.extra_lib_dirs = popplerqtcfg['lib_dirs']
makefile_build.extra_libs = popplerqtcfg['libs']
makefile_build.extra_include_dirs = popplerqtcfg['inc_dirs']
# Generates build Makefile
makefile_build.generate()
# Generates root Makefile
installs_root = []
installs_root.append( (os.path.join(BASE_DIR, SIP_FILE), popplerqtcfg['sip_dir']) )
installs_root.append( (pypopplerqt4config_file, popplerqtcfg['mod_dir']) )
sipconfig.ParentMakefile(
configuration=pyqtcfg,
subdirs=[_cleanup_path(BUILD_DIR)],
installs=installs_root
).generate()
def generate_configuration_module(pyqtcfg, popplerqtcfg, opts):
import sipconfig
content = {
"pypopplerqt4_sip_dir": popplerqtcfg['sip_dir'],
"pypopplerqt4_sip_flags": pyqtcfg.pyqt_sip_flags,
"pypopplerqt4_mod_dir": popplerqtcfg['mod_dir'],
"pypopplerqt4_modules": 'PopplerQt',
"popplerqt4_inc_dirs": popplerqtcfg['inc_dirs'],
"popplerqt4_lib_dirs": popplerqtcfg['lib_dirs'],
}
# This creates the pypopplerqt4config.py module from the pypopplerqt4config.py.in
# template and the dictionary.
sipconfig.create_config_module(
os.path.join(BASE_DIR, "pypopplerqt4config.py"),
os.path.join(BASE_DIR, "pypopplerqt4config.py.in"),
content)
def main():
sipcfg = get_sip_config()
pyqtcfg = get_pyqt4_config()
parser = create_optparser(sipcfg)
opts, args = parser.parse_args()
popplerqtcfg = get_popplerqt_config(opts)
run_sip(pyqtcfg)
generate_makefiles(pyqtcfg, popplerqtcfg, opts)
generate_configuration_module(pyqtcfg, popplerqtcfg, opts)
if __name__ == "__main__":
main()
|
'''
Examples of advanced Python features:
- metaclass
- descriptor
- generator/forloop
'''
from __future__ import print_function
import sys
if sys.version_info > (3, ): # Python 3
exec('''
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
''')
exec_in('''
def with_meta(cls):
class Meta(metaclass=cls):
pass
return Meta
''', globals())
else:
exec('''
def exec_in(code, glob, loc=None):
if isinstance(code, str):
        code = compile(code, '<string>', 'exec', dont_inherit=True)
exec code in glob, loc
''')
exec_in('''
def with_meta(cls):
class Meta(object):
__metaclass__ = cls
pass
return Meta
''', globals())
class AnimalMeta(type):
species = 0
def __new__(cls, name, bases, attrs):
if not name == 'Meta':
cls.species += 1
print(
'First, metaclass.__new__ received (metaclass, name, bases, attrs)')
print(cls, name, bases, attrs)
return super(AnimalMeta, cls).__new__(cls, name, bases, attrs)
def __init__(self, name, bases, attrs):
if not name == 'Meta':
print(
'Second, metaclass.__init__ received (self, name, bases, attrs)')
print(self, name, bases, attrs)
def __call__(self, *args, **kwargs):
print("AnimalMeta.__call__")
return super(AnimalMeta, self).__call__(*args, **kwargs)
class Cat(with_meta(AnimalMeta)):
name = 'cat'
def __init__(self):
print('Meow')
kit = Cat()
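# Expected console flow (a sketch; exact reprs vary by Python version):
# defining Cat runs AnimalMeta.__new__ and then AnimalMeta.__init__ once,
# printing the (metaclass, name, bases, attrs) tuples described above;
# calling Cat() then prints "AnimalMeta.__call__" followed by "Meow".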
|
"""
Prefill an Array
(6 kyu)
https://www.codewars.com/kata/54129112fb7c188740000162/train/python
Create the function prefill that returns an array of n elements that all have
the same value v. See if you can do this without using a loop.
You have to validate input:
- v can be anything (primitive or otherwise)
- if v is omitted, fill the array with undefined
- if n is 0, return an empty array
- if n is anything other than an integer or integer-formatted string
(e.g. '123') that is >=0, throw a TypeError
- When throwing a TypeError, the message should be "n is invalid", where you
  replace n with the actual value passed to the function.
Code Examples
prefill(3,1) --> [1,1,1]
prefill(2,"abc") --> ['abc','abc']
prefill("1", 1) --> [1]
prefill(3, prefill(2,'2d'))
--> [['2d','2d'],['2d','2d'],['2d','2d']]
prefill("xyz", 1)
--> throws TypeError with message "xyz is invalid"
"""
def prefill(n, v=None):
    try:
        # reject non-integral values (e.g. 2.5) and negatives, per the spec
        if int(n) != float(n) or int(n) < 0:
            raise ValueError
        n = int(n)
    except (ValueError, TypeError):
        raise TypeError("{} is invalid".format(n))
    return [v] * n
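# A few sanity checks mirroring the kata examples above (a sketch, not the
# kata's official test suite):
if __name__ == '__main__':
    assert prefill(3, 1) == [1, 1, 1]
    assert prefill(2, "abc") == ['abc', 'abc']
    assert prefill("1", 1) == [1]
    try:
        prefill("xyz", 1)
    except TypeError as e:
        assert str(e) == "xyz is invalid"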
|
from .mdl_user import *
from .mdl_club import *
from .mdl_event import *
from .mdl_receipt import *
from .mdl_budget import *
from .mdl_division import *
from .mdl_eventsignin import *
from .mdl_joinrequest import *
|
__author__ = 'deevarvar'
import string
import random
import os
def string_generator(size=6, chars=string.ascii_letters+string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def touchFile(fname, time=None):
with open(fname, 'a'):
os.utime(fname,time)
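# Hedged usage sketch ('example.marker' is a placeholder path):
# touchFile('example.marker')          # create/refresh with the current atime/mtime
# touchFile('example.marker', (0, 0))  # explicit (atime, mtime) in epoch seconds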
|
import typing
from flask import json
FlaskHeaders = typing.Union[typing.List[typing.Tuple[str, str]], typing.Dict[str, str]]
FlaskResponse = typing.Tuple[str, int, FlaskHeaders]
def success(data, status=200) -> FlaskResponse:
return json.dumps(data, indent=2), status, [("Content-Type", "application/json")]
def failure(message, status=400) -> FlaskResponse:
return json.dumps({"errors": ([message]\
if hasattr(message, 'strip') else message if hasattr(message, 'split')\
else repr(message))}, indent=2),\
status,\
[("Content-Type", "application/json")]
|
import sys
import random
import time
import pauschpharos as PF
import lumiversepython as L
SEQ_LIM = 200
def memoize(ignore = None):
if(ignore is None):
ignore = set()
def inner(func):
cache = dict()
        def wrapper(*args):
            # Build the cache key from every arg whose index is not ignored.
            # Note: falsy args (0, '', None) are dropped from the key as well.
            memo = tuple(filter(lambda x: x,
                            map(lambda (i, e):
                                    e if (i not in ignore)
                                    else None,
                                enumerate(args))))
if(memo not in cache):
cache[memo] = func(*args)
return cache[memo]
return wrapper
return inner
def blank():
p = PF.PauschPharos()
p.SetBlank()
p.Trigger(PF.DEFAULT_ID, None)
def fireplace(rig):
# Warm up cache
for seq in xrange(SEQ_LIM):
query(rig, '$sequence=%d' % seq)
def init(upload = True, run = True, wipe = True, fire = True):
rig = L.Rig("/home/teacher/Lumiverse/PBridge.rig.json")
rig.init()
# Upload the blank template
if(upload):
blank()
# Run if requested
if(run):
rig.run()
# Wipe if requested
if(wipe):
for seq in xrange(SEQ_LIM):
query(rig, '$sequence=%d' % seq).setRGBRaw(0, 0, 0)
# Heat up the cache
if(fire and not wipe):
fireplace(rig)
return rig
@memoize(ignore = set([0]))
def query(rig, text):
return rig.select(text)
def seq(rig, num):
return query(rig, '$sequence=%d' % num)
def rand_color():
func = lambda: random.randint(0, 255) / 255.0
return (func(), func(), func())
|
from .circleclient import __version__
|
"""Test the wallet keypool and interaction with wallet encryption/locking."""
from test_framework.test_framework import DoriancoinTestFramework
from test_framework.util import *
class KeyPoolTest(DoriancoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
nodes = self.nodes
addr_before_encrypting = nodes[0].getnewaddress()
addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
wallet_info_old = nodes[0].getwalletinfo()
assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
# Encrypt wallet and wait to terminate
nodes[0].node_encrypt_wallet('test')
# Restart node 0
self.start_node(0)
# Keep creating keys
addr = nodes[0].getnewaddress()
addr_data = nodes[0].validateaddress(addr)
wallet_info = nodes[0].getwalletinfo()
assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(6)
nodes[0].walletlock()
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 6)
assert_equal(wi['keypoolsize'], 6)
# drain the internal keys
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
nodes[0].getrawchangeaddress()
addr = set()
# the next one should fail
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress)
# drain the external keys
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
addr.add(nodes[0].getnewaddress())
assert(len(addr) == 6)
# the next one should fail
assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 1)
nodes[0].keypoolrefill(3)
# test walletpassphrase timeout
time.sleep(1.1)
assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0)
# drain them by mining
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1)
nodes[0].walletpassphrase('test', 100)
nodes[0].keypoolrefill(100)
wi = nodes[0].getwalletinfo()
assert_equal(wi['keypoolsize_hd_internal'], 100)
assert_equal(wi['keypoolsize'], 100)
if __name__ == '__main__':
KeyPoolTest().main()
|
"""Unit tests for SCardConnect/SCardStatus/SCardDisconnect
This test case can be executed individually, or with all other test cases
thru testsuite_scard.py.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
from smartcard.scard import *
import sys
sys.path += ['..']
try:
from local_config import expectedATRs, expectedReaders
from local_config import expectedReaderGroups, expectedATRinReader
except ImportError:
    print 'execute test suite first to generate the local_config.py file'
sys.exit()
class testcase_getATR(unittest.TestCase):
"""Test scard API for ATR retrieval"""
def setUp(self):
hresult, self.hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
self.assertEquals(hresult, 0)
hresult, self.readers = SCardListReaders(self.hcontext, [])
self.assertEquals(hresult, 0)
def tearDown(self):
hresult = SCardReleaseContext(self.hcontext)
self.assertEquals(hresult, 0)
def _getATR(self, r):
if r < len(expectedATRs) and [] != expectedATRs[r]:
hresult, hcard, dwActiveProtocol = SCardConnect(
self.hcontext,
self.readers[r],
SCARD_SHARE_SHARED,
SCARD_PROTOCOL_T0 | SCARD_PROTOCOL_T1)
self.assertEquals(hresult, 0)
try:
hresult, reader, state, protocol, atr = SCardStatus(hcard)
self.assertEquals(hresult, 0)
self.assertEquals(reader, expectedReaders[r])
self.assertEquals(atr, expectedATRs[r])
finally:
hresult = SCardDisconnect(hcard, SCARD_UNPOWER_CARD)
self.assertEquals(hresult, 0)
def test_getATR0(self):
testcase_getATR._getATR(self, 0)
def test_getATR1(self):
testcase_getATR._getATR(self, 1)
def test_getATR2(self):
testcase_getATR._getATR(self, 2)
def test_getATR3(self):
testcase_getATR._getATR(self, 3)
def suite():
suite1 = unittest.makeSuite(testcase_getATR)
    return unittest.TestSuite((suite1,))
if __name__ == '__main__':
unittest.main()
|
from argparse import ArgumentParser
from collections import defaultdict
import sys
import os
from sonLib.bioio import cigarRead, cigarWrite, getTempFile, system
def getSequenceRanges(fa):
"""Get dict of (untrimmed header) -> [(start, non-inclusive end)] mappings
from a trimmed fasta."""
ret = defaultdict(list)
curSeq = ""
curHeader = None
curTrimmedStart = None
for line in fa:
line = line.strip()
if line == '':
continue
if line[0] == '>':
if curHeader is not None:
# Add previous seq info to dict
trimmedRange = (curTrimmedStart,
curTrimmedStart + len(curSeq))
untrimmedHeader = "|".join(curHeader.split("|")[:-1])
ret[untrimmedHeader].append(trimmedRange)
curHeader = line[1:].split()[0]
curTrimmedStart = int(curHeader.split('|')[-1])
curSeq = ""
else:
curSeq += line
if curHeader is not None:
# Add final seq info to dict
trimmedRange = (curTrimmedStart,
curTrimmedStart + len(curSeq))
untrimmedHeader = "|".join(curHeader.split("|")[:-1])
ret[untrimmedHeader].append(trimmedRange)
for key in ret.keys():
# Sort by range's start pos
ret[key] = sorted(ret[key], key=lambda x: x[0])
return ret
def validateRanges(seqRanges):
"""Fail if the given range dict contains overlapping ranges or if the
ranges aren't sorted.
"""
for seq, ranges in seqRanges.items():
for i, range in enumerate(ranges):
start = range[0]
if i - 1 >= 0:
range2 = ranges[i - 1]
assert start >= range2[1]
if i + 1 < len(ranges):
range2 = ranges[i + 1]
assert start < range2[0]
def sortCigarByContigAndPos(cigarPath, contigNum):
contigNameKey = 2 if contigNum == 1 else 6
startPosKey = 3 if contigNum == 1 else 7
tempFile = getTempFile()
system("sort -k %d,%d -k %d,%dn %s > %s" % (contigNameKey, contigNameKey, startPosKey, startPosKey, cigarPath, tempFile))
return tempFile
def upconvertCoords(cigarPath, fastaPath, contigNum, outputFile):
"""Convert the coordinates of the given alignment, so that the
alignment refers to a set of trimmed sequences originating from a
contig rather than to the contig itself."""
with open(fastaPath) as f:
seqRanges = getSequenceRanges(f)
validateRanges(seqRanges)
sortedCigarPath = sortCigarByContigAndPos(cigarPath, contigNum)
sortedCigarFile = open(sortedCigarPath)
currentContig = None
currentRangeIdx = None
currentRange = None
for alignment in cigarRead(sortedCigarFile):
# contig1 and contig2 are reversed in python api!!
contig = alignment.contig2 if contigNum == 1 else alignment.contig1
minPos = min(alignment.start2, alignment.end2) if contigNum == 1 else min(alignment.start1, alignment.end1)
maxPos = max(alignment.start2, alignment.end2) if contigNum == 1 else max(alignment.start1, alignment.end1)
if contig in seqRanges:
if contig != currentContig:
currentContig = contig
currentRangeIdx = 0
currentRange = seqRanges[contig][0]
while (minPos >= currentRange[1] or minPos < currentRange[0]) and currentRangeIdx < len(seqRanges[contig]) - 1:
currentRangeIdx += 1
currentRange = seqRanges[contig][currentRangeIdx]
if currentRange[0] <= minPos < currentRange[1]:
if maxPos - 1 > currentRange[1]:
raise RuntimeError("alignment on %s:%d-%d crosses "
"trimmed sequence boundary" %\
(contig,
minPos,
maxPos))
if contigNum == 1:
alignment.start2 -= currentRange[0]
alignment.end2 -= currentRange[0]
alignment.contig2 = contig + ("|%d" % currentRange[0])
else:
alignment.start1 -= currentRange[0]
alignment.end1 -= currentRange[0]
alignment.contig1 = contig + ("|%d" % currentRange[0])
else:
raise RuntimeError("No trimmed sequence containing alignment "
"on %s:%d-%d" % (contig,
minPos,
maxPos))
cigarWrite(outputFile, alignment, False)
os.remove(sortedCigarPath)
|
import pickle
from pueue.client.socket import connect_socket, receive_data, process_response
def command_factory(command):
"""A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and returns the unpickled object which is returned by the daemon.
Args:
command (string): The type of payload this should be. This determines
as what kind of instruction this will be interpreted by the daemon.
Returns:
function: The created function.
"""
    def communicate(body=None, root_dir=None):
"""Communicate with the daemon.
This function sends a payload to the daemon and returns the unpickled
object sent by the daemon.
Args:
            body (dict): Any other arguments that should be put into the payload.
root_dir (str): The root directory in which we expect the daemon.
We need this to connect to the daemons socket.
Returns:
function: The returned payload.
"""
        client = connect_socket(root_dir)
        if body is None:
            body = {}  # avoid mutating a shared default argument between calls
        body['mode'] = command
        # Delete the func entry we use to call the correct function with argparse,
        # as functions can't be pickled and this shouldn't be sent to the daemon.
        if 'func' in body:
            del body['func']
data_string = pickle.dumps(body, -1)
client.send(data_string)
# Receive message, unpickle and return it
response = receive_data(client)
return response
return communicate
def print_command_factory(command):
"""A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and prints the response of the daemon. If the daemon sends a
`response['status'] == 'error'`, the pueue client will exit with `1`.
Args:
command (string): The type of payload this should be. This determines
as what kind of instruction this will be interpreted by the daemon.
Returns:
function: The created function.
"""
    def communicate(body=None, root_dir=None):
        client = connect_socket(root_dir)
        if body is None:
            body = {}  # avoid mutating a shared default argument between calls
        body['mode'] = command
        # Delete the func entry we use to call the correct function with argparse,
        # as functions can't be pickled and this shouldn't be sent to the daemon.
        if 'func' in body:
            del body['func']
data_string = pickle.dumps(body, -1)
client.send(data_string)
# Receive message and print it. Exit with 1, if an error has been sent.
response = receive_data(client)
process_response(response)
return communicate
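# Hedged usage sketch ('status' and the root_dir are assumptions, not a
# documented pueue payload):
# get_status = command_factory('status')
# response = get_status({}, root_dir='/tmp/pueue')  # unpickled daemon reply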
|
__author__ = 'zhonghong'
|
"""
Title : Java program file
Author : JG
Date : dec 2016
Objet : script to create Propertie File Program
in : get infos from yml
out : print infos in properties file
"""
import sys,os
import yaml
import util as u
from random import randint
def create_properties_file(yml,armaDir):
progDir = u.define_prop_path(armaDir)
filename = progDir+""+u.get_program_name(yml)+".properties"
out = open(filename, 'w')
out.write("#Armadillo Workflow Platform 1.1 (c) Etienne Lord, Mickael Leclercq, Alix Boc, Abdoulaye Banire Diallo, Vladimir Makarenkov"+
"\n#"+yml['author']+
"\n#"+yml['date']+
"\n#Pgrogram info"+
"\nName= "+yml['Program']['name']+
"\nClassName= programs."+u.get_program_name(yml)+""+
"\nEditorClassName= editors."+u.get_program_name(yml)+"Editors"+
"\ndebug= false"+
"\nfilename= C\:\\armadillo2\\data\\properties\\"+u.get_program_name(yml)+".properties")
for paths in yml['Program']['executablePaths']:
out.write("\n"+paths+"="+yml['Program']['executablePaths'][paths])
out.write("\nHelpSupplementary=")
if yml['Program']['helpSupplementary']:
out.write(yml['Program']['helpSupplementary'])
out.write("\nPublication= ")
if yml['Program']['publication']:
out.write(yml['Program']['publication'])
out.write("\nDescription= ")
if yml['Program']['desc']:
out.write(yml['Program']['desc'])
ObjectID = randint(1000000000,9999999999)
out.write("\nObjectID="+u.get_program_name(yml)+"_"+str(ObjectID)+""+
"\nObjectType=Program"+
"\nNoThread=false")
out.write("\nType=")
if yml['Program']['menu']:
out.write(yml['Program']['menu'])
out.write("\nNormalExitValue=")
if yml['Program']['exitValue'] or yml['Program']['exitValue'] == 0:
out.write(str(yml['Program']['exitValue']))
out.write("\nVerifyExitValue=")
if yml['Program']['exitValue']:
out.write('true')
else:
out.write('false')
out.write("\nWebServices=")
if yml['Program']['webServices']:
out.write(yml['Program']['webServices'])
out.write("\nWebsite=")
if yml['Program']['website']:
out.write(yml['Program']['website'])
# Color options
color = u.get_color(yml)
out.write("\ncolorMode = "+color+""+
"\ndefaultColor = "+color+"")
# Inputs types
out.write("\n#INPUTS TYPES")
if len(yml['Inputs']) > 0:
o = ""
s = ""
for op in yml['Inputs']:
if op['type']:
out.write("\nInput"+op['type']+"=Connector"+str(op['connector']))
if op['OneConnectorOnlyFor']:
if o == "":
o = str(op['OneConnectorOnlyFor'])
else:
t = str(op['OneConnectorOnlyFor'])
if t not in o:
o = o+","+t
if op['SolelyConnectors']:
if s == "":
s = str(op['SolelyConnectors'])
else:
t = str(op['SolelyConnectors'])
if t not in o:
s = s+","+t
# Inputs options
if o != "" or s != "":
out.write("\n#INPUTS OPTIONS")
if o != "":
out.write("\nOneConnectorOnlyFor="+o)
if s != "":
out.write("\nSolelyConnectors= "+s)
else:
out.write("\nNO IMPUTS ??\n")
# Inputs Names
out.write("\n#INPUTS Connector text")
tab = ('2','3','4')
for t in tab:
c = ""
if len(yml['Inputs']) > 0:
for op in yml['Inputs']:
o = str(op['connector'])
if t in o or "true" in o:
if c == "":
c = str(op['connectorText'])
else:
s = str(op['connectorText'])
if s not in c:
c = c+", "+s
if c != "":
out.write("\nConnector"+t+"= "+c)
# Number of inputs
out.write("\nnbInput= ")
if yml['Program']['numImputs']:
out.write(str(yml['Program']['numImputs']))
# Outputs values
out.write("\n#OUTPUTS OPTIONS"+
"\nConnector0Output=True"+
"\nOutputResults=Connector0"+
"\nOutputOutputText=Connector0")
if len(yml['Outputs']) > 0:
for op in yml['Outputs']:
if op['type']:
out.write("\nOutput"+op['type']+"=Connector0")
# Default Values
out.write("\n#DEFAULT VALUES"+
"\ndefaultPgrmValues=")
for Panel in yml['Menus']:
pNameS = u.name_without_space(Panel['name'])
if 'Panel' not in Panel:
# Means default option
out.write(""+pNameS+"<>true<>")
else:
for Tab in Panel['Panel']:
if 'Arguments' in Tab:
tName = Tab['tab']
for Arguments in Tab['Arguments']:
cName = Arguments['name']
if 'values' in Arguments and \
Arguments['values'] is not None and \
Arguments['values']['vType'] is not None:
vType = Arguments['values']['vType']
v = u.create_value_name(pNameS,tName,cName,vType)
vDef = str(Arguments['values']['vDefault'])
out.write(v+"<>"+vDef+"<>")
out.write("\n#Cluster")
if 'Cluster' in yml and yml['Cluster'] is not None:
if 'ClusterProgramName' in yml['Cluster']:
out.write("\nClusterProgramName="+yml['Cluster']['ClusterProgramName'])
if 'ExecutableCluster' in yml['Cluster']:
out.write("\nExecutableCluster="+yml['Cluster']['ExecutableCluster'])
if 'version' in yml['Program']:
out.write("\nVersion= "+u.get_program_version(yml)+"")
out.write("\n#Docker")
if 'Docker' in yml and yml['Docker'] is not None:
if 'DockerImage' in yml['Docker']:
out.write("\nDockerImage="+yml['Docker']['DockerImage'])
if 'ExecutableDocker' in yml['Docker']:
out.write("\nExecutableDocker="+yml['Docker']['ExecutableDocker'])
if 'DockerInputs' in yml['Docker']:
out.write("\nDockerInputs="+yml['Docker']['DockerInputs'])
if 'DockerOutputs' in yml['Docker']:
out.write("\nDockerOutputs="+yml['Docker']['DockerOutputs'])
|
import os
os.environ['KIVY_GL_BACKEND'] = 'gl' #need this to fix a kivy segfault that occurs with python3 for some reason
from kivy.app import App
class TestApp(App):
pass
if __name__ == '__main__':
TestApp().run()
|
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import json
import falcon
import urllib
import uuid
import settings
import requests
from geopy.geocoders import Nominatim
import geopy.distance
from geopy.distance import vincenty
import datetime
radius = []
radius_maps = []
geoJSON_template = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
]
}
}
]
}
class interest(object):
global radius
interested = {}
def proximity_to_others(self, my_coordinates):
if radius:
for x in radius:
radius_center = (x['center'][0],x['center'][1])
my_coordinates = (my_coordinates[0], my_coordinates[1])
distance = vincenty(radius_center, my_coordinates).kilometers
print("Proximity distance")
print(distance)
return distance, x["center"]
else:
return 0, []
def geojson_io_prox(self, resp, my_coordinates, user_name):
        global radius
        distance = 0
        nearest_center = []
        try:
            # Unpack into a local name; assigning to the global `radius` here
            # would clobber the list of radius dicts with a center point.
            distance, nearest_center = self.proximity_to_others(my_coordinates)
        except Exception as e:
            print(e)
if not distance or distance < 1:
points = []
start = geopy.Point(my_coordinates[0], my_coordinates[1])
d = geopy.distance.VincentyDistance(kilometers = 1)
for x in range(0,360, 10):
points.append(d.destination(point=start, bearing=x))
print("\n\n POINTS")
print("\n\n")
radius_dict = {
'center': my_coordinates,
'radius': points,
'people': [user_name,],
'created_date': datetime.datetime.utcnow().strftime("%a %b %d %H:%M:%S %Z %Y")
}
radius.append(radius_dict)
print("\n\n RADIUS: ")
print(radius)
print("\n\n")
else:
            for x in radius:
                if x["center"] == nearest_center:
x['people'].append(
{'name': user_name,
'coordinates':
my_coordinates}
)
def proximity(self,req, resp, my_coordinates, user_name):
# Works out user/client proximity to mytransport API stops
# Works on a radius of 1km. Assumption on average walk time
global radius_maps
google_map_url = "http://www.google.com/maps/place/"
query_params = {"point":"{},{}".format(my_coordinates[0], my_coordinates[1]),
"radius":"1000"}
endpoint ="api/stops"
headers = {"Authorization": "Bearer {}".format(settings.ACCESS_TOKEN)}
request = requests.get("{}/{}".format(settings.API_URL,endpoint),
params=query_params,
headers=headers)
print("Response from api/stops")
print(request.status_code)
response_data = request.json()
print(type(response_data))
if not response_data:
resp.status = falcon.HTTP_200
your_radius_map = ""
for x in radius_maps:
if x["center"] == my_coordinates:
your_radius_map = x["geoJSON_url"]
            message_dict = {'message':
                "No stops in your area, adding you to interest area",
                "maps": your_radius_map}
            resp.body = json.dumps(message_dict)
return False
else:
map_list = []
message_dict = {"message":"", "maps":[]}
for x in response_data:
print(x)
if 'geometry' in x:
coordinates = x["geometry"]["coordinates"]
map_list.append("{}{},{}".format(google_map_url,
coordinates[1],
coordinates[0]))
message_dict["maps"] = map_list
            if map_list:  # message_dict itself is always truthy; check the maps
                message_dict["message"] = """You have existing stops within 1km
                of your location"""
            else:
                message_dict["message"] = """You have no existing stops nearby,
                we will combine your interest in a stop with others in the area"""
resp.body = json.dumps(message_dict)
resp.status = falcon.HTTP_200
return True
#return True
def geopy_coordinates(self, address,resp):
try:
geolocator = Nominatim()
location = geolocator.geocode(address)
if location.latitude and location.longitude:
return [location.latitude, location.longitude]
except Exception as e:
print(e)
resp.body = """{'message':'Bad address,
try being more specific and try agai'}"""
resp.status = falcon.HTTP_400
def on_get(self, req, resp):
resp_dict = {"message":"Post request needed with GeoLocation data"}
resp.body = json.dumps(resp_dict)
resp.status = falcon.HTTP_200
def on_post(self, req, resp):
# Main API method, post the following
'''
POST Request
data type: JSON
Required: name, address or coordinates
data format : {
"name" : "Yourname",
"address" : "Your number and street address, province, etc"
"geometry" : { "coordinates" : ["x", "y"] }
'''
global radius_maps
global radius
print(req.headers)
user_name = ""
post_data = json.load(req.stream)
print(post_data)
if "name" in post_data:
user_name = post_data["name"]
print("Username IF statement")
print(user_name)
if "geometry" in post_data:
if not self.proximity(req,resp, post_data["geometry"]["coordinates"],user_name):
self.geojson_io_prox(resp, post_data["geometry"]["coordinates"],user_name)
elif post_data["address"]:
if "address" in post_data:
my_coordinates = self.geopy_coordinates(post_data["address"],resp)
print("BASED ON ADDRESS")
proximity = self.proximity(req, resp, my_coordinates, user_name)
print("PROXIMITY")
print(proximity)
if proximity == False:
print("NO routes")
self.geojson_io_prox(resp,my_coordinates, user_name)
else:
            # neither coordinates nor an address was supplied
            resp.status = falcon.HTTP_400
            resp_dict = {'message':
                'Please supply an address or coordinates (long,lat)'}
# json.dumps allows proper formating of message
resp.body = json.dumps(resp_dict)
print("Current Radius")
print(radius)
radius_list = []
radius_maps = []
for x in radius:
for y in x['radius']:
radius_list.append([y[1],y[0]])
radius_list.append([x['radius'][0][1],x['radius'][0][0]])
geoJSON_template['features'][0]['geometry']['coordinates'].append(radius_list)
radius_maps.append( {
'center': x['center'],
'geoJSON': geoJSON_template,
'geoJSON_url' : "http://geojson.io/#map=5/{}/{}&data=data:application/json,{}".format(
x['center'][1], x['center'][0], urllib.quote(json.dumps(geoJSON_template).encode()) )
}
)
#resp.body
print(radius_maps)
|
def main(**kwargs):
print('foo foo')
|
"""Test mempool persistence.
By default, bitcoind will dump mempool on shutdown and
then reload it on startup. This can be overridden with
the -persistmempool=0 command line option.
Test is as follows:
- start node0, node1 and node2. node1 has -persistmempool=0
- create 5 transactions on node2 to its own address. Note that these
are not sent to node0 or node1 addresses because we don't want
them to be saved in the wallet.
- check that node0 and node1 have 5 transactions in their mempools
- shutdown all nodes.
- startup node0. Verify that it still has 5 transactions
in its mempool. Shutdown node0. This tests that by default the
mempool is persistent.
- startup node1. Verify that its mempool is empty. Shutdown node1.
This tests that with -persistmempool=0, the mempool is not
dumped to disk when the node is shut down.
- Restart node0 with -persistmempool=0. Verify that its mempool is
empty. Shutdown node0. This tests that with -persistmempool=0,
the mempool is not loaded from disk on start up.
- Restart node0 with -persistmempool. Verify that it has 5
transactions in its mempool. This tests that -persistmempool=0
does not overwrite a previously valid mempool stored on disk.
- Remove node0 mempool.dat and verify savemempool RPC recreates it
and verify that node1 can load it and has 5 transactions in its
mempool.
- Verify that savemempool throws when the RPC is called if
node1 can't write to disk.
"""
from decimal import Decimal
import os
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
wait_until,
)
class MempoolPersistTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.extra_args = [[], ["-persistmempool=0"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.debug("Send 5 transactions from node2 (to its own address)")
tx_creation_time_lower = int(time.time())
for i in range(5):
last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
tx_creation_time_higher = int(time.time())
self.log.debug("Verify that node0 and node1 have 5 transactions in their mempools")
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prioritize a transaction on node0")
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'], fees['modified'])
self.nodes[0].prioritisetransaction(txid=last_txid, fee_delta=1000)
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
tx_creation_time = self.nodes[0].getmempoolentry(txid=last_txid)['time']
assert_greater_than_or_equal(tx_creation_time, tx_creation_time_lower)
assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time)
self.log.debug("Stop-start the nodes. Verify that node0 has the transactions in its mempool and node1 does not. Verify that node2 calculates its balance correctly after loading wallet transactions.")
self.stop_nodes()
# Give this node a head-start, so we can be "extra-sure" that it didn't load anything later
# Also don't store the mempool, to keep the datadir clean
self.start_node(1, extra_args=["-persistmempool=0"])
self.start_node(0)
self.start_node(2)
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"], timeout=1)
wait_until(lambda: self.nodes[2].getmempoolinfo()["loaded"], timeout=1)
assert_equal(len(self.nodes[0].getrawmempool()), 5)
assert_equal(len(self.nodes[2].getrawmempool()), 5)
# The others have loaded their mempool. If node_1 loaded anything, we'd probably notice by now:
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.log.debug('Verify prioritization is loaded correctly')
fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees']
assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified'])
self.log.debug('Verify time is loaded correctly')
assert_equal(tx_creation_time, self.nodes[0].getmempoolentry(txid=last_txid)['time'])
# Verify accounting of mempool transactions after restart is correct
self.nodes[2].syncwithvalidationinterfacequeue() # Flush mempool to wallet
assert_equal(node2_balance, self.nodes[2].getbalance())
self.log.debug("Stop-start node0 with -persistmempool=0. Verify that it doesn't load its mempool.dat file.")
self.stop_nodes()
self.start_node(0, extra_args=["-persistmempool=0"])
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.log.debug("Stop-start node0. Verify that it has the transactions in its mempool.")
self.stop_nodes()
self.start_node(0)
wait_until(lambda: self.nodes[0].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[0].getrawmempool()), 5)
mempooldat0 = os.path.join(self.nodes[0].datadir, 'regtest', 'mempool.dat')
mempooldat1 = os.path.join(self.nodes[1].datadir, 'regtest', 'mempool.dat')
self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it")
os.remove(mempooldat0)
self.nodes[0].savemempool()
assert os.path.isfile(mempooldat0)
self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 5 transactions")
os.rename(mempooldat0, mempooldat1)
self.stop_nodes()
self.start_node(1, extra_args=[])
wait_until(lambda: self.nodes[1].getmempoolinfo()["loaded"])
assert_equal(len(self.nodes[1].getrawmempool()), 5)
self.log.debug("Prevent bitcoind from writing mempool.dat to disk. Verify that `savemempool` fails")
# to test the exception we are creating a tmp folder called mempool.dat.new
# which is an implementation detail that could change and break this test
mempooldotnew1 = mempooldat1 + '.new'
os.mkdir(mempooldotnew1)
assert_raises_rpc_error(-1, "Unable to dump mempool to disk", self.nodes[1].savemempool)
os.rmdir(mempooldotnew1)
if __name__ == '__main__':
MempoolPersistTest().main()
|
import argparse
from collections import defaultdict, Counter, deque
import random
import json
import time
from tqdm import tqdm
import wikipedia
class MarkovModel(object):
def __init__(self):
self.states = defaultdict(lambda: Counter())
self.totals = Counter()
def add_sample(self, state, followup):
self.states[state][followup] += 1
self.totals[state] += 1
def generate(self):
result = []
for followup in self.iter_chain():
result.append(followup)
return result
def iter_chain(self, state=tuple()):
while state in self.states:
followup = self.next(state)
state = state[1:] + followup
for token in followup:
yield token
def next(self, state):
r = random.randint(0, self.totals[state] - 1)
for followup, weight in self.states[state].items():
r -= weight
if r < 0:
return followup
raise ValueError("Mismatch of totals / weights for state {}".format(state))
def to_json(self):
converted = {' '.join(state): list(followups.keys()) for state, followups in self.states.items()}
return json.dumps(converted)
def iter_states(tokens, state_size, start_state=tuple(), end_marker=None):
# First transition is from empty state to first token-based state
yield start_state, tuple(tokens[0:state_size])
state = tuple(tokens[0:state_size])
for token in tokens[state_size:]:
# Each additional token means last state to that token
yield state, (token,)
# New state is last {state_size} tokens we yielded
state = state[1:] + (token,)
# End is marked by None
yield state, end_marker
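# Worked example: iter_states(['a', 'b', 'c', 'd'], 2) yields
#   ((), ('a', 'b'))       # start state -> first state_size tokens
#   (('a', 'b'), ('c',))   # then one token at a time
#   (('b', 'c'), ('d',))
#   (('c', 'd'), None)     # end marker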
def tokenize_story(story):
story = deque(story)
yield "\n"
while len(story) > 0:
token = eat_one_token(story)
if token is not None:
yield token
def eat_one_token(story):
while len(story) > 0 and isinvalid(story[0]):
story.popleft()
if len(story) == 0:
return None
if isalnum(story[0]):
return eat_word(story)
if ispunctuation(story[0]):
return eat_punctuation(story)
if isnewline(story[0]):
return eat_newline(story)
def isinvalid(char):
return not isalnum(char) and not ispunctuation(char) and not isnewline(char)
def isalnum(char):
return char.isalnum() or char == "'" or char == "’"
def ispunctuation(char):
return char in ",.-!?:&"
def isnewline(char):
return char == '\n'
def eat_word(story):
word = [story.popleft()]
while len(story) > 0 and isalnum(story[0]):
word.append(story.popleft())
return ''.join(word)
def eat_punctuation(story):
token = [story.popleft()]
while len(story) > 0 and ispunctuation(story[0]):
token.append(story.popleft())
return ''.join(token)
def eat_newline(story):
while len(story) > 0 and story[0].isspace():
story.popleft()
return '\n'
def load_story(filenames):
stories = []
for filename in filenames:
with open(filename) as fp:
story = fp.read()
if filename.endswith('.ftxt'):
story = remove_single_newlines(story)
stories.append(story)
return '\n'.join(stories)
def remove_single_newlines(story):
paragraphs = [[]]
for line in story.splitlines():
if len(line.strip()) == 0:
paragraphs.append([])
else:
paragraphs[-1].append(line)
return '\n'.join(' '.join(x for x in p) for p in paragraphs)
def load_wikipedia(num_articles):
lines = []
while num_articles > 0:
chunk = min(10, num_articles)
num_articles -= 10
for article in wikipedia.random(chunk):
try:
page = wikipedia.page(article)
except wikipedia.DisambiguationError as ex:
page = wikipedia.page(ex.args[1][0])
print(article)
lines.extend(x for x in page.content.splitlines() if not x.startswith('==') and len(x) > 0)
return '\n'.join(lines)
def main(args):
model = MarkovModel()
if args.mode == 'txt':
story = load_story(args.txt)
elif args.mode == 'wikipedia':
story = load_wikipedia(100)
else:
raise ValueError("invalid mode {}".format(args.mode))
tokens = list(tqdm(tokenize_story(story), desc="tokenizing"))
for state, followup in tqdm(iter_states(tokens, 3, start_state=tuple('\n'), end_marker=()), desc="building model"):
model.add_sample(state, followup)
print("Saving Model...")
with open("model.json", "w") as fp:
fp.write(model.to_json())
print("Generating Story:")
for token in model.iter_chain(tuple('\n')):
if not ispunctuation(token):
print(" ", end="")
print(token, end="", flush=True)
time.sleep(0.05)
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('mode', choices=['txt', 'wikipedia'])
ap.add_argument('--txt', action='append')
return ap.parse_args()
if __name__ == '__main__':
main(parse_args())
|
from sanic.exceptions import *
class GatcoException(SanicException):
pass
|
SCORES = {'A': 100, 'B': 14, 'C': 9, 'D': 28, 'E': 145, 'F': 12, 'G': 3,
'H': 10, 'I': 200, 'J': 100, 'K': 114, 'L': 100, 'M': 25,
'N': 450, 'O': 80, 'P': 2, 'Q': 12, 'R': 400, 'S': 113, 'T': 405,
'U': 11, 'V': 10, 'W': 10, 'X': 3, 'Y': 210, 'Z': 23}
def sexy_name(name):
name_score = sum(SCORES.get(a, 0) for a in name.upper())
if name_score >= 600:
return 'THE ULTIMATE SEXIEST'
elif name_score >= 301:
return 'VERY SEXY'
elif name_score >= 60:
return 'PRETTY SEXY'
return 'NOT TOO SEXY'
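# Boundary examples (scores computed from the SCORES table above):
# sexy_name('P')   -> 'NOT TOO SEXY'          (2)
# sexy_name('O')   -> 'PRETTY SEXY'           (80)
# sexy_name('R')   -> 'VERY SEXY'             (400)
# sexy_name('IN')  -> 'THE ULTIMATE SEXIEST'  (200 + 450 = 650)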
|
for i in range(1, 20 + 1, 2):  # iterate the range directly; no temporary list needed
print(i)
|
import json
import os
import unittest
import requests
AGNOS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
MANIFEST = os.path.join(AGNOS_DIR, "agnos.json")
class TestAgnosUpdater(unittest.TestCase):
def test_manifest(self):
with open(MANIFEST) as f:
m = json.load(f)
for img in m:
r = requests.head(img['url'])
r.raise_for_status()
self.assertEqual(r.headers['Content-Type'], "application/x-xz")
if not img['sparse']:
assert img['hash'] == img['hash_raw']
if __name__ == "__main__":
unittest.main()
|
import argparse
import smtplib
import mimetypes
import os.path as path
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
def parseOptions():
parser = argparse.ArgumentParser(description='Send e-books to my Kindle Paperwhite.')
parser.add_argument('file', type=str, help='The e-book file.')
parser.add_argument('-c', type=str, default = 'y', help='Convert?.')
parser.add_argument('-t', type=str, default = 'n', help='Test mode, email sent to yahoo.')
args = parser.parse_args()
return args
def send2Kindle():
opt = parseOptions()
msg = MIMEMultipart()
msg['From'] = 'YOUR_EMAIL_ADDR'
msg['To'] = 'YOUR_KINDLE_EMAIL_ADDR' if opt.t == 'n' else 'YOUR_DEBUG_EMAIL_ADDR'
# TODO check if option is valid
msg['Subject'] = '' if opt.c == 'n' else 'Convert'
fileToSend = opt.file # the file to attach
# TODO check if file exists
    if not path.exists(fileToSend):
        raise IOError('File not found: %s' % fileToSend)
# get maintype and subtype for MIMEBase
    ctype, encoding = mimetypes.guess_type(fileToSend)
    if ctype is None:
        ctype = 'application/octet-stream'  # fallback when the type can't be guessed
    maintype, subtype = ctype.split("/", 1)
# read (only) 1 attachment file
fp = open(fileToSend,"rb")
attachment = MIMEBase(maintype, subtype)
attachment.set_payload(fp.read())
fp.close()
encoders.encode_base64(attachment)
attachment.add_header("Content-Disposition", "attachment", filename = fileToSend.split('/')[-1])
msg.attach(attachment)
# set mail server info, using yahoo.com as an example
username = str('example_email_addr@yahoo.com')
password = str('email_psd') # Use app generated for security purposes
try :
        server = smtplib.SMTP("smtp.mail.yahoo.com",587) # an example setup for the yahoo mail server
print 'Logging into {server}'.format(server='mail.yahoo.com')
server.starttls()
server.login(username,password)
print 'Login success!'
print 'Sending "{file}" to {to}.'.format(file = fileToSend, to = msg['To'])
server.sendmail(msg['From'], msg['To'], msg.as_string())
server.quit()
print 'Email has been sent successfully!'
    except Exception:
print 'Cannot send the Email!'
if __name__ == '__main__':
send2Kindle()
|
import deep_architect.searchers.common as se
import numpy as np
class SuccessiveNarrowing(se.Searcher):
def __init__(self, search_space_fn, num_initial_samples, reduction_factor,
reset_default_scope_upon_sample):
se.Searcher.__init__(self, search_space_fn,
reset_default_scope_upon_sample)
self.num_initial_samples = num_initial_samples
self.reduction_factor = reduction_factor
self.vals = [None for _ in range(num_initial_samples)]
self.num_remaining = num_initial_samples
self.idx = 0
self.queue = []
for _ in range(num_initial_samples):
inputs, outputs = search_space_fn()
hyperp_value_lst = se.random_specify(outputs)
self.queue.append(hyperp_value_lst)
def sample(self):
assert self.idx < len(self.queue)
hyperp_value_lst = self.queue[self.idx]
(inputs, outputs) = self.search_space_fn()
se.specify(outputs, hyperp_value_lst)
idx = self.idx
self.idx += 1
return inputs, outputs, hyperp_value_lst, {"idx": idx}
def update(self, val, searcher_eval_token):
assert self.num_remaining > 0
idx = searcher_eval_token["idx"]
assert self.vals[idx] is None
self.vals[idx] = val
self.num_remaining -= 1
# generate the next round of architectures by keeping the best ones.
if self.num_remaining == 0:
num_samples = int(self.reduction_factor * len(self.queue))
assert num_samples > 0
top_idxs = np.argsort(self.vals)[::-1][:num_samples]
self.queue = [self.queue[idx] for idx in top_idxs]
self.vals = [None for _ in range(num_samples)]
self.num_remaining = num_samples
self.idx = 0
def run_successive_narrowing(search_space_fn, num_initial_samples,
initial_budget, get_evaluator, extract_val_fn,
num_samples_reduction_factor,
budget_increase_factor, num_rounds,
get_evaluation_logger):
num_samples = num_initial_samples
    searcher = SuccessiveNarrowing(search_space_fn, num_initial_samples,
                                   num_samples_reduction_factor,
                                   # assumed default: the original call omitted
                                   # this required constructor argument
                                   reset_default_scope_upon_sample=True)
evaluation_id = 0
for round_idx in range(num_rounds):
budget = initial_budget * (budget_increase_factor**round_idx)
evaluator = get_evaluator(budget)
for idx in range(num_samples):
(inputs, outputs, hyperp_value_lst,
searcher_eval_token) = searcher.sample()
results = evaluator.eval(inputs, outputs)
val = extract_val_fn(results)
searcher.update(val, searcher_eval_token)
logger = get_evaluation_logger(evaluation_id)
logger.log_config(hyperp_value_lst, searcher_eval_token)
logger.log_results(results)
evaluation_id += 1
num_samples = int(num_samples_reduction_factor * num_samples)
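# Illustration of the schedule (pure arithmetic, no deep_architect calls):
# with num_initial_samples=16, num_samples_reduction_factor=0.5,
# initial_budget=1, budget_increase_factor=2 and num_rounds=3, the rounds run
#   round 0: 16 evaluations at budget 1
#   round 1:  8 evaluations at budget 2
#   round 2:  4 evaluations at budget 4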
|
'''
Dummy Socks5 server for testing.
'''
from __future__ import print_function, division, unicode_literals
import socket, threading, Queue
import traceback, sys
class Command:
CONNECT = 0x01
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
def recvall(s, n):
'''Receive n bytes from a socket, or fail'''
rv = bytearray()
while n > 0:
d = s.recv(n)
if not d:
raise IOError('Unexpected end of stream')
rv.extend(d)
n -= len(d)
return rv
class Socks5Configuration(object):
'''Proxy configuration'''
def __init__(self):
self.addr = None # Bind address (must be set)
self.af = socket.AF_INET # Bind address family
self.unauth = False # Support unauthenticated
self.auth = False # Support authentication
class Socks5Command(object):
'''Information about an incoming socks5 command'''
def __init__(self, cmd, atyp, addr, port, username, password):
self.cmd = cmd # Command (one of Command.*)
self.atyp = atyp # Address type (one of AddressType.*)
self.addr = addr # Address
self.port = port # Port to connect to
self.username = username
self.password = password
def __repr__(self):
return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
class Socks5Connection(object):
def __init__(self, serv, conn, peer):
self.serv = serv
self.conn = conn
self.peer = peer
def handle(self):
'''
Handle socks5 request according to RFC1928
'''
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
(ver,cmd,rsv,atyp) = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = str(recvall(self.conn, n))
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
port_hi,port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
print('Proxy: ', cmdin)
# Fall through to disconnect
        except Exception as e:
traceback.print_exc(file=sys.stderr)
self.serv.queue.put(e)
finally:
self.conn.close()
class Socks5Server(object):
def __init__(self, conf):
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = Queue.Queue() # report connections and exceptions to client
def run(self):
while self.running:
(sockconn, peer) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn, peer)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.start()
def start(self):
assert(not self.running)
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
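# Minimal usage sketch (illustrative, not part of the original module): bind the dummy
# proxy locally, let a test client connect through it, then inspect the reported command.
#
#   conf = Socks5Configuration()
#   conf.addr = ('127.0.0.1', 19050)
#   conf.unauth = True
#   server = Socks5Server(conf)
#   server.start()
#   ...point a SOCKS5 client at 127.0.0.1:19050...
#   cmd = server.queue.get()   # a Socks5Command, or an Exception on failure
#   server.stop()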
|
"""Implement directed graph abstract data type."""
from __future__ import print_function
from .stack import Stack
from .queue import Queue
class Graph(object):
"""Implement a directed graph."""
def __init__(self):
"""Initialize a graph with no nodes or edges."""
self._nodes = {}
def add_node(self, n):
"""Add a node to the graph.
Do nothing if it's already in the graph.
"""
self._nodes.setdefault(n, {})
def nodes(self):
"""Return a list of all nodes."""
return list(self._nodes)
def _edges(self):
"""Return a generator of all edges (represented by tuples)."""
for node in self._nodes:
for neighbor in self._nodes[node]:
yield (node, neighbor, self._nodes[node][neighbor])
def edges(self):
"""Return a list of all edges (represented by tuples).
The tuples are generated like (node1, node2, weight).
"""
return list(self._edges())
def add_edge(self, a, b, weight):
"""Add an edge between two nodes.
If those nodes don't exist yet in the graph, create them.
"""
self.add_node(a)
self.add_node(b)
self._nodes[a][b] = weight
def del_node(self, n):
"""Delete a node in the graph.
Deletes all the edges involving the node as well. Raises
ValueError if the node isn't in the graph.
"""
try:
del self._nodes[n]
except KeyError:
raise ValueError('Node not in graph')
for neighbors in self._nodes.values():
try:
del neighbors[n]
except KeyError:
pass
def del_edge(self, a, b):
"""Delete an edge in the graph.
Raises ValueError if the edge isn't in the graph.
"""
try:
del self._nodes[a][b]
except KeyError:
            raise ValueError('Edge not in graph')
def has_node(self, n):
"""Return whether a node is in the graph."""
return n in self._nodes
def neighbors(self, n):
"""Return a list of all the neighbors a node has."""
return list(self._nodes[n])
def adjacent(self, n1, n2):
"""Return whether two nodes are adjacent.
Raises a ValueError if either node is not in the graph.
"""
try:
return n2 in self._nodes[n1]
except KeyError:
raise ValueError('Node not in graph')
def _traverse(self, start, nonempty, add, remove):
"""Traverse iteratively.
Uses given functions (nonempty, add, remove) to store
nodes. Depending on the behavior of these functions, this
traversal may be performed differently.
"""
add(start)
result = []
while nonempty():
root = remove()
if root not in result:
result.append(root)
for node in self.neighbors(root):
add(node)
return result
def depth_first_traversal(self, start):
"""Return a list of nodes as found in depth-first order."""
stack = Stack()
return self._traverse(start, stack.size, stack.push, stack.pop)
def breadth_first_traversal(self, start):
"""Return a list of nodes as found in breadth-first order."""
queue = Queue()
return self._traverse(start, queue.size, queue.enqueue, queue.dequeue)
def shortest_path(self, start, end):
"""Dijkstra's algorithm."""
distance_from_start = {start: 0}
unvisited = set(self.nodes())
parents = {}
while end in unvisited:
current = min((weight, node)
for node, weight
in distance_from_start.items()
if node in unvisited)[1]
for neighbor in self.neighbors(current):
weight = self._nodes[current][neighbor] + distance_from_start[current]
dist = distance_from_start.setdefault(neighbor, weight)
if weight <= dist:
distance_from_start[neighbor] = weight
parents[neighbor] = current
unvisited.remove(current)
s = []
weight = 0
current = end
while current in parents:
s.append(current)
weight += self._nodes[parents[current]][current]
current = parents[current]
s.append(start)
return s[::-1], weight
if __name__ == '__main__':
g = Graph()
g.add_edge('0-0', '1-0', 0)
g.add_edge('0-0', '1-1', 0)
g.add_edge('1-0', '2-0', 0)
g.add_edge('1-0', '2-1', 0)
g.add_edge('1-1', '2-2', 0)
g.add_edge('1-1', '2-3', 0)
print(r'''Graph:
0-0
/ \
1-0 1-1
/ \ / \
2-0 2-1 2-2 2-3''')
print('depth first ', g.depth_first_traversal('0-0'))
print('breadth first ', g.breadth_first_traversal('0-0'))
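    # Illustrative addition (not in the original): give one route a nonzero weight so
    # shortest_path has a real choice; it avoids the direct weight-10 edge.
    g.add_edge('0-0', '2-3', 10)
    print('shortest path ', g.shortest_path('0-0', '2-3'))  # (['0-0', '1-1', '2-3'], 0)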
|
import os
import shutil
import unittest
from django.utils import six
from django_node import node, npm
from django_node.node_server import NodeServer
from django_node.server import server
from django_node.base_service import BaseService
from django_node.exceptions import (
OutdatedDependency, MalformedVersionInput, NodeServiceError, NodeServerAddressInUseError, NodeServerTimeoutError,
ServiceSourceDoesNotExist, MalformedServiceName
)
from django_node.services import EchoService
from .services import TimeoutService, ErrorService
from .utils import StdOutTrap
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
PATH_TO_NODE_MODULES = os.path.join(TEST_DIR, 'node_modules')
DEPENDENCY_PACKAGE = 'yargs'
PATH_TO_INSTALLED_PACKAGE = os.path.join(PATH_TO_NODE_MODULES, DEPENDENCY_PACKAGE)
PACKAGE_TO_INSTALL = 'jquery'
PATH_TO_PACKAGE_TO_INSTALL = os.path.join(PATH_TO_NODE_MODULES, PACKAGE_TO_INSTALL)
PATH_TO_PACKAGE_JSON = os.path.join(TEST_DIR, 'package.json')
echo_service = EchoService()
timeout_service = TimeoutService()
error_service = ErrorService()
class TestDjangoNode(unittest.TestCase):
maxDiff = None
def setUp(self):
self.package_json_contents = self.read_package_json()
def tearDown(self):
if os.path.exists(PATH_TO_NODE_MODULES):
shutil.rmtree(PATH_TO_NODE_MODULES)
self.write_package_json(self.package_json_contents)
if server.is_running:
# Reset the server
server.stop()
def read_package_json(self):
with open(PATH_TO_PACKAGE_JSON, 'r') as package_json_file:
return package_json_file.read()
def write_package_json(self, contents):
with open(PATH_TO_PACKAGE_JSON, 'w+') as package_json_file:
package_json_file.write(contents)
def test_node_is_installed(self):
self.assertTrue(node.is_installed)
def test_node_version_raw(self):
self.assertTrue(isinstance(node.version_raw, six.string_types))
self.assertGreater(len(node.version_raw), 0)
def test_node_version(self):
self.assertTrue(isinstance(node.version, tuple))
self.assertGreaterEqual(len(node.version), 3)
def test_npm_is_installed(self):
self.assertTrue(npm.is_installed)
def test_npm_version_raw(self):
self.assertTrue(isinstance(npm.version_raw, six.string_types))
self.assertGreater(len(npm.version_raw), 0)
def test_npm_version(self):
self.assertTrue(isinstance(npm.version, tuple))
self.assertGreaterEqual(len(npm.version), 3)
def test_ensure_node_installed(self):
node.ensure_installed()
def test_ensure_npm_installed(self):
npm.ensure_installed()
def test_ensure_node_version_greater_than(self):
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, 'v99999.0.0')
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, '99999.0.0')
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (None,))
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (10,))
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (999999999,))
self.assertRaises(MalformedVersionInput, node.ensure_version_gte, (999999999, 0,))
self.assertRaises(OutdatedDependency, node.ensure_version_gte, (999999999, 0, 0,))
node.ensure_version_gte((0, 0, 0,))
node.ensure_version_gte((0, 9, 99999999))
node.ensure_version_gte((0, 10, 33,))
def test_ensure_npm_version_greater_than(self):
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, 'v99999.0.0')
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, '99999.0.0')
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (None,))
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (10,))
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (999999999,))
self.assertRaises(MalformedVersionInput, npm.ensure_version_gte, (999999999, 0,))
self.assertRaises(OutdatedDependency, npm.ensure_version_gte, (999999999, 0, 0,))
npm.ensure_version_gte((0, 0, 0,))
npm.ensure_version_gte((0, 9, 99999999))
npm.ensure_version_gte((2, 1, 8,))
def test_node_run_returns_output(self):
stderr, stdout = node.run('--version',)
stdout = stdout.strip()
self.assertEqual(stdout, node.version_raw)
def test_npm_run_returns_output(self):
stderr, stdout = npm.run('--version',)
stdout = stdout.strip()
self.assertEqual(stdout, npm.version_raw)
def test_npm_install_can_install_dependencies(self):
npm.install(TEST_DIR)
self.assertTrue(os.path.exists(PATH_TO_NODE_MODULES))
self.assertTrue(os.path.exists(PATH_TO_INSTALLED_PACKAGE))
def test_node_server_services_can_be_validated(self):
class MissingSource(BaseService):
pass
self.assertRaises(ServiceSourceDoesNotExist, MissingSource.validate)
class AbsoluteUrlName(EchoService):
name = 'http://foo.com'
self.assertRaises(MalformedServiceName, AbsoluteUrlName.validate)
class MissingOpeningSlashName(EchoService):
name = 'foo/bar'
self.assertRaises(MalformedServiceName, MissingOpeningSlashName.validate)
def test_node_server_services_are_discovered(self):
for service in (EchoService, ErrorService, TimeoutService):
self.assertIn(service, server.services)
def test_node_server_can_start_and_stop(self):
self.assertIsInstance(server, NodeServer)
server.start()
self.assertTrue(server.is_running)
self.assertTrue(server.test())
server.stop()
self.assertFalse(server.is_running)
self.assertFalse(server.test())
server.start()
self.assertTrue(server.is_running)
self.assertTrue(server.test())
server.stop()
self.assertFalse(server.is_running)
self.assertFalse(server.test())
def test_node_server_process_can_rely_on_externally_controlled_processes(self):
self.assertFalse(server.test())
new_server = NodeServer()
new_server.start()
self.assertTrue(server.test())
new_server.stop()
self.assertFalse(new_server.test())
self.assertFalse(server.test())
def test_node_server_process_can_raise_on_port_collisions(self):
self.assertFalse(server.test())
new_server = NodeServer()
new_server.start()
self.assertTrue(server.test())
self.assertEqual(server.address, new_server.address)
self.assertEqual(server.port, new_server.port)
self.assertRaises(NodeServerAddressInUseError, server.start, use_existing_process=False)
new_server.stop()
self.assertFalse(server.test())
server.start(use_existing_process=False)
self.assertTrue(server.test())
def test_node_server_config_is_as_expected(self):
config = server.get_config()
self.assertEqual(config['address'], server.address)
self.assertEqual(config['port'], server.port)
self.assertEqual(config['startup_output'], server.get_startup_output())
services = (EchoService, ErrorService, TimeoutService)
self.assertEqual(len(config['services']), len(services))
service_names = [obj['name'] for obj in config['services']]
service_sources = [obj['path_to_source'] for obj in config['services']]
for service in services:
self.assertIn(service.get_name(), service_names)
self.assertIn(service.get_path_to_source(), service_sources)
def test_node_server_echo_service_pumps_output_back(self):
response = echo_service.send(echo='test content')
self.assertEqual(response.text, 'test content')
def test_node_server_throws_timeout_on_long_running_services(self):
self.assertRaises(NodeServerTimeoutError, timeout_service.send)
def test_node_server_error_service_works(self):
self.assertRaises(NodeServiceError, error_service.send)
def test_node_server_config_management_command_provides_the_expected_output(self):
from django_node.management.commands.node_server_config import Command
with StdOutTrap() as output:
Command().handle()
self.assertEqual(''.join(output), server.get_serialised_config())
|
import csv
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
WGS = ['G69145', 'G71602', 'G71608', 'G76270']
metric_dict = {}
for wg in WGS:
fname = "/seq/picard_aggregation/" + wg + "/NA12878/current/NA12878.duplicate_metrics"
lines=list(csv.reader(open(fname)))
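    # Picard metrics files put the tab-separated metric names on line 7 and the
    # corresponding values on line 8 (hence lines[6] and lines[7] below).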
nMetrics = lines[6][0].split('\t')
mvals = lines[7][0].split('\t')
# read in depth info
# fdepth = "/seq/picard_aggregation/" + wg + "/NA12878/current/NA12878.depthSM"
metric_dict[wg] = mvals
df_wgs = pd.DataFrame.from_dict(metric_dict,orient='index')
df_wgs.columns = nMetrics
df_wgs['platform'] = 'WGS'
metric_dict = {}
for wg in WGS:
fname = "/seq/picard_aggregation/" + wg + "/NA12878/current/NA12878.insert_size_metrics"
lines=list(csv.reader(open(fname)))
nMetrics = lines[6][0].split('\t')
mvals = lines[7][0].split('\t')
metric_dict[wg] = mvals
insert_wgs = pd.DataFrame.from_dict(metric_dict,orient='index')
insert_wgs.columns = nMetrics
insert_wgs['platform'] = 'WGS'
NexomeIDs = ['359781',
'359877',
'360457',
'361337',
'388072',
'381153',
'364464',
'377582',
'384210',
'384498',
'372754',
'386674',
'393370',
'385972',
'373158',
'379118',
'385390',
'391382',
'383609',
'386068',
'383320',
'383416',
'382684',
'392292',
'376734',
'376014']
metric_dict = {}
for nx in NexomeIDs:
fname = "/seq/picard_aggregation/D5227/NexPond-" + nx + "/current/NexPond-" + nx + ".duplicate_metrics"
lines=list(csv.reader(open(fname)))
nMetrics = lines[6][0].split('\t')
mvals = lines[7][0].split('\t')
metric_dict[nx] = mvals
df_nex = pd.DataFrame.from_dict(metric_dict,orient='index')
df_nex.columns = nMetrics
df_nex['platform'] = 'Nexome'
frames = [df_wgs, df_nex]
df_merge = pd.concat(frames)
fout = '/home/unix/hogstrom/nexpond_wgs_dup_metrics.txt'
df_merge.to_csv(fout)
import matplotlib.pyplot as plt
import pandas as pd
fin = "/Users/hogstrom/Dropbox (MIT)/genome_analysis/published_data/nexpond_wgs_dup_metrics.txt"
g = pd.read_csv(fin,index_col=0)
g.boxplot('ESTIMATED_LIBRARY_SIZE', by='platform')
g.boxplot('PERCENT_DUPLICATION', by='platform')
g.boxplot('READ_PAIR_DUPLICATES', by='platform')
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0013_auto_20160210_0400'),
]
operations = [
migrations.AlterField(
model_name='category',
name='order',
field=models.PositiveIntegerField(default=1),
),
migrations.AlterField(
model_name='question',
name='order',
field=models.PositiveIntegerField(default=1),
),
]
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
class BaseReader(object):
def read(self):
raise NotImplementedError()
class ImageReader(BaseReader):
def __init__(self):
self.width = None
self.height = None
def get_image_size(self):
return self.width, self.height
def set_image_size(self, width, height):
self.width = width
self.height = height
def read(self, filename, num_classes, batch_size=256, feature_map=None):
assert(self.width is not None and self.height is not None)
assert(self.width > 0 and self.height > 0)
reader = tf.TFRecordReader()
tf.add_to_collection(filename, batch_size) # is this really needed?
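        # read_up_to expects a queue of filenames (e.g. built with
        # tf.train.string_input_producer), so 'filename' is assumed to be such a queue.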
key, value = reader.read_up_to(filename, batch_size)
if feature_map is None:
feature_map = {
'label': tf.FixedLenFeature([], tf.int64),
'image_raw': tf.FixedLenFeature([self.width * self.height], tf.int64),
}
features = tf.parse_example(value, features=feature_map)
images = tf.cast(features["image_raw"], tf.float32) * (1. / 255)
if feature_map.get('label') is not None:
labels = tf.cast(features['label'], tf.int32)
one_hot = tf.map_fn(lambda x: tf.cast(slim.one_hot_encoding(x, num_classes), tf.int32), labels)
one_hot = tf.reshape(one_hot, [-1, num_classes])
return one_hot, images
empty_labels = tf.reduce_sum(tf.zeros_like(images), axis=1)
return empty_labels, images
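# Minimal usage sketch (illustrative, not part of the original; the TFRecord path and
# sizes are hypothetical, and a filename queue is assumed as input):
#
#   reader = ImageReader()
#   reader.set_image_size(28, 28)
#   filename_queue = tf.train.string_input_producer(['train.tfrecords'])
#   labels, images = reader.read(filename_queue, num_classes=10)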
|
import sys
import os
sys.path.append("/home/foxtrot/Dropbox/tunza_v2/")
os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.local"
import django
django.setup()
# Django must be configured (django.setup() above) before any models are imported.
from django.http import HttpResponse
from AfricasTalkingGateway import AfricasTalkingGateway, AfricasTalkingGatewayException
from reminder.models import Reminder
username = "OtisKe"
apikey = "07984423a278ead54fee35d3daf956598deb51405b27fe70f1e2dfe964be5c04"
gateway = AfricasTalkingGateway(username, apikey)
reminder_service = Reminder.objects.values_list('service_id',
'patient_id',
'service__service_name',
'service__service_url',
'patient__patient_contact', )
def voice_callback(request):
if request.method == 'POST':
        is_active = request.POST.get('isActive', None)
        session_id = request.POST.get('sessionId', None)
        caller_number = request.POST.get('callerNumber', None)
        direction = request.POST.get('direction', None)
print "is_active -> ", is_active
if is_active == str(0):
# Compose the response
            duration = request.POST.get('durationInSeconds', None)
            currency_code = request.POST.get('currencyCode', None)
            amount = request.POST.get('amount', None)
# update session info to Redis
print duration, currency_code, amount
respond = '<?xml version="1.0" encoding="UTF-8"?>'
respond += '<Response>'
respond += '<Say playBeep="false" >Welcome to the reminder system</Say>'
respond += '</Response>'
resp = HttpResponse(respond, 200, content_type='application/xml')
resp['Cache-Control'] = 'no-cache'
return resp
if is_active == str(1):
# Compose the response
respond = '<?xml version="1.0" encoding="UTF-8"?>'
respond += '<Response>'
respond += '<Say playBeep="false" >Welcome to mTunza.org</Say>'
respond += '</Response>'
resp = HttpResponse(respond, 200, content_type='application/xml')
resp['Cache-Control'] = 'no-cache'
return resp
else:
resp = HttpResponse('Bad Request', 400, content_type='application/xml', )
resp['Cache-Control'] = 'no-cache'
return resp
|
import requests
from gevent import monkey
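# grequests calls gevent's monkey.patch_all() at import time; stubbing it out below keeps
# the standard library unpatched for the synchronous requests/Session code in this module
# (assumption: that is the intent of the stub).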
def stub(*args, **kwargs): # pylint: disable=unused-argument
pass
monkey.patch_all = stub
import grequests
import os
import json
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
meraki_api_token = os.getenv("MERAKI_API_TOKEN")
meraki_org = os.getenv("MERAKI_ORG")
meraki_dashboard_map = os.getenv("MERAKI_DASHBOARD_MAP")
header = {"X-Cisco-Meraki-API-Key": meraki_api_token}
def get_meraki_networks():
# Get a list of all networks associated with the specified organization
url = "https://dashboard.meraki.com/api/v0/organizations/" + meraki_org + "/networks"
netlist = requests.get(url, headers=header)
netjson = json.loads(netlist.content.decode("utf-8"))
return netjson
def meraki_create_dashboard_link(linktype, linkname, displayval, urlappend, linknameid):
shownet = displayval
if meraki_dashboard_map:
mapjson = json.loads(meraki_dashboard_map.replace("'", '"'))
if linktype in mapjson:
if linkname in mapjson[linktype]:
shownet = "<a href='" + mapjson[linktype][linkname]["baseurl"] + urlappend + "'>" + displayval + "</a>"
if shownet == displayval and linktype == "devices" and linknameid == 0:
shownet = "<a href='https://dashboard.meraki.com/manage/nodes/show/" + linkname + "'>" + displayval + "</a>"
return shownet
def meraki_dashboard_client_mod(netlink, cliid, clidesc):
showcli = clidesc
if netlink:
if netlink.find("/manage") >= 0:
showcli = netlink.split("/manage")[0] + "/manage/usage/list#c=" + cliid + "'>" + clidesc + "</a>"
else:
showcli = "<a href='https://dashboard.meraki.com/manage/usage/list#c=" + cliid + "'>" + clidesc + "</a>"
return showcli
def collect_url_list(jsondata, baseurl, attr1, attr2, battr1, battr2):
    # Iterates jsondata (a list of dictionaries or a dictionary of lists) and pulls out
    # attributes to generate a list of URLs.
    #   jsondata : list of dictionaries, or dictionary of lists
    #   baseurl  : base URL template; "$1" marks where the first substitution goes
    #              (and "$2" the second, when a second substitution is needed)
    #   attr1    : for a list of dictionaries, the key retrieved from each dict in the list;
    #              for a dictionary of lists, the key under which the lists are found
    #   attr2    : (optional, pass "" to disable) for a dictionary of lists, the key
    #              retrieved from each dict in each list
    #   battr1   : (optional) same role as attr1, used when a second substitution ("$2")
    #              is needed
    #   battr2   : (optional, pass "" to disable) same role as attr2, for the second
    #              substitution
urllist = []
sub1 = ""
for jsonitem in jsondata:
if attr2 == "":
if attr1 in jsonitem:
urllist.append(baseurl.replace("$1", jsonitem[attr1]))
else:
if attr1 in jsondata[jsonitem]:
for jsonitem2 in jsondata[jsonitem][attr1]:
if isinstance(jsonitem2, str):
if jsonitem2 == attr2:
if battr1 == "":
urllist.append(baseurl.replace("$1", jsondata[jsonitem][attr1][jsonitem2]))
else:
sub1 = jsondata[jsonitem][attr1][jsonitem2]
else:
if battr1 == "":
urllist.append(baseurl.replace("$1", jsonitem2[attr2]))
else:
sub1 = jsonitem2[attr2]
if battr1 in jsondata[jsonitem]:
for jsonitem2 in jsondata[jsonitem][battr1]:
if isinstance(jsonitem2, str):
if jsonitem2 == battr2:
urllist.append(baseurl.replace("$1", sub1).replace("$2", jsondata[jsonitem][battr1][jsonitem2]))
else:
urllist.append(baseurl.replace("$1", sub1).replace("$2", jsonitem2[battr2]))
return urllist
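# Illustrative example (hypothetical data, not part of the original):
#   nets = [{"id": "N_1"}, {"id": "N_2"}]
#   collect_url_list(nets, "https://dashboard.meraki.com/api/v0/networks/$1/devices",
#                    "id", "", "", "")
#   -> ['https://dashboard.meraki.com/api/v0/networks/N_1/devices',
#       'https://dashboard.meraki.com/api/v0/networks/N_2/devices']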
def do_multi_get(url_list, comp_list, comp_id1, comp_id2, comp_url_idx, comp_key, content_key):
    # Issues multiple GET requests to a list of URLs, optionally joining the returned
    # content to an existing dictionary.
    #   url_list     : list of URLs to issue GET requests to
    #   comp_list    : (optional, pass [] to disable) an existing dictionary/list to which
    #                  the results of the GET operations are joined
    #   comp_id1     : for a list of dictionaries, the key retrieved from each dict in the
    #                  list; for a dictionary of lists, the key under which the lists are found
    #   comp_id2     : (optional, pass "" to disable) for a dictionary of lists, the key
    #                  retrieved from each dict in each list
    #   comp_url_idx : (optional, pass -1 to disable) results can be merged either on a URL
    #                  comparison or on a matching key; use this to merge on the value at
    #                  this index of the "/"-split URL. To match 'b' in http://a.com/b,
    #                  pass 3 (the 3rd "/"-separated section).
    #   comp_key     : (optional, pass "" to disable) merge on this key in the content
    #                  coming back from the GET requests
    #   content_key  : (optional when not merging, required when merging; pass "" to
    #                  disable) the base key added to the merged dictionary for the merged data
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.2, status_forcelist=[403, 500, 502, 503, 504], raise_on_redirect=True,
raise_on_status=True)
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
rs = (grequests.get(u, headers=header, session=s) for u in url_list)
content_dict = {}
for itemlist in grequests.imap(rs, stream=False):
icontent = itemlist.content.decode("utf-8")
inlist = json.loads(icontent)
if len(inlist) > 0:
# Use the URL index if it was specified, otherwise use the comparision key
if comp_url_idx >= 0:
urllist = itemlist.url.split("/")
matchval = urllist[comp_url_idx]
else:
matchval = inlist[0][comp_key]
if len(comp_list) > 0:
# comp_list was passed, iterate and merge dictionaries
for net in comp_list:
if comp_id2 == "":
# this is a list of dictionaries. if this matches the search, add it to the content dict
if matchval == net[comp_id1]:
kid1 = net["id"]
if kid1 not in content_dict:
content_dict[kid1] = {}
content_dict[kid1]["info"] = net
content_dict[kid1][content_key] = inlist
break
else:
# this is a dictionary of lists. if the match is present in this dictionary, continue parsing
if matchval in json.dumps(comp_list[net][comp_id1]):
kid1 = comp_list[net]["info"]["id"]
for net2 in comp_list[net][comp_id1]:
kid2 = net2["serial"]
if comp_id2 in net2:
if matchval == net2[comp_id2]:
if kid1 not in content_dict:
content_dict[kid1] = {}
if comp_id1 not in content_dict[kid1]:
content_dict[kid1][comp_id1] = {}
if kid2 not in content_dict[kid1][comp_id1]:
content_dict[kid1][comp_id1][kid2] = {}
content_dict[kid1]["info"] = comp_list[net]
content_dict[kid1][comp_id1][kid2]["info"] = net2
content_dict[kid1][comp_id1][kid2][content_key] = inlist
break
else:
if matchval not in content_dict:
content_dict[matchval] = {}
if content_key != "":
if content_key not in content_dict[matchval]:
content_dict[matchval][content_key] = {}
content_dict[matchval][content_key] = inlist
else:
content_dict[matchval] = inlist
return content_dict
def decode_model(strmodel):
    # Decodes a Meraki model number into its general device type.
outmodel = ""
if "MX" in strmodel:
outmodel = "appliance"
if "MS" in strmodel:
outmodel = "switch"
if "MR" in strmodel:
outmodel = "wireless"
if "MV" in strmodel:
outmodel = "camera"
if "MC" in strmodel:
outmodel = "phone"
if outmodel == "":
outmodel = strmodel[0:2]
return outmodel
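# e.g. decode_model("MR33") -> "wireless"; an unrecognized model falls back to its
# first two characters, e.g. decode_model("Z3") -> "Z3".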
def do_sort_smclients(in_smlist):
# Rearranges the SM Dictionary to group clients by MAC address rather than a single list
out_smlist = {}
for net in in_smlist:
if "devices" in in_smlist[net]:
for cli in in_smlist[net]["devices"]:
if net not in out_smlist:
out_smlist[net] = {"devices": {}}
out_smlist[net]["devices"][cli["wifiMac"]] = cli
return out_smlist
def do_split_networks(in_netlist):
# Splits out combined Meraki networks into individual device networks.
devdict = {}
for net in in_netlist:
base_name = in_netlist[net]["info"]["info"]["name"]
for dev in in_netlist[net]["info"]["devices"]:
thisdevtype = decode_model(dev["model"])
thisupl = {"uplinks": in_netlist[net]["devices"][dev["serial"]]["uplinks"]}
newname = base_name + " - " + thisdevtype
newdev = {**dev, **thisupl}
if newname in devdict:
devdict[newname].append(newdev)
else:
devdict[newname] = [newdev]
return devdict
def get_meraki_health(incoming_msg, rettype):
# Get a list of all networks associated with the specified organization
netjson = get_meraki_networks()
# Parse list of networks to extract/create URLs needed to get list of devices
urlnet = collect_url_list(netjson, "https://dashboard.meraki.com/api/v0/networks/$1/devices", "id", "", "", "")
# Get a list of all devices associated with the networks associated to the organization
netlist = do_multi_get(urlnet, netjson, "id", "", -1, "networkId", "devices")
# Get uplink status of devices
urlnetup = collect_url_list(netlist, "https://dashboard.meraki.com/api/v0/networks/$1/devices/$2/uplink", "info", "id", "devices", "serial")
netlistup = do_multi_get(urlnetup, netlist, "devices", "serial", 8, "", "uplinks")
# Split network lists up by device type
newnetlist = do_split_networks(netlistup)
totaldev = 0
offdev = 0
totaloffdev = 0
devicon = ""
retmsg = "<h3>Meraki Details:</h3>"
retmsg += "<a href='https://dashboard.meraki.com/'>Meraki Dashboard</a><br><ul>"
for net in sorted(newnetlist):
for dev in newnetlist[net]:
for upl in dev["uplinks"]:
if upl["interface"] == "WAN 1":
if upl["status"] != "Active":
offdev += 1
totaloffdev += 1
devicon = chr(0x2757) + chr(0xFE0F)
totaldev += len(newnetlist[net])
shownet = meraki_create_dashboard_link("networks", net, net, "", 0)
retmsg += "<li>Network '" + shownet + "' has " + str(offdev) + " device(s) offline out of " + str(len(newnetlist[net])) + " device(s)." + devicon + "</li>"
offdev = 0
devicon = ""
retmsg += "</ul><b>" + str(totaloffdev) + " device(s) offline out of a total of " + str(totaldev) + " device(s).</b>"
return retmsg
def get_meraki_clients(incoming_msg, rettype):
cmdlist = incoming_msg.text.split(" ")
client_id = cmdlist[len(cmdlist)-1]
# Get a list of all networks associated with the specified organization
netjson = get_meraki_networks()
# Parse list of networks to extract/create URLs needed to get list of devices
urlnet = collect_url_list(netjson, "https://dashboard.meraki.com/api/v0/networks/$1/devices", "id", "", "", "")
smnet = collect_url_list(netjson, "https://dashboard.meraki.com/api/v0/networks/$1/sm/devices/", "id", "", "", "")
# Get a list of all devices associated with the networks associated to the organization
netlist = do_multi_get(urlnet, netjson, "id", "", -1, "networkId", "devices")
smlist = do_multi_get(smnet, [], "id", "", 6, "", "")
newsmlist = do_sort_smclients(smlist)
# Parse list of devices to extract/create URLs needed to get list of clients
urldev = collect_url_list(netlist, "https://dashboard.meraki.com/api/v0/devices/$1/clients?timespan=86400", "devices", "serial", "", "")
# Get a list of all clients associated with the devices associated to the networks associated to the organization
netlist = do_multi_get(urldev, netlist, "devices", "serial", 6, "", "clients")
if rettype == "json":
return {"client": netlist, "sm": newsmlist}
else:
retmsg = "<h3>Associated Clients:</h3>"
for net in sorted(netlist):
for dev in netlist[net]["devices"]:
for cli in netlist[net]["devices"][dev]["clients"]:
if not isinstance(cli, str):
if cli["description"] == client_id and "switchport" in cli:
devbase = netlist[net]["devices"][dev]["info"]
showdev = meraki_create_dashboard_link("devices", devbase["mac"], devbase["name"], "?timespan=86400", 0)
showport = meraki_create_dashboard_link("devices", devbase["mac"], str(cli["switchport"]), "/ports/" + str(cli["switchport"]) + "?timespan=86400", 1)
showcli = meraki_dashboard_client_mod(showdev, cli["id"], cli["dhcpHostname"])
retmsg += "<i>Computer Name:</i> " + showcli + "<br>"
if net in newsmlist:
if "devices" in newsmlist[net]:
if cli["mac"] in newsmlist[net]["devices"]:
smbase = newsmlist[net]["devices"][cli["mac"]]
retmsg += "<i>Model:</i> " + smbase["systemModel"] + "<br>"
retmsg += "<i>OS:</i> " + smbase["osName"] + "<br>"
retmsg += "<i>IP:</i> " + cli["ip"] + "<br>"
retmsg += "<i>MAC:</i> " + cli["mac"] + "<br>"
retmsg += "<i>VLAN:</i> " + str(cli["vlan"]) + "<br>"
retmsg += "<i>Connected To:</i> " + showdev + " (" + devbase["model"] + "), Port " + showport + "<br>"
return retmsg
def get_meraki_health_html(incoming_msg):
return get_meraki_health(incoming_msg, "html")
def get_meraki_clients_html(incoming_msg):
return get_meraki_clients(incoming_msg, "html")
|
"""
Created on 19 Nov 2020
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Two-Channel I2C-Bus Switch With Interrupt Logic and Reset
https://www.ti.com/product/PCA9543A
"""
from scs_host.bus.i2c import I2C
class PCA9543A(object):
"""
classdocs
"""
___I2C_ADDR = 0x70
# ----------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Constructor
"""
self.__addr = self.___I2C_ADDR
# ----------------------------------------------------------------------------------------------------------------
def enable(self, ch0, ch1):
ch0_en = 0x01 if ch0 else 0x00
ch1_en = 0x02 if ch1 else 0x00
ctrl = ch1_en | ch0_en
try:
I2C.Sensors.start_tx(self.__addr)
I2C.Sensors.write(ctrl)
finally:
I2C.Sensors.end_tx()
def read(self):
try:
I2C.Sensors.start_tx(self.__addr)
ctrl = I2C.Sensors.read(1)
finally:
I2C.Sensors.end_tx()
return ctrl
def reset(self):
self.enable(False, False)
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
try:
ctrl = "0x%02x" % self.read()
except OSError:
ctrl = None
return "PCA9543A:{addr:0x%02x, ctrl:%s}" % (self.__addr, ctrl)
|
"""
A 2d grid map of m rows and n columns is initially filled with water.
We may perform an addLand operation which turns the water at position
(row, col) into a land. Given a list of positions to operate,
count the number of islands after each addLand operation.
An island is surrounded by water and is formed by connecting adjacent
lands horizontally or vertically.
You may assume all four edges of the grid are all surrounded by water.
Given m = 3, n = 3, positions = [[0,0], [0,1], [1,2], [2,1]].
Initially, the 2d grid grid is filled with water.
(Assume 0 represents water and 1 represents land).
0 0 0
0 0 0
0 0 0
Operation #1: addLand(0, 0) turns the water at grid[0][0] into a land.
1 0 0
0 0 0 Number of islands = 1
0 0 0
Operation #2: addLand(0, 1) turns the water at grid[0][1] into a land.
1 1 0
0 0 0 Number of islands = 1
0 0 0
Operation #3: addLand(1, 2) turns the water at grid[1][2] into a land.
1 1 0
0 0 1 Number of islands = 2
0 0 0
Operation #4: addLand(2, 1) turns the water at grid[2][1] into a land.
1 1 0
0 0 1 Number of islands = 3
0 1 0
"""
class Solution(object):
def num_islands2(self, m, n, positions):
ans = []
islands = Union()
for p in map(tuple, positions):
islands.add(p)
for dp in (0, 1), (0, -1), (1, 0), (-1, 0):
q = (p[0] + dp[0], p[1] + dp[1])
if q in islands.id:
islands.unite(p, q)
ans += [islands.count]
return ans
class Union(object):
def __init__(self):
self.id = {}
self.sz = {}
self.count = 0
def add(self, p):
self.id[p] = p
self.sz[p] = 1
self.count += 1
def root(self, i):
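        # Follow parent pointers to the root, halving the path as we go (path compression).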
while i != self.id[i]:
self.id[i] = self.id[self.id[i]]
i = self.id[i]
return i
def unite(self, p, q):
i, j = self.root(p), self.root(q)
if i == j:
return
if self.sz[i] > self.sz[j]:
i, j = j, i
self.id[i] = j
self.sz[j] += self.sz[i]
self.count -= 1
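# Illustrative check against the worked example in the docstring (not part of the original):
#   Solution().num_islands2(3, 3, [[0, 0], [0, 1], [1, 2], [2, 1]]) == [1, 1, 2, 3]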
|
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.1'
from django.conf import settings
from django.db.models.signals import post_save, pre_delete
from djangoerp.core.utils.models import get_model
from djangoerp.core.signals import manage_author_permissions
from .models import Menu, Link, Bookmark
from .utils import create_bookmarks, delete_bookmarks
def _create_bookmarks(sender, instance, *args, **kwargs):
create_bookmarks(instance)
def _delete_bookmarks(sender, instance, *args, **kwargs):
delete_bookmarks(instance)
def manage_bookmarks(cls, enabled=True):
"""Connects handlers for bookmarks management.
    This can be used to automatically create a related bookmark list when an
    instance of the given model class is created, e.g.:
    >>> manage_bookmarks(User)
    This auto-generates a bookmark list associated with each new User instance.
    To disconnect:
    >>> manage_bookmarks(User, False)
"""
cls = get_model(cls)
cls_name = cls.__name__.lower()
create_dispatch_uid = "create_%s_bookmarks" % cls_name
delete_dispatch_uid = "delete_%s_bookmarks" % cls_name
if enabled:
post_save.connect(_create_bookmarks, cls, dispatch_uid=create_dispatch_uid)
pre_delete.connect(_delete_bookmarks, cls, dispatch_uid=delete_dispatch_uid)
else:
post_save.disconnect(_create_bookmarks, cls, dispatch_uid=create_dispatch_uid)
pre_delete.disconnect(_delete_bookmarks, cls, dispatch_uid=delete_dispatch_uid)
manage_author_permissions(Menu)
manage_author_permissions(Link)
manage_author_permissions(Bookmark)
manage_bookmarks(settings.AUTH_USER_MODEL)
|
import argparse
import h5py
import numpy as np
import os
import sys
def main():
""" Create train/test indices for one repeat of a 10-fold sampled leave-one-study-out
experiment on the RFS data.
The indices will be stored under
<data_dir>/outputs/U133A_combat_RFS/sampled_loso/repeat<repeat idx>
with the following structure:
For k=1..numFolds:
<k>/train.indices
List of indices of the training set (one per line).
<k>/train.labels
List of (0/1) labels of the training set (one per line).
<k>/test.indices
List of indices of the test set (one per line).
<k>/test.labels
List of (0/1) labels of the test set (one per line).
Parameters
----------
data_dir: path
Path to the data folder.
ACES, GSE_RFS, and the outputs directory must be under <data_dir>.
repeat: int
Repeat index.
Example
-------
$ python setUpSampledLOSO_writeIndices.py $SHAREDAT/SamSpecCoEN 0
Reference
---------
Allahyar, A., and Ridder, J. de (2015).
FERAL: network-based classifier with application to breast cancer outcome prediction.
Bioinformatics 31, i311--i319.
"""
parser = argparse.ArgumentParser(description="Build sample-specific co-expression networks" + \
"for a sampled LOSO on the RFS data",
add_help=True)
parser.add_argument("data_dir", help="Path to the data")
parser.add_argument("repeat", help="Index of the repeat", type=int)
args = parser.parse_args()
outDir = '%s/outputs/U133A_combat_RFS/sampled_loso/repeat%d' % (args.data_dir, args.repeat)
# Create outDir if it does not exist
if not os.path.isdir(outDir):
sys.stdout.write("Creating %s\n" % outDir)
try:
os.makedirs(outDir)
except OSError:
if not os.path.isdir(outDir):
raise
# Get expression data, sample labels.
# Do not normalize the data while loading it (so as not to use test data for normalization).
f = h5py.File("%s/ACES/experiments/data/U133A_combat.h5" % args.data_dir)
expressionData = np.array(f['U133A_combat_RFS']['ExpressionData'])
sampleLabels = np.array(f['U133A_combat_RFS']['PatientClassLabels'])
sampleAccess = np.array(f['U133A_combat_RFS']['PatientLabels']).tolist()
f.close()
# Map the indices to the studies
studyDict = {} # studyId:[sampleIdx]
gse_rfs_dir = '%s/GSE_RFS/' % args.data_dir
for studyFile in os.listdir(gse_rfs_dir):
studyPath = '%s/%s' % (gse_rfs_dir, studyFile)
print studyPath
with open(studyPath, 'r') as f:
gsmNames = set([x.split()[0] for x in f.readlines()])
f.close()
gsmNames = gsmNames.intersection(set(sampleAccess))
studyDict[studyFile.split(".")[0]] = [sampleAccess.index(gsm) for gsm in gsmNames]
studyList = studyDict.keys()
numStudies = len(studyList)
print "Found %d studies" % numStudies
np.random.seed(seed=args.repeat)
for foldNr in range(numStudies):
# Training data:
# randomly sample 50% of each study that is not foldNr
trIndices = []
        for studyId in [x for x in studyList if x != studyList[foldNr]]:
studyIndices = np.random.choice(studyDict[studyId],
size=len(studyDict[studyId])/2,
replace=False)
trIndices.extend(studyIndices)
# studyIndices = studyDict[studyId]
# random.shuffle(studyIndices)
# n = len(studyIndices)
# trIndices.extend(studyIndices[:(n/2)])
# Test data:
# the data from foldNr
teIndices = studyDict[studyList[foldNr]]
# Create output directory
foldDir = "%s/fold%d" % (outDir, foldNr)
try:
os.makedirs(foldDir)
except OSError:
if not os.path.isdir(foldDir):
raise
# Save train indices to file
trIndicesF = '%s/train.indices' % foldDir
np.savetxt(trIndicesF, trIndices, fmt='%d')
sys.stdout.write("Wrote training indices for fold %d to %s\n" % (foldNr, trIndicesF))
# Save test indices to file
teIndicesF = '%s/test.indices' % foldDir
np.savetxt(teIndicesF, teIndices, fmt='%d')
sys.stdout.write("Wrote test indices for fold %d to %s\n" % (foldNr, teIndicesF))
# Save train labels to file
trLabelsF = '%s/train.labels' % foldDir
np.savetxt(trLabelsF, np.array(sampleLabels[trIndices], dtype='int'),
fmt='%d')
sys.stdout.write("Wrote training labels for fold %d to %s\n" % (foldNr, trLabelsF))
# Save test labels to file
teLabelsF = '%s/test.labels' % foldDir
np.savetxt(teLabelsF, np.array(sampleLabels[teIndices], dtype='int'),
fmt='%d')
sys.stdout.write("Wrote test labels for fold %d to %s\n" % (foldNr, teLabelsF))
if __name__ == "__main__":
main()
|
import collections
import unittest
from kobold import assertions
class TestAssertEqual(unittest.TestCase):
def test_empty_hashes(self):
assertions.assert_equal({}, {})
def test_distinct_keys(self):
self.assertRaises(
AssertionError,
assertions.assert_equal,
{'a' : 1},
{'b' : 2})
Response = collections.namedtuple('Response', 'headers status_code data')
class TestAssertResponseMatches(unittest.TestCase):
def test_empty_body(self):
actual = Response(headers={}, status_code=200, data={})
assertions.assert_response_matches({'body' : {},
'status_code' : 200,
'headers' : {}}, actual)
def test_omit_status_and_headers(self):
actual = Response(headers={}, status_code=200, data={})
assertions.assert_response_matches({'body' : {}}, actual)
def test_equal_bodies(self):
actual = Response(
headers={},
status_code=200,
data={'key' : 'value'})
assertions.assert_response_matches({'body' : {'key' : 'value'},
'status_code' : 200,
'headers' : {}}, actual)
def test_unequal_bodies(self):
actual = Response(
headers={},
status_code=200,
data={'key' : 'value'})
self.assertRaises(
AssertionError,
assertions.assert_response_matches,
{'body' : {'key' : 'anothervalue'},
'status_code' : 200,
'headers' : {}},
actual)
def test_unequal_headers(self):
actual = Response(
headers={'header' : 'value'},
status_code=200,
data={'key' : 'value'})
self.assertRaises(
AssertionError,
assertions.assert_response_matches,
{'body' : {'key' : 'value'},
'status_code' : 200,
'headers' : {'header' : 'anothervalue'}},
actual)
|
from msrest.pipeline import ClientRawResponse
import uuid
from .. import models
class ParameterGroupingOperations(object):
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def post_required(
self, parameter_grouping_post_required_parameters, custom_headers={}, raw=False, **operation_config):
"""
Post a bunch of required parameters grouped
:param parameter_grouping_post_required_parameters: Additional
parameters for the operation
:type parameter_grouping_post_required_parameters:
ParameterGroupingPostRequiredParameters
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
body = None
if parameter_grouping_post_required_parameters is not None:
body = parameter_grouping_post_required_parameters.body
custom_header = None
if parameter_grouping_post_required_parameters is not None:
custom_header = parameter_grouping_post_required_parameters.custom_header
query = None
if parameter_grouping_post_required_parameters is not None:
query = parameter_grouping_post_required_parameters.query
path = None
if parameter_grouping_post_required_parameters is not None:
path = parameter_grouping_post_required_parameters.path
# Construct URL
url = '/parameterGrouping/postRequired/{path}'
path_format_arguments = {
'path': self._serialize.url("path", path, 'str')
}
url = url.format(**path_format_arguments)
# Construct parameters
query_parameters = {}
if query is not None:
query_parameters['query'] = self._serialize.query("query", query, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if custom_header is not None:
header_parameters['customHeader'] = self._serialize.header("custom_header", custom_header, 'str')
# Construct body
body_content = self._serialize.body(body, 'int')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post_optional(
self, parameter_grouping_post_optional_parameters=None, custom_headers={}, raw=False, **operation_config):
"""
Post a bunch of optional parameters grouped
:param parameter_grouping_post_optional_parameters: Additional
parameters for the operation
:type parameter_grouping_post_optional_parameters:
ParameterGroupingPostOptionalParameters or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
custom_header = None
if parameter_grouping_post_optional_parameters is not None:
custom_header = parameter_grouping_post_optional_parameters.custom_header
query = None
if parameter_grouping_post_optional_parameters is not None:
query = parameter_grouping_post_optional_parameters.query
# Construct URL
url = '/parameterGrouping/postOptional'
# Construct parameters
query_parameters = {}
if query is not None:
query_parameters['query'] = self._serialize.query("query", query, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if custom_header is not None:
header_parameters['customHeader'] = self._serialize.header("custom_header", custom_header, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post_multiple_parameter_groups(
self, first_parameter_group=None, parameter_grouping_post_multiple_parameter_groups_second_parameter_group=None, custom_headers={}, raw=False, **operation_config):
"""
Post parameters from multiple different parameter groups
:param first_parameter_group: Additional parameters for the operation
:type first_parameter_group: FirstParameterGroup or None
:param
parameter_grouping_post_multiple_parameter_groups_second_parameter_group:
Additional parameters for the operation
:type
parameter_grouping_post_multiple_parameter_groups_second_parameter_group:
ParameterGroupingPostMultipleParameterGroupsSecondParameterGroup or
None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
header_one = None
if first_parameter_group is not None:
header_one = first_parameter_group.header_one
query_one = None
if first_parameter_group is not None:
query_one = first_parameter_group.query_one
header_two = None
if parameter_grouping_post_multiple_parameter_groups_second_parameter_group is not None:
header_two = parameter_grouping_post_multiple_parameter_groups_second_parameter_group.header_two
query_two = None
if parameter_grouping_post_multiple_parameter_groups_second_parameter_group is not None:
query_two = parameter_grouping_post_multiple_parameter_groups_second_parameter_group.query_two
# Construct URL
url = '/parameterGrouping/postMultipleParameterGroups'
# Construct parameters
query_parameters = {}
if query_one is not None:
query_parameters['query-one'] = self._serialize.query("query_one", query_one, 'int')
if query_two is not None:
query_parameters['query-two'] = self._serialize.query("query_two", query_two, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if header_one is not None:
header_parameters['header-one'] = self._serialize.header("header_one", header_one, 'str')
if header_two is not None:
header_parameters['header-two'] = self._serialize.header("header_two", header_two, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def post_shared_parameter_group_object(
self, first_parameter_group=None, custom_headers={}, raw=False, **operation_config):
"""
Post parameters with a shared parameter group object
:param first_parameter_group: Additional parameters for the operation
:type first_parameter_group: FirstParameterGroup or None
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: None or (None, requests.response) or concurrent.futures.Future
"""
header_one = None
if first_parameter_group is not None:
header_one = first_parameter_group.header_one
query_one = None
if first_parameter_group is not None:
query_one = first_parameter_group.query_one
# Construct URL
url = '/parameterGrouping/sharedParameterGroupObject'
# Construct parameters
query_parameters = {}
if query_one is not None:
query_parameters['query-one'] = self._serialize.query("query_one", query_one, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if header_one is not None:
header_parameters['header-one'] = self._serialize.header("header_one", header_one, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
|
"""Get app info with AppKit via objc bridge."""
from __future__ import print_function, unicode_literals, absolute_import
import time
import unicodedata
from AppKit import NSWorkspace
def decode(s):
"""Decode bytestring to Unicode."""
if isinstance(s, str):
s = unicode(s, 'utf-8')
elif not isinstance(s, unicode):
raise TypeError("str or unicode required, not {}".format(type(s)))
return unicodedata.normalize('NFC', s)
def get_frontmost_app():
"""Return (name, bundle_id and path) of frontmost application.
Raise a `RuntimeError` if frontmost application cannot be
determined.
"""
for app in NSWorkspace.sharedWorkspace().runningApplications():
if app.isActive():
app_name = app.localizedName()
bundle_id = app.bundleIdentifier()
app_path = app.bundleURL().fileSystemRepresentation()
return (app_name, bundle_id, app_path)
else:
raise RuntimeError("Couldn't get frontmost application.")
if __name__ == '__main__':
s = time.time()
get_frontmost_app()
d = time.time() - s
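    # Illustrative addition (not in the original): report how long the AppKit call took.
    print('get_frontmost_app() took {:0.3f}s'.format(d))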
|
"""Contains utility methods used by and with the pyebnf package."""
import math
def esc_split(text, delimiter=" ", maxsplit=-1, escape="\\", *, ignore_empty=False):
"""Escape-aware text splitting:
Split text on on a delimiter, recognizing escaped delimiters."""
is_escaped = False
split_count = 0
yval = []
for char in text:
if is_escaped:
is_escaped = False
yval.append(char)
else:
if char == escape:
is_escaped = True
elif char in delimiter and split_count != maxsplit:
if yval or not ignore_empty:
yield "".join(yval)
split_count += 1
yval = []
else:
yval.append(char)
yield "".join(yval)
def esc_join(iterable, delimiter=" ", escape="\\"):
"""Join an iterable by a delimiter, replacing instances of delimiter in items
with escape + delimiter.
"""
rep = escape + delimiter
return delimiter.join(i.replace(delimiter, rep) for i in iterable)
def get_newline_positions(text):
"""Returns a list of the positions in the text where all new lines occur. This is used by
get_line_and_char to efficiently find coordinates represented by offset positions.
"""
pos = []
for i, c in enumerate(text):
if c == "\n":
pos.append(i)
return pos
def get_line_and_char(newline_positions, position):
"""Given a list of newline positions, and an offset from the start of the source code
that newline_positions was pulled from, return a 2-tuple of (line, char) coordinates.
"""
if newline_positions:
for line_no, nl_pos in enumerate(newline_positions):
if nl_pos >= position:
if line_no == 0:
return (line_no, position)
else:
return (line_no, position - newline_positions[line_no - 1] - 1)
return (line_no + 1, position - newline_positions[-1] - 1)
else:
return (0, position)
def point_to_source(source, position, fmt=(2, True, "~~~~~", "^")):
"""Point to a position in source code.
source is the text we're pointing in.
position is a 2-tuple of (line_number, character_number) to point to.
fmt is a 4-tuple of formatting parameters, they are:
name default description
---- ------- -----------
surrounding_lines 2 the number of lines above and below the target line to print
show_line_numbers True if true line numbers will be generated for the output_lines
tail_body "~~~~~" the body of the tail
pointer_char "^" the character that will point to the position
"""
surrounding_lines, show_line_numbers, tail_body, pointer_char = fmt
line_no, char_no = position
lines = source.split("\n")
line = lines[line_no]
if char_no >= len(tail_body):
tail = " " * (char_no - len(tail_body)) + tail_body + pointer_char
else:
tail = " " * char_no + pointer_char + tail_body
if show_line_numbers:
line_no_width = int(math.ceil(math.log10(max(1, line_no + surrounding_lines))) + 1)
line_fmt = "{0:" + str(line_no_width) + "}: {1}"
else:
line_fmt = "{1}"
pivot = line_no + 1
output_lines = [(pivot, line), ("", tail)]
for i in range(surrounding_lines):
upper_ofst = i + 1
upper_idx = line_no + upper_ofst
lower_ofst = -upper_ofst
lower_idx = line_no + lower_ofst
if lower_idx >= 0:
output_lines.insert(0, (pivot + lower_ofst, lines[lower_idx]))
if upper_idx < len(lines):
output_lines.append((pivot + upper_ofst, lines[upper_idx]))
return "\n".join(line_fmt.format(n, c) for n, c in output_lines)
|
import glob
import sys
sys.path.append('..')
from lib.mppaper import *
import lib.mpsetup as mpsetup
import lib.immune as immune
files = sorted((immune.parse_value(f, 'epsilon'), f) for f in glob.glob("data/*.npz"))
import run
sigma = run.sigma
epsilons = []
similarities = []
similaritiesQ = []
similaritiesPtilde = []
for epsilon, f in files:
npz = np.load(f)
P1 = npz['P1']
P2 = npz['P2']
Ptilde1 = npz['Ptilde1']
Ptilde2 = npz['Ptilde2']
Q1 = npz['Q1']
Q2 = npz['Q2']
epsilons.append(epsilon)
similarities.append(immune.similarity(P1, P2))
similaritiesQ.append(immune.similarity(Q1, Q2))
similaritiesPtilde.append(immune.similarity(Ptilde1, Ptilde2))
fig = plt.figure()
ax = fig.add_subplot(121)
ax.axhline(1.0, color=almostblack)
ax.plot(epsilons, similarities, label=r'$P_r^\star$', **linedotstyle)
ax.plot(epsilons, similaritiesQ, label=r'$Q_a$', **linedotstyle)
ax.plot(epsilons, similaritiesPtilde, label=r'$\tilde P_a$', **linedotstyle)
ax.set_xlabel(r'noise $\epsilon$')
ax.set_ylabel('Similarity')
ax.set_xlim(0.0, 0.5)
ax.set_ylim(0.0, 1.05)
ax.legend(ncol=1, loc='center right')
ax.xaxis.labelpad = axis_labelpad
ax.yaxis.labelpad = axis_labelpad
mpsetup.despine(ax)
fig.tight_layout(pad=tight_layout_pad)
fig.subplots_adjust(top=0.85)
epsilon_illustration = 0.2
epsilon, f = [tup for tup in files if tup[0] == epsilon_illustration][0]
npz = np.load(f)
P1 = npz['P1']
P2 = npz['P2']
Qbase = npz['Qbase']
Q1 = npz['Q1']
Q2 = npz['Q2']
x = npz['x']
axQ = fig.add_subplot(222)
for i, Q in enumerate([Q1, Q2]):
axQ.plot(x/sigma, Q, lw=0.5 * linewidth, label='ind. %g' % (i+1))
axQ.set_xlim(0, 10)
axQ.set_ylabel(r'$Q_a$')
axP = fig.add_subplot(224, sharex=axQ)
for i, p in enumerate([P1, P2]):
axP.plot(x/sigma, p, label='ind. %g' % (i+1), **linedotstyle)
axP.locator_params(axis='x', nbins=5, tight=True)
axP.set_xlim(0, 20)
axP.set_ylabel(r'$P_r^\star$')
axP.legend(ncol=2, handletextpad=0.1,
loc='upper right',
bbox_to_anchor=(1.05, 1.20))
for a in [axQ, axP]:
a.set_ylim(ymin=0.0)
mpsetup.despine(a)
a.set_yticks([])
a.xaxis.labelpad = axis_labelpad
a.yaxis.labelpad = axis_labelpad
axP.set_xlabel(r'$x \; / \; \sigma$')
plt.setp(axQ.get_xticklabels(), visible=False)
fig.tight_layout(pad=tight_layout_pad, h_pad=1.0)
fig.savefig('fig4.svg')
plt.show()
|
import random
import string
def random_string(n):
    '''Return a random string of n uppercase letters and digits.'''
    rng = random.SystemRandom()  # cryptographically secure source
    result = ''
    for _ in range(n):
        result += rng.choice(string.ascii_uppercase + string.digits)
    return result
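# Example: random_string(8) might return '7GQZK2W9' (output is non-deterministic).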
|
from os import getcwd
from os.path import abspath, dirname, join as path_join
from unittest import TestCase
from mock import patch, MagicMock
from robot.libraries.BuiltIn import BuiltIn
CURDIR = abspath(dirname(__file__))
class TestScreenshot(TestCase):
def setUp(self):
self.mock = MagicMock()
self.patcher = patch.dict('sys.modules', {'pyautogui': self.mock})
self.patcher.start()
from ImageHorizonLibrary import ImageHorizonLibrary
self.lib = ImageHorizonLibrary()
def tearDown(self):
self.mock.reset_mock()
self.patcher.stop()
def _take_screenshot_many_times(self, expected_filename):
folder = path_join(CURDIR, 'reference_folder')
self.lib.set_screenshot_folder(folder)
for i in range(1, 15):
self.lib.take_a_screenshot()
self.mock.screenshot.assert_called_once_with(
path_join(folder, expected_filename % i))
self.mock.reset_mock()
def test_take_a_screenshot(self):
self._take_screenshot_many_times('ImageHorizon-screenshot-%d.png')
def test_take_a_screenshot_inside_robot(self):
with patch.object(BuiltIn, 'get_variable_value',
return_value='Suite Name'):
self._take_screenshot_many_times('SuiteName-screenshot-%d.png')
def test_take_a_screenshot_with_invalid_folder(self):
from ImageHorizonLibrary import ScreenshotFolderException
for index, invalid_folder in enumerate((None, 0, False), 1):
self.lib.screenshot_folder = invalid_folder
expected = path_join(getcwd(),
'ImageHorizon-screenshot-%d.png' % index)
self.lib.take_a_screenshot()
self.mock.screenshot.assert_called_once_with(expected)
self.mock.reset_mock()
for invalid_folder in (123, object()):
self.lib.screenshot_folder = invalid_folder
with self.assertRaises(ScreenshotFolderException):
self.lib.take_a_screenshot()
|
import tkinter.filedialog as tkFileDialog
import numpy as np
from numpy import sin,cos
import os
def InnerOrientation(mat1,mat2):
    """
    mat1 is the 4x2 matrix of pixel coordinates; mat2 is the 4x2 matrix of
    theoretical coordinates. The six orientation parameters h0, h1, h2,
    k0, k1, k2 are defined by:
        [x] = [h0] + [h1 h2] [i]
        [y]   [k0]   [k1 k2] [j]
    Returns the homogeneous matrix of the six orientation parameters,
        [h1 h2 h0]
        [k1 k2 k0]
        [0  0  1 ]
    plus the unit-weight variance in x and the unit-weight variance in y.
    """
# mat1=np.matrix(mat1)
# mat2=np.matrix(mat2)
y=mat2.ravel()
y=y.T
xlist=[]
for i in range(int(y.size/2)):
x0=np.matrix([[1,mat1[i,0],mat1[i,1],0,0,0],[0,0,0,1,mat1[i,0],mat1[i,1]]])
xlist.append(x0)
x=np.vstack(xlist)
# print(x)
N=np.linalg.inv(x.T @ x)
beta=N @ x.T @ y
# print(beta)
    r=(np.size(y)-6)  # degrees of freedom
    e=y-x@beta
    ex=e[0::2]
    ey=e[1::2]
    # unit-weight variances: sum of squared residuals over degrees of freedom
    sigmax=(np.linalg.norm(ex)**2/r)
    sigmay=(np.linalg.norm(ey)**2/r)
# print(sigmax)
# print(sigmay)
return(np.matrix([[beta[1,0],beta[2,0],beta[0,0]],[beta[4,0],beta[5,0],beta[3,0]],[0,0,1]]),sigmax,sigmay)
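# Worked sketch (hypothetical 4-point calibration with the exact affine map
# x = 2 + 3*i, y = -1 + 4*j, so residuals are zero):
#   mat1 = np.matrix([[0, 0], [1, 0], [0, 1], [1, 1]])    # pixel (i, j)
#   mat2 = np.matrix([[2, -1], [5, -1], [2, 3], [5, 3]])  # theoretical (x, y)
#   H, sx, sy = InnerOrientation(mat1, mat2)
#   # H == [[3, 0, 2], [0, 4, -1], [0, 0, 1]], sx == sy == 0.0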
def openkbfile():
    #default_dir = r"C:\Users\lenovo\Desktop"  # default directory to open
    fname = tkFileDialog.askopenfilename(title=u"Select file",filetypes=[("kb file", "*.kb"), ("all", "*.*")],initialdir=r"D:\学习\摄影测量\摄影测量实验数据-后方交会、前方交会")
f=open(fname,mode='r')
lines=f.readlines()
f.close()
mat=[]
for line in lines:
t=line.split()
mat.append([float(t[0]),float(t[1])])
#initialdir=(os.path.expanduser(default_dir))
    # print(fname)  # full path of the selected file
mat1=mat[0::2]
mat2=mat[1::2]
mat,sigmax2,sigmay2=InnerOrientation(np.matrix(mat1),np.matrix(mat2))
print(mat,sigmax2,sigmay2)
if __name__=="__main__":
openkbfile()
|
"""Unit tests for mapi/endpoints/tmdb.py."""
import pytest
from mapi.endpoints import tmdb_find, tmdb_movies, tmdb_search_movies
from mapi.exceptions import MapiNotFoundException, MapiProviderException
from tests import JUNK_TEXT
GOONIES_IMDB_ID = "tt0089218"
GOONIES_TMDB_ID = 9340
JUNK_IMDB_ID = "tt1234567890"
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__imdb_success(tmdb_api_key):
expected_top_level_keys = {
"movie_results",
"person_results",
"tv_episode_results",
"tv_results",
"tv_season_results",
}
expected_movie_results_keys = {
"adult",
"backdrop_path",
"genre_ids",
"id",
"original_language",
"original_title",
"overview",
"poster_path",
"popularity",
"release_date",
"title",
"video",
"vote_average",
"vote_count",
}
result = tmdb_find(tmdb_api_key, "imdb_id", GOONIES_IMDB_ID)
assert isinstance(result, dict)
assert set(result.keys()) == expected_top_level_keys
assert len(result.get("movie_results", {})) > 0
assert expected_movie_results_keys == set(
result.get("movie_results", {})[0].keys()
)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__api_key_fail():
with pytest.raises(MapiProviderException):
tmdb_find(JUNK_TEXT, "imdb_id", GOONIES_IMDB_ID, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__invalid_id_imdb(tmdb_api_key):
with pytest.raises(MapiProviderException):
tmdb_find(tmdb_api_key, "imdb_id", JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_find__not_found(tmdb_api_key):
with pytest.raises(MapiNotFoundException):
tmdb_find(tmdb_api_key, "imdb_id", JUNK_IMDB_ID)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__success(tmdb_api_key):
expected_top_level_keys = {
"adult",
"backdrop_path",
"belongs_to_collection",
"budget",
"genres",
"homepage",
"id",
"imdb_id",
"original_language",
"original_title",
"overview",
"popularity",
"poster_path",
"production_companies",
"production_countries",
"release_date",
"revenue",
"runtime",
"spoken_languages",
"status",
"tagline",
"title",
"video",
"vote_average",
"vote_count",
}
result = tmdb_movies(tmdb_api_key, GOONIES_TMDB_ID)
assert isinstance(result, dict)
assert set(result.keys()) == expected_top_level_keys
assert result.get("original_title") == "The Goonies"
def test_tmdb_movies__api_key_fail():
with pytest.raises(MapiProviderException):
tmdb_movies(JUNK_TEXT, "", cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__id_tmdb_fail(tmdb_api_key):
with pytest.raises(MapiProviderException):
tmdb_movies(tmdb_api_key, JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_movies__not_found(tmdb_api_key):
with pytest.raises(MapiNotFoundException):
tmdb_movies(tmdb_api_key, "1" * 10)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__success(tmdb_api_key):
expected_top_level_keys = {
"page",
"results",
"total_pages",
"total_results",
}
expected_results_keys = {
"adult",
"backdrop_path",
"genre_ids",
"id",
"original_language",
"original_title",
"overview",
"popularity",
"poster_path",
"release_date",
"title",
"video",
"vote_average",
"vote_count",
}
result = tmdb_search_movies(tmdb_api_key, "the goonies", 1985)
assert isinstance(result, dict)
assert set(result.keys()) == expected_top_level_keys
assert isinstance(result["results"], list)
assert expected_results_keys == set(result.get("results", [{}])[0].keys())
assert len(result["results"]) == 1
assert result["results"][0]["original_title"] == "The Goonies"
result = tmdb_search_movies(tmdb_api_key, "the goonies")
assert len(result["results"]) > 1
def test_tmdb_search_movies__bad_api_key():
with pytest.raises(MapiProviderException):
tmdb_search_movies(JUNK_TEXT, "the goonies", cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__bad_title(tmdb_api_key):
with pytest.raises(MapiNotFoundException):
tmdb_search_movies(tmdb_api_key, JUNK_TEXT, cache=False)
@pytest.mark.usefixtures("tmdb_api_key")
def test_tmdb_search_movies__bad_year(tmdb_api_key):
with pytest.raises(MapiProviderException):
tmdb_search_movies(
tmdb_api_key, "the goonies", year=JUNK_TEXT, cache=False
)
|
"""Provide infrastructure to allow exploration of variations within populations.
Uses the gemini framework (https://github.com/arq5x/gemini) to build SQLite
database of variations for query and evaluation.
"""
import collections
import csv
from distutils.version import LooseVersion
import os
import subprocess
import toolz as tz
from bcbio import install, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import multiallelic, vcfutils
def prep_gemini_db(fnames, call_info, samples, extras):
"""Prepare a gemini database from VCF inputs prepared with snpEff.
"""
data = samples[0]
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
name, caller, is_batch = call_info
gemini_db = os.path.join(out_dir, "%s-%s.db" % (name, caller))
multisample_vcf = get_multisample_vcf(fnames, name, caller, data)
gemini_vcf = multiallelic.to_single(multisample_vcf, data)
    use_gemini = (do_db_build(samples) and
                  any(vcfutils.vcf_has_variants(f) for f in fnames))
    if not utils.file_exists(gemini_db) and use_gemini:
        ped_file = create_ped_file(samples + extras, gemini_vcf)
        gemini_db = create_gemini_db(gemini_vcf, data, gemini_db, ped_file)
return [[(name, caller), {"db": gemini_db if utils.file_exists(gemini_db) else None,
"vcf": multisample_vcf if is_batch else None}]]
def create_gemini_db(gemini_vcf, data, gemini_db=None, ped_file=None):
if not gemini_db:
gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
if not utils.file_exists(gemini_db):
if not vcfutils.vcf_has_variants(gemini_vcf):
return None
with file_transaction(data, gemini_db) as tx_gemini_db:
gemini = config_utils.get_program("gemini", data["config"])
if "program_versions" in data["config"].get("resources", {}):
gemini_ver = programs.get_version("gemini", config=data["config"])
else:
gemini_ver = None
# Recent versions of gemini allow loading only passing variants
load_opts = ""
if not gemini_ver or LooseVersion(gemini_ver) > LooseVersion("0.6.2.1"):
load_opts += " --passonly"
# For small test files, skip gene table loading which takes a long time
if gemini_ver and LooseVersion(gemini_ver) > LooseVersion("0.6.4"):
if _is_small_vcf(gemini_vcf):
load_opts += " --skip-gene-tables"
if "/test_automated_output/" in gemini_vcf:
load_opts += " --test-mode"
# Skip CADD or gerp-bp if neither are loaded
if gemini_ver and LooseVersion(gemini_ver) >= LooseVersion("0.7.0"):
gemini_dir = install.get_gemini_dir(data)
for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
if not os.path.exists(os.path.join(gemini_dir, check_file)):
load_opts += " %s" % skip_cmd
# skip gerp-bp which slows down loading
load_opts += " --skip-gerp-bp "
num_cores = data["config"]["algorithm"].get("num_cores", 1)
tmpdir = os.path.dirname(tx_gemini_db)
eanns = _get_effects_flag(data)
# Apply custom resource specifications, allowing use of alternative annotation_dir
resources = config_utils.get_resources("gemini", data["config"])
gemini_opts = " ".join([str(x) for x in resources["options"]]) if resources.get("options") else ""
cmd = ("{gemini} {gemini_opts} load {load_opts} -v {gemini_vcf} {eanns} --cores {num_cores} "
"--tempdir {tmpdir} {tx_gemini_db}")
cmd = cmd.format(**locals())
do.run(cmd, "Create gemini database for %s" % gemini_vcf, data)
if ped_file:
cmd = [gemini, "amend", "--sample", ped_file, tx_gemini_db]
do.run(cmd, "Add PED file to gemini database", data)
return gemini_db
def _get_effects_flag(data):
effects_config = tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
if effects_config == "snpeff":
return "-t snpEff"
elif effects_config == "vep":
return "-t VEP"
else:
return ""
def get_affected_status(data):
"""Retrieve the affected/unaffected status of sample.
Uses unaffected (1), affected (2), unknown (0) coding from PED files:
http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
"""
affected = set(["tumor", "affected"])
unaffected = set(["normal", "unaffected"])
phenotype = str(tz.get_in(["metadata", "phenotype"], data, "")).lower()
if phenotype in affected:
return 2
elif phenotype in unaffected:
return 1
else:
return 0
def create_ped_file(samples, base_vcf):
"""Create a GEMINI-compatible PED file, including gender, family and phenotype information.
Checks for a specified `ped` file in metadata, and will use sample information from this file
before reconstituting from metadata information.
"""
def _code_gender(data):
g = dd.get_gender(data)
if g and str(g).lower() in ["male", "m"]:
return 1
elif g and str(g).lower() in ["female", "f"]:
return 2
else:
return 0
out_file = "%s.ped" % utils.splitext_plus(base_vcf)[0]
sample_ped_lines = {}
header = ["#Family_ID", "Individual_ID", "Paternal_ID", "Maternal_ID", "Sex", "Phenotype", "Ethnicity"]
for md_ped in list(set([x for x in [tz.get_in(["metadata", "ped"], data)
for data in samples] if x is not None])):
with open(md_ped) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
for parts in reader:
if parts[0].startswith("#") and len(parts) > len(header):
header = header + parts[len(header):]
else:
sample_ped_lines[parts[1]] = parts
if not utils.file_exists(out_file):
with file_transaction(samples[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(header)
batch = _find_shared_batch(samples)
for data in samples:
sname = dd.get_sample_name(data)
if sname in sample_ped_lines:
writer.writerow(sample_ped_lines[sname])
else:
writer.writerow([batch, sname, "-9", "-9",
_code_gender(data), get_affected_status(data), "-9"])
return out_file
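# For reference, a reconstructed PED row is tab-separated and looks like:
#   #Family_ID  Individual_ID  Paternal_ID  Maternal_ID  Sex  Phenotype  Ethnicity
#   batch1      sample1        -9           -9           1    2          -9
# with Sex coded 1=male/2=female/0=unknown and Phenotype 2=affected/1=unaffected/0=unknown.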
def _find_shared_batch(samples):
for data in samples:
batch = tz.get_in(["metadata", "batch"], data, dd.get_sample_name(data))
if not isinstance(batch, (list, tuple)):
return batch
def _is_small_vcf(vcf_file):
"""Check for small VCFs which we want to analyze quicker.
"""
count = 0
small_thresh = 250
with utils.open_gzipsafe(vcf_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
count += 1
if count > small_thresh:
return False
return True
def get_multisample_vcf(fnames, name, caller, data):
"""Retrieve a multiple sample VCF file in a standard location.
Handles inputs with multiple repeated input files from batches.
"""
unique_fnames = []
for f in fnames:
if f not in unique_fnames:
unique_fnames.append(f)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
if len(unique_fnames) > 1:
gemini_vcf = os.path.join(out_dir, "%s-%s.vcf.gz" % (name, caller))
vrn_file_batch = None
for variant in data["variants"]:
if variant["variantcaller"] == caller and variant.get("vrn_file_batch"):
vrn_file_batch = variant["vrn_file_batch"]
if vrn_file_batch:
utils.symlink_plus(vrn_file_batch, gemini_vcf)
return gemini_vcf
else:
return vcfutils.merge_variant_files(unique_fnames, gemini_vcf, data["sam_ref"],
data["config"])
else:
gemini_vcf = os.path.join(out_dir, "%s-%s%s" % (name, caller, utils.splitext_plus(unique_fnames[0])[1]))
utils.symlink_plus(unique_fnames[0], gemini_vcf)
return gemini_vcf
def _has_gemini(data):
    gemini_dir = install.get_gemini_dir(data)
return ((os.path.exists(gemini_dir) and len(os.listdir(gemini_dir)) > 0)
and os.path.exists(os.path.join(os.path.dirname(gemini_dir), "gemini-config.yaml")))
def do_db_build(samples, need_bam=True, gresources=None):
"""Confirm we should build a gemini database: need gemini + human samples + not in tool_skip.
"""
genomes = set()
for data in samples:
if not need_bam or data.get("align_bam"):
genomes.add(data["genome_build"])
if "gemini" in utils.get_in(data, ("config", "algorithm", "tools_off"), []):
return False
if len(genomes) == 1:
if not gresources:
gresources = samples[0]["genome_resources"]
return (tz.get_in(["aliases", "human"], gresources, False)
and _has_gemini(samples[0]))
else:
return False
def get_gemini_files(data):
"""Enumerate available gemini data files in a standard installation.
"""
try:
from gemini import annotations, config
except ImportError:
return {}
return {"base": config.read_gemini_config()["annotation_dir"],
"files": annotations.get_anno_files().values()}
def _group_by_batches(samples, check_fn):
"""Group data items into batches, providing details to retrieve results.
"""
batch_groups = collections.defaultdict(list)
singles = []
out_retrieve = []
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch = tz.get_in(["metadata", "batch"], data)
name = str(data["name"][-1])
if batch:
out_retrieve.append((str(batch), data))
else:
out_retrieve.append((name, data))
for vrn in data["variants"]:
if vrn.get("population", True):
if batch:
batch_groups[(str(batch), vrn["variantcaller"])].append((vrn["vrn_file"], data))
else:
singles.append((name, vrn["variantcaller"], data, vrn["vrn_file"]))
else:
extras.append(data)
return batch_groups, singles, out_retrieve, extras
def _has_variant_calls(data):
if data.get("align_bam"):
for vrn in data["variants"]:
if vrn.get("vrn_file") and vcfutils.vcf_has_variants(vrn["vrn_file"]):
return True
return False
def prep_db_parallel(samples, parallel_fn):
"""Prepares gemini databases in parallel, handling jointly called populations.
"""
batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
to_process = []
has_batches = False
    for (name, caller), info in batch_groups.items():
fnames = [x[0] for x in info]
to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
has_batches = True
for name, caller, data, fname in singles:
to_process.append([[fname], (str(name), caller, False), [data], extras])
if len(samples) > 0 and not do_db_build([x[0] for x in samples]) and not has_batches:
return samples
output = parallel_fn("prep_gemini_db", to_process)
out_fetch = {}
for batch_id, out_file in output:
out_fetch[tuple(batch_id)] = out_file
out = []
for batch_name, data in out_retrieve:
out_variants = []
for vrn in data["variants"]:
use_population = vrn.pop("population", True)
if use_population:
vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
out_variants.append(vrn)
data["variants"] = out_variants
out.append([data])
for x in extras:
out.append([x])
return out
|
from Operators.Mutation.Mutator import Mutator
from Operators.Mutation.DisplacementMutator import DisplacementMutator
from Operators.Mutation.InversionMutator import InversionMutator
|
class Solution(object):
def dfs(self,rooms):
# get all gate position
queue=[(i,j,0) for i,rows in enumerate(rooms) for j,v in enumerate(rows) if not v]
while queue:
i,j,depth=queue.pop()
# has a min path to gate and update
if depth<rooms[i][j]:
rooms[i][j]=depth
for newi,newj in (i+1,j),(i-1,j),(i,j-1),(i,j+1):
if 0<=newi<len(rooms) and 0<=newj<len(rooms[0]) and depth<rooms[newi][newj]:
queue.append((newi,newj,depth+1))
def bfs(self,rooms):
# get all gate position
queue=[(i,j) for i,rows in enumerate(rooms) for j,v in enumerate(rows) if not v]
while queue:
# pop the fist insert
i,j=queue.pop(0)
for newi,newj in (i+1,j),(i-1,j),(i,j-1),(i,j+1):
if 0<=newi<len(rooms) and 0<=newj<len(rooms[0]) and rooms[newi][newj]==2147483647:
rooms[newi][newj]=rooms[i][j]+1
queue.append((newi,newj))
def wallsAndGates(self, rooms):
"""
:type rooms: List[List[int]]
:rtype: void Do not return anything, modify rooms in-place instead.
"""
self.bfs(rooms)
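# Quick sanity check (hypothetical grid, not part of the original snippet):
# 2147483647 marks an empty room, 0 a gate, -1 a wall.
if __name__ == "__main__":
    INF = 2147483647
    rooms = [[INF, -1, 0, INF],
             [INF, INF, INF, -1],
             [INF, -1, INF, -1],
             [0, -1, INF, INF]]
    Solution().wallsAndGates(rooms)
    # expected: [[3, -1, 0, 1], [2, 2, 1, -1], [1, -1, 2, -1], [0, -1, 3, 4]]
    print(rooms)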
|
from .base import BASE_DIR, INSTALLED_APPS, MIDDLEWARE_CLASSES, REST_FRAMEWORK
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
SECRET_KEY = 'secret'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'holonet',
'USER': 'holonet',
'PASSWORD': '',
'HOST': '127.0.0.1',
'PORT': '',
}
}
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://127.0.0.1:6379/0',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
BROKER_URL = 'redis://127.0.0.1'
ELASTICSEARCH = {
'default': {
'hosts': [
'127.0.0.1:9200'
]
}
}
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] += ['rest_framework.renderers.BrowsableAPIRenderer']
INSTALLED_APPS += ('debug_toolbar', )
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )
INTERNAL_IPS = ('127.0.0.1', )
POSTFIX_TRANSPORT_MAPS_LOCATION = '{0}/../mta/shared/'.format(BASE_DIR)
|
import requests
import copy
def fb_get_group_members(fb_page_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/members?limit=1000&access_token=%s' % (fb_page_id, access_token)
fb_group_members = {'status':'OK', 'data':{'members':[], 'users_count':0}}
while True:
response = requests.get(url)
if response.status_code == 200:
try:
keys = response.json().keys()
if 'data' in keys:
#title_id += [ id['id'] for id in content ]
content = response.json()['data']
keys = response.json().keys()
url = ''
if 'paging' in keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for member in content:
                        member['is_group_member'] = 1
fb_group_members['data']['members'].append(member)
if url =='':
break
except (KeyError, TypeError):
fb_group_members['status'] = 'Unknown error'
break
else:
fb_group_members['status'] = str(response.status_code)
break
fb_group_members['data']['users_count'] = len(fb_group_members['data']['members'])
return fb_group_members
def fb_get_group_data(fb_page_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/?fields=id,name&access_token=%s' % (fb_page_id, access_token)
fb_group = {'status':'OK', 'data':{'id':'','name':'', 'updated_time':'','members':[], 'users_count':0, 'all_users':[] }}
    response = requests.get(url)
if response.status_code == 200:
fb_data = fb_group['data']
data = response.json()
keys = data.keys()
if 'id' in keys:
fb_data['id'] = data['id']
else:
fb_group['status'] = 'Missing group id'
if 'name' in keys:
fb_data['name'] = data['name']
else:
fb_group['status'] = 'Missing group name'
'''
if 'updated_time' in keys:
fb_data['updated_time'] = data['updated_time']
'''
members = fb_get_group_members(fb_page_id, access_token)
if members['status']== 'OK':
fb_group['data']['members'] = copy.deepcopy(members['data']['members'])
fb_group['data']['users_count'] = members['data']['users_count']
fb_group['data']['all_users'] = copy.deepcopy(members['data']['members'])
else:
fb_group['status'] = str(response.status_code)
return fb_group
def fb_get_all_posts(fb_page_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/feed?fields=id,name,link,message,from,updated_time,created_time&access_token=%s' % (fb_page_id, access_token)
fb_posts = {'status':'OK', 'data':{'posts':[],'posts_count':0}}
#fb_posts = {'status':'OK', 'data':{'id':'','name':'', 'updated_time':'','link':'', 'message':''}}
while True:
response = requests.get(url)
# print(response.status_code)
if response.status_code == 200:
try:
keys = response.json().keys()
                #check that the payload is JSON
if 'data' in keys:
content = response.json()['data']
keys = response.json().keys()
url = ''
if 'paging' in keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for post in content:
fb_posts['data']['posts'].append(post)
if url =='':
break
except (KeyError, TypeError):
fb_posts['status'] = 'Unknown error'
break
else:
fb_posts['status'] = str(response.status_code)
break
fb_posts['data']['posts_count'] = len(fb_posts['data']['posts'])
return fb_posts
def fb_get_post_likes(post_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/reactions/?access_token=%s' % (post_id, access_token)
fb_likes = {'status':'OK', 'data':{'likes':[],'likes_count':0}}
while True:
response = requests.get(url)
if response.status_code == 200:
try:
keys = response.json().keys()
                #check that the payload is JSON
if 'data' in keys:
content = response.json()['data']
keys = response.json().keys()
url = ''
if 'paging' in keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for fb_like in content:
fb_likes['data']['likes'].append(fb_like)
if url =='':
break
except (KeyError, TypeError):
fb_likes['status'] = 'Unknown error'
break
else:
fb_likes['status'] = str(response.status_code)
break
fb_likes['data']['likes_count'] = len(fb_likes['data']['likes'])
return fb_likes
def fb_get_post_comments(post_id, access_token):
url = 'https://graph.facebook.com/v2.8/%s/comments/?fields=id,message,from,updated_time,created_time&access_token=%s' % (post_id, access_token)
fb_comments = {'status':'OK', 'data':{'comments':[],'comments_count':0}}
while True:
response = requests.get(url)
if response.status_code == 200:
try:
keys = response.json().keys()
                #check that the payload is JSON
if 'data' in keys:
content = response.json()['data']
keys = response.json().keys()
url = ''
if 'paging' in keys:
keys = response.json()['paging'].keys()
if 'next' in keys:
url = response.json()['paging']['next']
for fb_comment in content:
fb_comments['data']['comments'].append(fb_comment)
if url =='':
break
except (KeyError, TypeError):
fb_comments['status'] = 'Unknown error'
break
else:
fb_comments['status'] = str(response.status_code)
break
fb_comments['data']['comments_count'] = len(fb_comments['data']['comments'])
return fb_comments
def fb_get_all_data(fb_page_id, access_token):
    #read all the group data
fb_group = fb_get_group_data(fb_page_id, access_token)
if fb_group['status'] == 'OK':
print('Group id: %s name: %s' % (fb_group['data']['id'], fb_group['data']['name']))
print('User in group: %s' % fb_group['data']['users_count'])
        #write to the database
else:
print(fb_group['status'])
exit()
    #read all the posts
    print('************* reading all posts ************')
    member_ids = set(m['id'] for m in fb_group['data']['members'])
    data = fb_get_all_posts(fb_page_id, access_token)
if data['status'] == 'OK':
fb_posts = copy.deepcopy(data['data']['posts'])
posts_count = data['data']['posts_count']
# print(fb_posts[0])
print('Posts in group: %s' % posts_count)
for fb_post in fb_posts:
            print('************* Post %s from %s ************' % (fb_post['id'], fb_post['created_time']))
            # fetch likes for the post
data = fb_get_post_likes(fb_post['id'], access_token)
if data['status'] == 'OK':
fb_post['likes'] = copy.deepcopy(data['data']['likes'])
fb_post['likes_count'] = data['data']['likes_count']
#print('post likes to users')
                for user_like in fb_post['likes']:
                    #print(user_like)
                    if user_like['id'] not in member_ids:
                        #print('new user %s' % user_like)
                        fb_group['data']['all_users'].append({'id':user_like['id'],'name':user_like['name'], 'is_group_member':0})
print('Likes count: %s' % fb_post['likes_count'])
else:
print(data['status'])
            # fetch comments for the post
data = fb_get_post_comments(fb_post['id'], access_token)
if data['status'] == 'OK':
fb_post['comments'] = copy.deepcopy(data['data']['comments'])
fb_post['comments_count'] = data['data']['comments_count']
print('Comments count: %s' % fb_post['comments_count'] )
#print('post likes to users')
                for user_comments in fb_post['comments']:
                    #print(user_comments)
                    keys = user_comments.keys()
                    comment_user_id = user_comments['from']['id'] if 'from' in keys else ''
                    comment_user_name = user_comments['from']['name'] if 'from' in keys else ''
                    if comment_user_id and comment_user_id not in member_ids:
                        #print('new user %s' % user_comments)
                        fb_group['data']['all_users'].append({'id':comment_user_id,'name':comment_user_name, 'is_group_member':0})
else:
print(data['status'])
fb_group['data']['posts'] = copy.deepcopy(fb_posts)
fb_group['data']['posts_count'] = posts_count
else:
print(data['status'])
    #print('before: %s' % fb_group)
return fb_group
if __name__ == '__main__':
    print('Not meant to be run from the console')
|
"""
frest - flask restful api frame
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This project is the frame of the restful api server created with flask.
:copyright: (C) 2017 h4wldev@gmail.com
:license: MIT, see LICENSE for more details.
"""
import os
from flask_script import Server, Manager
from flask_migrate import Migrate, MigrateCommand
from app import app, db, routes, handler
from app.config import APP_DEFAULT_PORT, APP_SECRET_KEY, ENVIRONMENT
from app.modules.auth import login, token
if __name__ == '__main__':
port = int(os.environ.get('PORT', APP_DEFAULT_PORT))
app.secret_key = APP_SECRET_KEY
db.create_all()
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if ENVIRONMENT == 'production' or ENVIRONMENT == 'testing':
manager.add_command('runserver', Server(host='0.0.0.0', port=port, use_debugger=False))
else:
manager.add_command('runserver', Server(host='0.0.0.0', port=port, use_debugger=True))
manager.run()
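# Typical invocations with Flask-Script/Flask-Migrate, assuming this module is
# saved as manage.py (hypothetical filename):
#   python manage.py runserver
#   python manage.py db init && python manage.py db migrate && python manage.py db upgrade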
|
import sys
class abstract:
params = {}
windowId = None
terminated = False
def initParams(self):
return self
def __init__(self):
self.initParams().init()
return
def init(self):
return
def mouse(self, button, state, x, y):
return
def mouseMotion(self, x, y):
return
def keyboard(self, asciiCode, x, y):
return
def keyboardSpecial(self, key, x, y):
return
def idle(self):
return
def timer(self, value):
return
def render(self):
return
def reshape(self, width, height):
return
def run(self):
return self
def destroy(self):
del self
return
def select(self):
return self.activate()
def activate(self):
return self
def redisplay(self):
return self
def hide(self):
return self
def show(self):
return self
def title(self, title):
return self
def setPosition(self, x, y):
return self
def setResolution(self, width, height):
return self
|
import event
import nxt.locator
import nxt.motor as motor
brick = nxt.locator.find_one_brick()
height_motor = motor.Motor(brick, motor.PORT_A)
height_motor.turn(127, 5000, brake=False)
|
from GestureAgentsTUIO.Tuio import TuioAgentGenerator
import GestureAgentsPygame.Screen as Screen
from pygame.locals import *
class MouseAsTuioAgentGenerator(object):
def __init__(self):
self.pressed = False
self.myagent = None
self.sid = -1
self.screensize = Screen.size
def event(self, e):
if e.type == MOUSEBUTTONDOWN:
self.pressed = True
self.myagent = TuioAgentGenerator.makeCursorAgent()
self._updateAgent(self.myagent, e)
self.myagent.newAgent(self.myagent)
self.myagent.newCursor(self.myagent)
elif e.type == MOUSEBUTTONUP:
self.pressed = False
self._updateAgent(self.myagent, e)
self.myagent.removeCursor(self.myagent)
self.myagent.finish()
self.myagent = None
elif e.type == MOUSEMOTION:
if self.pressed:
self._updateAgent(self.myagent, e)
self.myagent.updateCursor(self.myagent)
def _updateAgent(self, a, e):
a.pos = e.pos
a.posx = e.pos[0]
a.posy = e.pos[1]
a.sessionid = self.sid
a.xmot = 0
a.ymot = 0
a.mot_accel = 0
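# Typical wiring inside a pygame event loop (sketch; `gen` is a hypothetical name):
#   gen = MouseAsTuioAgentGenerator()
#   for e in pygame.event.get():
#       gen.event(e)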
|
"""
A Machine learning algorithm for K mean clustering.
Date: 24th March, 2015 pm
"""
__author__ = "Anthony Wanjohi"
__version__ = "1.0.0"
import random
def euclidean_distance(point, centroid):
'''Returns the euclidean distance between two points'''
assert type(point) is tuple
assert type(centroid) is tuple
#x and y values for the point and the centroid
point_x, point_y = point
centroid_x, centroid_y = centroid
#get euclidean distance
distance = ( (point_x - centroid_x) ** 2 ) + ( (point_y - centroid_y) ** 2 )
distance = distance ** (0.5)
return round(distance, 4)
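# e.g. euclidean_distance((1, 2), (4, 6)) returns 5.0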
def get_coordinates(points):
#get coordinates for the points given in tuple form
print("Please provide coordinates for the {} points. (x, y)".format(points))
coordinates = []
for coords in range(points):
#read as a tuple i.e (x, y)
user_coords = input()
user_coords = user_coords.split(',')
x, y = int(user_coords[0]), int(user_coords[1])
coordinates.append((x, y))
return coordinates
def get_coords(file_name):
'''Get coordinates from a file.'''
file_handle = open(file_name, "r")
file_coords = []
for content in file_handle:
content = content.replace(' ', "").replace("\n", "").replace('"', "").split(',')
coord = int(content[0]), int(content[1])
file_coords.append(coord)
return file_coords
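# Expected file layout for get_coords: one "x, y" pair per line, e.g.
#   1, 1
#   2, 1
#   4, 3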
def get_group_matrix(coords, centroid_one, centroid_two):
'''Returns a group matrix'''
euclid_distance = []
grp_matrix = []
for y in coords:
#get distance for each point in regard to centroid one
distance_one = euclidean_distance(y, centroid_one)
#get distance for each point in regard to centroid two
distance_two = euclidean_distance(y, centroid_two)
euclid_distance.append((distance_one, distance_two))
        #group matrix conditions (a tie defaults to the first cluster)
        if distance_one > distance_two:
            grp_matrix.append((0, 1))
        else:
            grp_matrix.append((1, 0))
return grp_matrix
def get_avg_centroid(x_y_index, coords):
    '''Returns new centroid coordinates if more than 1 point appears in any cluster'''
x_coords, y_coords = [], []
for index in x_y_index:
#new_points.append(coords[index])
x, y = coords[index]
x_coords.append(x)
y_coords.append(y)
#get average of both x and y coords
x_coords = round(sum(x_coords) / (len(x_coords) * 1.0), 4)
y_coords = round(sum(y_coords) / (len(y_coords) * 1.0), 4)
centroid = (x_coords, y_coords)
return centroid
def k_means_clustering(points):
'''Return the group matrix given coordinates'''
coords = get_coordinates(points)
centroids = []
euclid_distance = []
group_distance = []
grp_matrix = []
#create an alphabet number mapping
alphabets = dict(A = 1,B = 2,C = 3,D = 4,E = 5,F = 6,G = 7,H = 8,I = 9,J = 10,K = 11,L = 12,M = 13,
N = 14,O = 15,P = 16,Q = 17,R = 18,S = 19,T = 20,U = 21,V = 22,W = 23,X = 24,Y = 25,Z = 26)
    #get two distinct random centroids
    while len(centroids) < 2:
        k = random.randint(0, points - 1)
        if k not in centroids:
            centroids.append(k)
#get the centroids as per the above rand positions
centroids = tuple(centroids)
i, j = centroids
centroid_one = coords[i]
centroid_two = coords[j]
print("\nRandom Centroids->",centroid_one, centroid_two)
#get the group matrix
grp_matrix = get_group_matrix(coords, centroid_one, centroid_two)
while True:
#iterate till group matrix is stable
#get the number of points in each cluster
a, b, m_index_values, n_index_values = [], [], [], []
for index, x_y_values in enumerate(grp_matrix):
m, n = x_y_values
a.append(m)
b.append(n)
if m == 1:
m_index_values.append(index)
elif n == 1:
n_index_values.append(index)
cluster_one_elems = sum(a)
cluster_two_elems = sum(b)
        #a cluster with a single point keeps its previous centroid;
        #otherwise the new centroid is the average of the cluster's points
        if cluster_one_elems > 1:
            centroid_one = get_avg_centroid(m_index_values, coords)
        if cluster_two_elems > 1:
            centroid_two = get_avg_centroid(n_index_values, coords)
print("New Centroids->",centroid_one, centroid_two)
#get new group matrix
new_grp_matrix = get_group_matrix(coords, centroid_one, centroid_two)
#when no more change happens, stop iteration
if new_grp_matrix == grp_matrix:
return grp_matrix
grp_matrix = new_grp_matrix
if __name__ == "__main__":
guess = int(input('Enter the number of coordinates to input : '))
print(k_means_clustering(guess))
|
import subprocess
import os
BASE_DIR = '/usr/bin/'
DEFAULT = BASE_DIR + 'openscad'
def build_with_openscad(state):
    args = build_args_from_state(state)
    print 'args:', ' '.join(args)
    try:
        subprocess.check_call(args)
        return True
    except subprocess.CalledProcessError as e:
        print str(e)
        return False
def build_args_from_state(state):
#executable = ScadConfig.open_scad if ScadConfig.open_scad else DEFAULT
executable = 'openscad'
replace = []
if state.params:
print 'state params:', state.params
# TODO: Handle string exceptions
replace = ['-D ' + '='.join((key, str(value)))
for key, value in state.params.iteritems()]
print 'state replace:', replace
# TODO: Handle different output types
output = os.path.join(state.output_directory, state.name + ".stl")
args = [executable, '-o', output]
if len(replace) >= 1:
args.extend(replace)
args.append(state.main_path)
return args
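# Sketch of the argv list this produces (hypothetical state values):
#   state.name='part', state.output_directory='/tmp', state.params={'w': 10},
#   state.main_path='part.scad'
# -> ['openscad', '-o', '/tmp/part.stl', '-D w=10', 'part.scad']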
|
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.contributor_email_v30_rc1 import ContributorEmailV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.contributor_orcid_v30_rc1 import ContributorOrcidV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.credit_name_v30_rc1 import CreditNameV30Rc1 # noqa: F401,E501
from orcid_api_v3.models.funding_contributor_attributes_v30_rc1 import FundingContributorAttributesV30Rc1 # noqa: F401,E501
class FundingContributorV30Rc1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'contributor_orcid': 'ContributorOrcidV30Rc1',
'credit_name': 'CreditNameV30Rc1',
'contributor_email': 'ContributorEmailV30Rc1',
'contributor_attributes': 'FundingContributorAttributesV30Rc1'
}
attribute_map = {
'contributor_orcid': 'contributor-orcid',
'credit_name': 'credit-name',
'contributor_email': 'contributor-email',
'contributor_attributes': 'contributor-attributes'
}
def __init__(self, contributor_orcid=None, credit_name=None, contributor_email=None, contributor_attributes=None): # noqa: E501
"""FundingContributorV30Rc1 - a model defined in Swagger""" # noqa: E501
self._contributor_orcid = None
self._credit_name = None
self._contributor_email = None
self._contributor_attributes = None
self.discriminator = None
if contributor_orcid is not None:
self.contributor_orcid = contributor_orcid
if credit_name is not None:
self.credit_name = credit_name
if contributor_email is not None:
self.contributor_email = contributor_email
if contributor_attributes is not None:
self.contributor_attributes = contributor_attributes
@property
def contributor_orcid(self):
"""Gets the contributor_orcid of this FundingContributorV30Rc1. # noqa: E501
:return: The contributor_orcid of this FundingContributorV30Rc1. # noqa: E501
:rtype: ContributorOrcidV30Rc1
"""
return self._contributor_orcid
@contributor_orcid.setter
def contributor_orcid(self, contributor_orcid):
"""Sets the contributor_orcid of this FundingContributorV30Rc1.
:param contributor_orcid: The contributor_orcid of this FundingContributorV30Rc1. # noqa: E501
:type: ContributorOrcidV30Rc1
"""
self._contributor_orcid = contributor_orcid
@property
def credit_name(self):
"""Gets the credit_name of this FundingContributorV30Rc1. # noqa: E501
:return: The credit_name of this FundingContributorV30Rc1. # noqa: E501
:rtype: CreditNameV30Rc1
"""
return self._credit_name
@credit_name.setter
def credit_name(self, credit_name):
"""Sets the credit_name of this FundingContributorV30Rc1.
:param credit_name: The credit_name of this FundingContributorV30Rc1. # noqa: E501
:type: CreditNameV30Rc1
"""
self._credit_name = credit_name
@property
def contributor_email(self):
"""Gets the contributor_email of this FundingContributorV30Rc1. # noqa: E501
:return: The contributor_email of this FundingContributorV30Rc1. # noqa: E501
:rtype: ContributorEmailV30Rc1
"""
return self._contributor_email
@contributor_email.setter
def contributor_email(self, contributor_email):
"""Sets the contributor_email of this FundingContributorV30Rc1.
:param contributor_email: The contributor_email of this FundingContributorV30Rc1. # noqa: E501
:type: ContributorEmailV30Rc1
"""
self._contributor_email = contributor_email
@property
def contributor_attributes(self):
"""Gets the contributor_attributes of this FundingContributorV30Rc1. # noqa: E501
:return: The contributor_attributes of this FundingContributorV30Rc1. # noqa: E501
:rtype: FundingContributorAttributesV30Rc1
"""
return self._contributor_attributes
@contributor_attributes.setter
def contributor_attributes(self, contributor_attributes):
"""Sets the contributor_attributes of this FundingContributorV30Rc1.
:param contributor_attributes: The contributor_attributes of this FundingContributorV30Rc1. # noqa: E501
:type: FundingContributorAttributesV30Rc1
"""
self._contributor_attributes = contributor_attributes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FundingContributorV30Rc1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FundingContributorV30Rc1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from django.contrib import admin
from Players.models import Player
@admin.register(Player)
class PlayerAdmin(admin.ModelAdmin):
view_on_site = True
list_display = ('pk', 'first_name', 'last_name', 'number', 'team', 'position', 'age', 'height', 'weight')
list_filter = ['team', 'position']
search_fields = ['first_name', 'last_name']
    # Disable delete when the player's team has exactly 5 players
    def has_delete_permission(self, request, obj=None):
        try:
            return Player.objects.filter(team=obj.team).count() != 5
        except AttributeError:
            # obj is None on the changelist view; returning None denies deletion there
            pass
    # Disable the delete action from the list view; not ideal, as it disables delete for all players
def get_actions(self, request):
actions = super(PlayerAdmin, self).get_actions(request)
del actions['delete_selected']
return actions
|
"""Th tests for the Rfxtrx component."""
import unittest
import time
from homeassistant.bootstrap import _setup_component
from homeassistant.components import rfxtrx as rfxtrx
from tests.common import get_test_home_assistant
class TestRFXTRX(unittest.TestCase):
"""Test the Rfxtrx component."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(0)
def tearDown(self):
"""Stop everything that was started."""
rfxtrx.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx.RFX_DEVICES = {}
if rfxtrx.RFXOBJECT:
rfxtrx.RFXOBJECT.close_connection()
self.hass.stop()
def test_default_config(self):
"""Test configuration."""
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
while len(rfxtrx.RFX_DEVICES) < 1:
time.sleep(0.1)
self.assertEqual(len(rfxtrx.RFXOBJECT.sensors()), 1)
def test_valid_config(self):
"""Test configuration."""
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}}))
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True,
'debug': True}}))
def test_invalid_config(self):
"""Test configuration."""
self.assertFalse(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {}
}))
self.assertFalse(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'invalid_key': True}}))
def test_fire_event(self):
"""Test fire event."""
self.assertTrue(_setup_component(self.hass, 'rfxtrx', {
'rfxtrx': {
'device': '/dev/serial/by-id/usb' +
'-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0',
'dummy': True}
}))
self.assertTrue(_setup_component(self.hass, 'switch', {
'switch': {'platform': 'rfxtrx',
'automatic_add': True,
'devices':
{'0b1100cd0213c7f210010f51': {
'name': 'Test',
rfxtrx.ATTR_FIREEVENT: True}
}}}))
calls = []
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen(rfxtrx.EVENT_BUTTON_PRESSED, record_event)
entity = rfxtrx.RFX_DEVICES['213c7f216']
self.assertEqual('Test', entity.name)
self.assertEqual('off', entity.state)
self.assertTrue(entity.should_fire_event)
event = rfxtrx.get_rfx_object('0b1100cd0213c7f210010f51')
event.data = bytearray([0x0b, 0x11, 0x00, 0x10, 0x01, 0x18,
0xcd, 0xea, 0x01, 0x01, 0x0f, 0x70])
rfxtrx.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.hass.pool.block_till_done()
self.assertEqual(event.values['Command'], "On")
self.assertEqual('on', entity.state)
self.assertEqual(1, len(rfxtrx.RFX_DEVICES))
self.assertEqual(1, len(calls))
self.assertEqual(calls[0].data,
{'entity_id': 'switch.test', 'state': 'on'})
|
import sys
import os.path
import subprocess
PY3 = sys.version >= '3'
from setuptools import setup, find_packages
version_py = os.path.join(os.path.dirname(__file__), 'dame', 'version.py')
try:
version_git = subprocess.check_output(
["git", "describe", "--always"]).rstrip()
# Convert bytes to str for Python3
if PY3:
version_git = version_git.decode()
except (subprocess.CalledProcessError, OSError):
with open(version_py, 'r') as fh:
version_git = fh.read().strip().split('=')[-1].replace('"', '')
version_msg = ("# Do not edit this file, "
"pipeline versioning is governed by git tags")
with open(version_py, 'w') as fh:
fh.write(version_msg + os.linesep +
"__version__='{}'\n".format(version_git))
setup(
name="dame",
author="Richard Lindsley",
version=version_git,
packages=find_packages(),
license="MIT",
entry_points={
'gui_scripts': [
'dame = dame.dame:main'
]
},
)
|
import socket, traceback
host=''
port=8080
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
print "Waiting for connections..."
s.listen(1)
while True:
try:
clientsock, clientaddr=s.accept()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
continue
try:
print "Got connection from", clientsock.getpeername()
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
try:
clientsock.close()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
|
'''
Auxiliary code providing vector-valued scoring functions
for convenient use in the parameter-free Ladder Mechanism.
Original algorithm by Avrim Blum and Moritz Hardt
Python implementation by Jamie Hall and Moritz Hardt
MIT License
'''
from sklearn.utils import check_consistent_length
from functools import wraps
import numpy as np
def validate_input(metric):
@wraps(metric)
def wrapped(y, y_pred):
_check_y_shapes(y, y_pred)
return metric(y, y_pred)
return wrapped
@validate_input
def squared_error(y, y_pred):
return ((y - y_pred)**2)
@validate_input
def absolute_error(y, y_pred):
return np.abs(y-y_pred)
@validate_input
def zero_one_loss(y, y_pred):
    # note: unlike the element-wise metrics above, this returns a scalar
    # count of mismatches rather than a vector
    return np.sum(y != y_pred)
def _check_y_shapes(y_true, y_pred):
''' Check that two target vectors are compatible.
This is based on sklearn's validation in sklearn.metrics.regression._check_reg_targets
and sklearn.metrics.classification._check_targets.
'''
check_consistent_length(y_true, y_pred)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
|
import sys
import os
import glob
import inspect
import pylab as pl
import numpy as np  # np is used explicitly below (np.loadtxt, np.where, np.polyfit)
from numpy import *
from scipy import optimize
import pickle
import time
import copy
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]) + "/templates")
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from templutils import *
import pylabsetup
pl.ion()
def minfunc(p, y, x, e, secondg, plot=False):
'''
p is the parameter list
if secondg=1: secondgaussian added
if secondg=0: secondgaussian not
parameters are:
p[0]=first gaussian normalization (negative if fitting mag)
p[1]=first gaussian mean
p[2]=first gaussian sigma
p[3]=linear decay offset
p[4]=linear decay slope
    p[5]=exponential rise slope
p[6]=exponential zero point
p[7]=second gaussian normalization (negative if fitting mag)
p[8]=second gaussian mean
p[9]=second gaussian sigma
'''
if plot:
pl.figure(3)
pl.errorbar(x, y, yerr=e, color='k')
import time
# time.sleep(1)
# print sum(((y-mycavvaccaleib(x,p,secondg=True))**2))
if secondg > 0:
return sum(((y - mycavvaccaleib(x, p, secondg=True)) ** 2) / e ** 2)
else:
return sum(((y - mycavvaccaleib(x, p, secondg=False)) ** 2) / e ** 2)
import scipy.optimize
if __name__ == '__main__':
lcv = np.loadtxt(sys.argv[1], unpack=True)
secondg = False
try:
if int(sys.argv[2]) > 0:
secondg = True
except:
pass
x = lcv[1]
y = lcv[2]
e = lcv[3]
mjd = lcv[0]
ax = pl.figure(0, figsize=(10,5)).add_subplot(111)
#pl.errorbar(x, y, yerr=e, color="#47b56c", label="data")
p0 = [0] * 10
p0[0] = -4
peakdate = x[np.where(y == min(y))[0]]
if len(peakdate) > 1:
peakdate = peakdate[0]
p0[1] = peakdate + 5
p0[2] = 10 # sigma
#pl.draw()
lintail = np.where(x > peakdate + 50)[0]
if len(lintail) < 1:
print "no tail data"
linfit = np.polyfit(x[-2:], y[-2:], 1)
p0[3] = linfit[1]
p0[4] = linfit[0]
else:
linfit = np.polyfit(x[lintail], y[lintail], 1)
p0[3] = linfit[1]
p0[4] = linfit[0]
p0[5] = 0.1
p0[6] = peakdate - 20
p0[7] = -1
p0[8] = peakdate + 25
p0[9] = 10
pl.figure(3)
pl.clf()
# pf= scipy.optimize.minimize(minfunc,p0,args=(y,x,1), method='Powell')#,options={'maxiter':5})
if secondg:
p0[0] += 1.5
p0[1] *= 2
pl.plot(x[10:], mycavvaccaleib(x[10:], p0, secondg=True), 'm')
pf = scipy.optimize.minimize(minfunc, p0, args=(y[10:], x[10:], e[10:], 1), method='Powell') # ,options={'maxiter':5})
else:
pl.plot(x[10:], mycavvaccaleib(x[10:], p0, secondg=False), 'k')
pf = scipy.optimize.minimize(minfunc, p0, args=(y[10:], x[10:], e[10:], 0), method='Powell') # ,options={'maxiter':5})
#pl.figure(4)
pl.figure(0)
    ax.errorbar(mjd+0.5-53000, y, yerr=e, fmt=None,
                alpha=0.5, color='k', markersize=10)
    ax.plot(mjd+0.5-53000, y, '.',
            alpha=0.5, color='#47b56c', markersize=10,
label = "SN 19"+sys.argv[1].split('/')[-1].\
replace('.dat', '').replace('.', ' '))
# mycavvaccaleib(x,pf.x, secondg=True)
mycavvaccaleib(x, pf.x, secondg=secondg)
ax.plot(mjd[10:]+0.5-53000, mycavvaccaleib(x[10:], pf.x, secondg=secondg), 'k',
linewidth=2, label="vacca leibundgut fit") # , alpha=0.5)
# pl.plot(x,mycavvaccaleib(x,pf.x, secondg=True), 'k',linewidth=2, label="fit")
xlen = mjd.max() - mjd.min()
ax.set_xlim(mjd.min()-xlen*0.02+0.5-53000, mjd.max()+xlen*0.02+0.5-53000)
ax.set_ylim(max(y + 0.1), min(y - 0.1))
ax2 = ax.twiny()
Vmax = 2449095.23-2453000
ax2.tick_params('both', length=10, width=1, which='major')
ax2.tick_params('both', length=5, width=1, which='minor')
ax2.set_xlabel("phase (days)")
ax2.set_xlim((ax.get_xlim()[0] - Vmax, ax.get_xlim()[1] - Vmax))
# pl.ylim(10,21)
pl.draw()
pl.legend()
ax.set_xlabel("JD - 24530000")
ax.set_ylabel("magnitude")
#pl.title(sys.argv[1].split('/')[-1].replace('.dat', '').replace('.', ' '))
#pl.show()
pl.tight_layout()
pl.savefig("../fits/" + sys.argv[1].split('/')[-1].replace('.dat', '.vdfit.pdf'))
cmd = "pdfcrop " + "../fits/" + sys.argv[1].split('/')[-1].replace('.dat', '.vdfit.pdf')
print cmd
os.system(cmd)
|
from tailorscad.builder.openscad import build_with_openscad
from tailorscad.builder.coffeescad import build_with_coffeescad
from tailorscad.constants import OPENSCAD
from tailorscad.constants import COFFEESCAD
def build_from_state(state):
    if state.scad_type == OPENSCAD:
        build_with_openscad(state)
    elif state.scad_type == COFFEESCAD:
        build_with_coffeescad(state)
|
import yaml
from os import makedirs
from os.path import join,dirname,realpath,isdir
script_dir = dirname(realpath(__file__))
default_yml_filepath = join(script_dir,'defaults.yml')
defaults = {
"output_dir": 'output',
"header_img_dir": 'imgs/headers/',
"scaled_img_dir": 'imgs/scaled/',
"original_img_dir": 'imgs/original/',
"header_img_url": 'imgs/headers/',
"scaled_img_url": 'imgs/scaled/',
"original_img_url": 'imgs/original/',
"template_dir": join(script_dir,'templates'),
"max_article_img_width": 710,
"max_avatar_width": 710,
"database_file": "database.yml",
"static_dir": join(script_dir,'static'),
"copyright_msg": None,
"extra_links": [],
"import_to_discourse": False,
"strapline": None,
}
config = dict()
def getConfig():
if not config:
raise RuntimeError('config not loaded yet')
return config
def loadConfig(yml_filepath):
config.update(defaults)
    with open(yml_filepath) as f:
        patch = yaml.safe_load(f)
config.update(patch)
# make paths absolute
config['header_img_dir'] = join(config['output_dir'],config['header_img_dir'])
config['scaled_img_dir'] = join(config['output_dir'],config['scaled_img_dir'])
config['original_img_dir'] = join(config['output_dir'],config['original_img_dir'])
config['database_file'] = join(config['output_dir'],config['database_file'])
def makeDirs():
if not config:
raise RuntimeError('config not loaded yet')
for key in ['header_img_dir','scaled_img_dir','original_img_dir']:
path = config[key]
if not isdir(path):
makedirs(path)
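# Usage sketch (assumes a config file such as 'site.yml' exists; the filename
# is hypothetical):
if __name__ == '__main__':
    loadConfig('site.yml')
    makeDirs()
    print(getConfig()['output_dir'])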
|
import sys
text = sys.stdin.read()
print 'Text:',text
words = text.split()
print 'Words:',words
wordcount = len(words)
print 'Wordcount:',wordcount
|
import socket
import sys
HOST, PORT = "24.21.106.140", 8080
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    # Connect to server, then prompt for input
    sock.connect((HOST, PORT))
    print "Connected to ", HOST, ":", PORT, "\n", "Awaiting input\n"
    data = sys.stdin.readline()
    exit = False
while exit != True:
sock.sendall(data + "\n")
if data.strip() == 'bye':
exit = True
received = sock.recv(1024)
print "Sent: " , data
print "Received: " , received
data = sys.stdin.readline()
# Receive data from the server and shut down
finally:
sock.close()
|
DATASET_DIR = '/tmp'
BRAIN_DIR = '/tmp'
GENRES = [
'blues', 'classical', 'country', 'disco', 'hiphop',
'jazz', 'metal', 'pop', 'reggae', 'rock'
]
NUM_BEATS = 10
KEEP_FRAMES = 0
TRAIN_TEST_RATIO = [7, 3]
MODE = 'nn'
PCA = False
FEATURES = ['mfcc', 'dwt', 'beat']
MFCC_EXTRA = ['delta', 'ddelta', 'energy']
DWT = ['mean', 'std', 'max', 'min']
FEATURES_LENGTH = {
'mfcc' : 160,
'dwt' : 112,
'beat' : 11
}
FRAME_LENGTH = 0.025
HOP_LENGTH = 0.005
N_MFCC = 13
W_FRAME_SCALE = 10
NN = {
'NUM_HIDDEN_LAYERS' : 2,
'HIDDEN_INPUTS' : [1024, 1024],
'RANDOM' : True,
'BATCH_SIZE' : 100,
'TRAINING_CYCLES' : 1000,
'LEARNING_RATE' : 0.01,
'DROPOUT_PROB' : 0.6
}
CNN = {
'NUM_HIDDEN_LAYERS' : 2,
'NUM_DENSE_LAYERS' : 1,
'HIDDEN_FEATURES' : [32, 64],
'DENSE_INPUTS' : [128],
'INPUT_SHAPE' : [16, 17],
'PATCH_SIZE' : [5, 5],
'RANDOM' : False,
'STRIDES' : [1, 1, 1, 1],
'BATCH_SIZE' : 100,
'TRAINING_CYCLES' : 1000,
'LEARNING_RATE' : 0.01,
'DROPOUT_PROB' : 0.6
}
|
'''
@author: KyleLevien
'''
from ..defaultpackage.package import Package
class _Ghostview(Package):
def __init__(self):
Package.__init__(self)
|
import os, sys
module = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, module)
from netkiller.docker import *
extra_hosts = [
'mongo.netkiller.cn:172.17.195.17', 'eos.netkiller.cn:172.17.15.17',
'cfca.netkiller.cn:172.17.15.17'
]
nginx = Services('nginx')
nginx.image('nginx:latest')
nginx.container_name('nginx')
nginx.extra_hosts(extra_hosts)
nginx.depends_on('test')
sms = Services('sms')
sms.image('sms:latest')
sms.container_name('nginx')
sms.hostname("7899")
sms.depends_on(['aaa', 'bbb', 'ccc'])
test = Services('test')
test.image('test:latest')
test.depends_on(nginx)
test.logging('fluentd', {
'fluentd-address': 'localhost:24224',
'tag': 'dev.redis.sfzito.com'
})
development = Composes('development')
development.version('3.9')
development.services(nginx)
development.services(sms)
development.services(test)
development.workdir('/tmp/compose')
testing = Composes('testing')
testing.version('3.9')
testing.services(nginx)
testing.services(sms)
testing.services(test)
testing.workdir('/tmp/compose')
staging = Composes('staging')
staging.version('3.9')
staging.services(sms)
if __name__ == '__main__':
try:
docker = Docker({
'DOCKER_HOST': 'ssh://root@192.168.30.11',
'SSS': 'sdfff'
})
docker.sysctl({'neo': '1'})
docker.environment(development)
docker.environment(testing)
docker.main()
except KeyboardInterrupt:
print("Crtl+C Pressed. Shutting down.")
|
"""
Module for running keyword-driven tests
"""
from __future__ import with_statement
import time
import datetime
import re
from adapterlib.ToolProtocol import *
from adapterlib.ToolProtocolHTTP import *
import adapterlib.keyword as keyword
import adapterlib.keywordproxy as keywordproxy
from adapterlib.logger import KeywordLogger
class AdapterCompleter(object):
""" Simple class for doing tab-completion in interactive mode"""
def __init__(self, keywords ):
self.keywords = sorted(keywords)
def complete(self, text, state ):
response = None
if state == 0:
if text:
self.matches = [s for s in self.keywords if s and s.startswith(text)]
else:
self.matches = self.keywords[:]
try:
response = self.matches[state]
except IndexError:
response = None
return response
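# A minimal usage sketch (assuming a Unix build with readline; this mirrors
# the wiring done later in TestRunner.runInteractive):
# import readline
# readline.set_completer(AdapterCompleter(["LaunchApp", "SetTarget"]).complete)
# readline.parse_and_bind('tab: complete')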
class Target(object):
def __init__(self,name,):
self.__name = name
def setup(self):
raise NotImplementedError()
def cleanup(self):
raise NotImplementedError()
@property
def name(self):
return self.__name
def takeScreenShot(self, path):
return False
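# A hypothetical concrete target, to illustrate the interface Target expects
# subclasses to fill in (the class name and prints are illustrative only):
# class DummyTarget(Target):
#     def setup(self):
#         print "setting up", self.name
#     def cleanup(self):
#         print "cleaning up", self.name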
class TestRunner(object):
"""
TestRunner class is used to run Keyword-driven tests.
The class allows test to be run interactively (given through stdin), from
file or from server.
To run tests from a server, TestRunner uses classes ToolProtocol and
ToolProtocolHTTP.
"""
def __init__(self, targets, delay, record = False ):
"""
Initializer.
@type targets: list
@param targets: list of System under test (SUT) identifiers.
@type delay: float
        @param delay: Wait-time between consecutive keywords (in seconds)
@type record: boolean
@param record: Is the test recorded to html-file
"""
self._targetNames = targets
self._targets = []
self.delay = delay
self._rec_process = None
self._kwCount = 1
self._logger = None
self._separator = " "
if record:
self._logger = KeywordLogger()
self._kw_cache = {}
# Special commands listed here for interactive mode completer
self._commands = {}
self._commands["exit"] = ["quit","q","exit"]
self._commands["kws"] = ["list","kws","list full","kws full"]
self._commands["info"] = ["info"]
self._commands["special"] = []
def _setupTestAutomation(self):
"""Sets up test automation environment
@rtype: boolean
@returns: True if success, False otherwise
"""
raise NotImplementedError()
def _cleanupTestAutomation(self):
"""Cleans up test automation environment"""
raise NotImplementedError()
def __setTarget(self,targetName):
if re.match("['\"].*['\"]",targetName):
targetName = targetName[1:-1]
if targetName == "test" or targetName == "testi":
print "Warning: 'test' and 'testi' considered dummy targets."
return True
for t in self._targets:
if t.name == targetName:
self._activeTarget = t
return True
return False
def initTest(self):
"""
Inits a test run.
Creates a log file and starts recording if defined.
"""
print "Setting up testing environment..."
if not self._setupTestAutomation():
return False
print "setup complete"
self._activeTarget = self._targets[0]
if self._logger:
print "Recording test to a file"
self._logger.startLog()
return True
def _stopTest(self):
"""
Stops a test run.
Closes the log-file and stops recording process.
"""
print "Cleaning up testing environment..."
self._cleanupTestAutomation()
print "clean up complete"
if self._logger:
self._logger.endLog()
print "Test finished"
def endTest(self):
print "Shutting down"
self._stopTest()
def keywordInfo(self, kw ):
kws = self._getKeywords()
if kw in kws:
print kw
self.printKw(kw,"#",kws[kw][1])
def printKw(self,kw,header,text):
print header*len(kw)
print
docstring = text.splitlines()
strip_len = 0
if len(docstring[0]) == 0:
docstring = docstring[1:]
for line in docstring:
if len(line.strip()) > 0:
first_line = line.lstrip()
strip_len = len(line) - len(first_line)
break
for line in docstring:
print line[strip_len:].rstrip()
print
def listKeywords(self, basekw = keyword.Keyword,full=False,header="#"):
kws = self._getKeywords({},basekw)
kws_keys = sorted(kws.keys())
for kw in kws_keys:
print kw
if full:
self.printKw(kw,header,kws[kw][1])
    def _getKeywords(self, kw_dictionary = None, basekw = keyword.Keyword):
        # A mutable default dict would be shared across calls and defeat the
        # cache check below, so create a fresh one per call instead.
        use_cache = kw_dictionary is None
        if use_cache:
            kw_dictionary = {}
        if use_cache and basekw in self._kw_cache:
            return self._kw_cache[basekw]
for kw in basekw.__subclasses__():
kw_name = str(kw)[str(kw).rfind('.')+1:str(kw).rfind("'")]
if not kw_name.endswith("Keyword"):
kw_dictionary[kw_name] = (str(kw.__module__),str(kw.__doc__))
self._getKeywords(kw_dictionary,kw)
if use_cache:
self._kw_cache[basekw] = kw_dictionary
return kw_dictionary
def __instantiateKeywordProxyObject(self,kwproxy, kwName,kwAttr,kwproxy_class):
kwobject = None
try:
kwmodule = __import__(kwproxy_class, globals(), locals(), [kwproxy], -1)
kwobject = getattr(kwmodule,kwproxy)()
if not kwobject.initialize(kwName, kwAttr,self._activeTarget):
kwobject = None
if kwobject:
print 'Recognized keyword: %s' % kwName
print 'Attributes: %s' % kwAttr
except Exception, e:
print e
print "Error: KeywordProxy error"
kwobject = None
return kwobject
def __instantiateKeywordObject(self,kw_name,attributes,kw_class):
kwobject = None
try:
kwmodule = __import__(kw_class, globals(), locals(), [kw_name], -1)
kwobject = getattr(kwmodule,kw_name)()
print 'Recognized keyword: %s' % kw_name
print 'Attributes: %s' % attributes
if not kwobject.initialize(attributes,self._activeTarget):
print "Invalid parameters"
kwobject = None
except Exception, e:
print e
print "Error: Keyword not recognized!"
kwobject = None
return kwobject
def _instantiateKeyword(self, kwName, kwAttr):
kw_dictionary = self._getKeywords()
kwproxy_dictionary = self._getKeywords({}, keywordproxy.KeywordProxy)
kwobject = None
for kw in kw_dictionary:
if kw.lower() == kwName.lower():
kwobject = self.__instantiateKeywordObject(kw,kwAttr,kw_dictionary[kw][0])
break
else:
for kwproxy in kwproxy_dictionary:
kwobject = self.__instantiateKeywordProxyObject(kwproxy, kwName,kwAttr,kwproxy_dictionary[kwproxy][0])
if kwobject:
break
if not kwobject:
print "Error: Keyword not recognized!"
return kwobject
def __executeKeyword(self, kw):
"""
Executes a single keyword.
Searches a corresponding keyword object from the list of keywords and executes the keyword with that object.
@type kw: string
@param kw: executed keyword
        @rtype: boolean or string
        @return: True if execution was successful; False if execution completed
        but the keyword returned False; "ERROR" if there were problems in the
        execution.
"""
print ""
print "Executing keyword: %s" % kw
#Which keyword
result = False
kw = kw.strip()
if kw.startswith("kw_"):
kw = kw[3:].strip()
# Testengine-note: generate-taskswitcher uses space as separator
if kw.startswith("LaunchApp") or kw.startswith("SetTarget"):
if not (kw.startswith("LaunchApp#") or kw.startswith("SetTarget#")):
kw = kw.replace(" ",self._separator,1)
kw_split = kw.split(self._separator,1)
kwName = kw_split[0].strip()
if len(kw_split) == 2:
kwAttr = kw_split[1].strip()
else:
kwAttr = ""
#Changing target
if kwName.lower() == "settarget":
result = self.__setTarget(kwAttr)
print 'result: %s' % str(result)
return result
kwobject = self._instantiateKeyword(kwName,kwAttr)
if not kwobject:
return "ERROR"
startTime = datetime.datetime.now()
result = kwobject.execute()
execTime = datetime.datetime.now() - startTime
print 'result: %s' % str(result)
kwDelay = kwobject.delay
if kwDelay != -1:
if self.delay > kwDelay:
kwDelay = self.delay
time.sleep(kwDelay)
if self._logger:
self._logger.logKeyword(self._activeTarget, kwobject, result, str(execTime))
        self._kwCount += 1
return result
def _handleSpecialCommands(self,command):
return False
def runInteractive(self):
"""
Runs an interactive test.
Keywords are read from stdin.
"""
# Only import here, so that we can use completion mechanism
# Readline only available in unix
try:
import readline
kws = self._getKeywords({}, keyword.Keyword).keys()
for command_list in self._commands.values():
kws.extend(command_list)
readline.set_completer(AdapterCompleter(kws).complete)
readline.parse_and_bind('tab: complete')
        except ImportError:
            # readline is not available (e.g. on Windows); tab-completion is skipped
            pass
while True:
try:
kw = raw_input(">").strip()
if kw in self._commands["exit"]:
return
elif kw == "":
continue
kw_split = kw.split(" ")
if kw_split[0] in self._commands["kws"]:
                if len(kw_split) > 1 and kw_split[1] == "full" and " ".join(kw_split[0:2]) in self._commands["kws"]:
if len(kw_split) == 3:
char = kw_split[2]
else:
char = "#"
self.listKeywords(full=True,header=char)
else:
self.listKeywords(full=False)
elif kw_split[0] in self._commands["info"] and len(kw_split) == 2:
self.keywordInfo(kw_split[1])
elif not self._handleSpecialCommands(kw):
self.__executeKeyword(kw)
except EOFError:
break
def runFromServer(self, address, port, username = None, protocol= None ):
"""
Runs a test from server.
@type address: string
@param address: Address of the server
@type port: integer
@param port: Port of the server
@type username: string
@param username: Username is required when using http or https protocol
@type protocol: string
@param protocol: Protocol that is used in the connection. Options are http and https.
        A plain socket is used if the parameter is not given.
"""
toolProtocol = None
#while True:
        if address is not None and port is not None:
if(protocol):
base,path = address.split("/",1)
toolProtocol = ToolProtocolHTTP()
toolProtocol.init(base,path,port,username,protocol)
else:
toolProtocol = ToolProtocol()
toolProtocol.init(address,port)
if toolProtocol.hasConnection() == False:
#print "Connection to the MBT server failed, reconnecting..."
print "Connection to the MBT server failed."
# time.sleep(5)
return
#else:
# break
while True:
kw = ""
#if passive:
# kw = toolProtocol.receiveKeyword()
#else:
kw = toolProtocol.getKeyword()
if (kw == '' or kw =='\n' or kw == "ERROR"):
return
result = self.__executeKeyword(kw)
if(result == "ERROR"):
toolProtocol.putResult(False)
toolProtocol.bye()
return
toolProtocol.putResult(result)
def runFromFile(self, fileName ):
"""
Runs a test from file.
@type fileName: string
@param fileName: path to the file that contains the test
"""
try:
with open(fileName,'r') as inputFile:
for line in inputFile:
kw = line.strip()
if not kw:
break
result = self.__executeKeyword(kw)
if(result == "ERROR"):
break
except IOError:
print "Error when reading file: %s" % fileName
|
HOST = '127.0.0.1'
PORT = 8080
from Tkinter import *
import tkColorChooser
import socket
import thread
import spots
def main():
global hold, fill, draw, look
hold = []
fill = '#000000'
connect()
root = Tk()
root.title('Paint 1.0')
root.resizable(False, False)
upper = LabelFrame(root, text='Your Canvas')
lower = LabelFrame(root, text='Their Canvas')
draw = Canvas(upper, bg='#ffffff', width=400, height=300, highlightthickness=0)
look = Canvas(lower, bg='#ffffff', width=400, height=300, highlightthickness=0)
cursor = Button(upper, text='Cursor Color', command=change_cursor)
canvas = Button(upper, text='Canvas Color', command=change_canvas)
draw.bind('<Motion>', motion)
draw.bind('<ButtonPress-1>', press)
draw.bind('<ButtonRelease-1>', release)
draw.bind('<Button-3>', delete)
upper.grid(padx=5, pady=5)
lower.grid(padx=5, pady=5)
draw.grid(row=0, column=0, padx=5, pady=5, columnspan=2)
look.grid(padx=5, pady=5)
cursor.grid(row=1, column=0, padx=5, pady=5, sticky=EW)
canvas.grid(row=1, column=1, padx=5, pady=5, sticky=EW)
root.mainloop()
def connect():
try:
start_client()
except:
start_server()
thread.start_new_thread(processor, ())
def start_client():
global QRI
server = socket.socket()
server.connect((HOST, PORT))
QRI = spots.qri(server)
def start_server():
global QRI
server = socket.socket()
server.bind(('', PORT))
server.listen(1)
QRI = spots.qri(server.accept()[0])
def processor():
while True:
ID, (func, args, kwargs) = QRI.query()
getattr(look, func)(*args, **kwargs)
def call(func, *args, **kwargs):
try:
QRI.call((func, args, kwargs), 0.001)
except:
pass
def change_cursor():
global fill
color = tkColorChooser.askcolor(color=fill)[1]
if color is not None:
fill = color
def change_canvas():
color = tkColorChooser.askcolor(color=draw['bg'])[1]
if color is not None:
draw['bg'] = color
draw.config(bg=color)
call('config', bg=color)
def motion(event):
if hold:
hold.extend([event.x, event.y])
event.widget.create_line(hold[-4:], fill=fill, tag='TEMP')
call('create_line', hold[-4:], fill=fill, tag='TEMP')
def press(event):
global hold
hold = [event.x, event.y]
def release(event):
global hold
if len(hold) > 2:
event.widget.delete('TEMP')
event.widget.create_line(hold, fill=fill, smooth=True)
call('delete', 'TEMP')
call('create_line', hold, fill=fill, smooth=True)
hold = []
def delete(event):
event.widget.delete(ALL)
call('delete', ALL)
if __name__ == '__main__':
main()
|
/*Owner & Copyrights: Vance King Saxbe. A.*/""" Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @vsaxbe@yahoo.com. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager."""#!/usr/bin/env python3
import _thread
import os
import sys
import time
import gc
from src import googlequotemachine
from src import yahooquotemachine
from src import bloombergquotemachine
from src import createtablesgooglefinance
from src import createtablesyahoofinance
from src import createtablesbloomberg
from time import localtime, strftime
start1 = []
sys.setrecursionlimit(1000000)
database = "data/"
markettime = {}
with open("conf/MarketTimings.conf") as fillees:
mlist = fillees.read().splitlines()
fillees.close()
for line in mlist:
items = line.split(", ")
key, values = items[0], items[1]
markettime[key] = values
with open('conf/symbolname.conf') as fille:
    synamelist = fille.read().splitlines()
timetorun = 1800
cycle = 1
while("TRUE"):
    with open('conf/urls.conf') as openedfile:
        fileaslist = openedfile.read().splitlines()
a_lock = _thread.allocate_lock()
thr = []
with a_lock:
print("locks placed and Market engine is running for the...", cycle)
for lines in fileaslist:
lisj = lines.split('", "')
mtime = markettime[lisj[2].replace('"','')]
mktime = mtime.split("-")
if mktime[1] < mktime[0]:
righto = mktime[1].split(":")
close = str(str(int(righto[0])+24)+":"+righto[1])
else:
close = mktime[1]
rightnow = strftime("%H:%M", localtime())
if rightnow < strftime("04:00"):
right = rightnow.split(":")
rightnow = str(str(int(right[0])+24)+":"+right[1])
if (close > rightnow > mktime[0]):
print("Market ", lisj[2].replace('.db"',''), " is starting at cycle ", cycle)
if lisj[1] =='g':
thr.append(_thread.start_new_thread(googlequotemachine.actionking, (a_lock, start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
elif lisj[1] =='y':
thr.append(_thread.start_new_thread(yahooquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
else:
thr.append(_thread.start_new_thread(bloombergquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,) ))
time.sleep(0.00001)
print("locks placed and Market engine is running for the....", cycle, " time...with threads", thr )
time.sleep(timetorun)
gc.collect()
print("locks released and Market engine is restarting for the...", cycle, " time...")
cycle = cycle + 1
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from textwrap import dedent
import pytest
import pytablewriter
from ...._common import print_test_result
from ....data import (
Data,
headers,
mix_header_list,
mix_value_matrix,
null_test_data_list,
value_matrix,
value_matrix_iter,
value_matrix_with_none,
vut_style_tabledata,
vut_styles,
)
from .._common import regexp_ansi_escape, strip_ansi_escape
normal_test_data_list = [
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=None,
expected=dedent(
"""\
.. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 3, 4, 3
"""
),
),
Data(
table=None,
indent=0,
header=None,
value=value_matrix,
expected=dedent(
"""\
.. csv-table::
:widths: 1, 5, 5, 3, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=1,
header=headers,
value=value_matrix,
expected=""" .. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
""",
),
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix_with_none,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 5, 4, 6
1, , "a", 1.0,
, 2.2, , 2.2, 2.2
3, 3.3, "ccc", , "cccc"
, , , ,
"""
),
),
Data(
table="table name",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "i", "f", "c", "if", "ifc", "bool", "inf", "nan", "mix_num", "time"
:widths: 3, 4, 6, 4, 5, 6, 8, 5, 9, 27
1, 1.10, "aa", 1.0, 1, True, Infinity, NaN, 1, 2017-01-01T00:00:00
2, 2.20, "bbb", 2.2, 2.2, False, Infinity, NaN, Infinity, "2017-01-02 03:04:05+09:00"
3, 3.33, "cccc", -3.0, "ccc", True, Infinity, NaN, NaN, 2017-01-01T00:00:00
"""
),
),
]
table_writer_class = pytablewriter.RstCsvTableWriter
class Test_RstCsvTableWriter_write_new_line:
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_RstCsvTableWriter_write_table:
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in normal_test_data_list
],
)
def test_normal(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_styles(self):
writer = table_writer_class()
writer.from_tabledata(vut_style_tabledata)
writer.column_styles = vut_styles
expected = dedent(
"""\
.. csv-table:: style test
:header: "none", "empty", "tiny", "small", "medium", "large", "null w/ bold", "L bold", "S italic", "L bold italic"
:widths: 6, 7, 6, 7, 8, 7, 14, 8, 10, 15
111, 111, 111, 111, "111", 111, , **111**, *111*, **111**
1234, 1234, 1234, 1234, "1,234", 1 234, , **1234**, *1234*, **1234**
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert regexp_ansi_escape.search(out)
assert strip_ansi_escape(out) == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in null_test_data_list
],
)
def test_normal_empty(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
assert writer.dumps() == ""
assert str(writer) == ""
class Test_RstCsvTableWriter_write_table_iter:
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[
[
"tablename",
["ha", "hb", "hc"],
value_matrix_iter,
dedent(
"""\
.. csv-table:: tablename
:header: "ha", "hb", "hc"
:widths: 5, 5, 5
1, 2, 3
11, 12, 13
1, 2, 3
11, 12, 13
101, 102, 103
1001, 1002, 1003
"""
),
]
],
)
def test_normal(self, capsys, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.iteration_length = len(value)
writer.write_table_iter()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in null_test_data_list],
)
def test_normal_smoke(self, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.write_table_iter()
|
"""
demos reading HiST camera parameters from XML file
"""
from histutils.hstxmlparse import xmlparam
from argparse import ArgumentParser
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument("fn", help="xml filename to parse")
    args = p.parse_args()
    params = xmlparam(args.fn)
print(params)
|
from traits.api import HasTraits, Float
class Person(HasTraits):
weight = Float(150.0)
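# Usage sketch (standard Traits behavior; not part of the original module):
# p = Person()          # p.weight -> 150.0, the declared default
# p.weight = 161.5      # OK: Float validates and accepts real numbers
# p.weight = "heavy"    # raises TraitError: assignment is type-checked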
|
import struct
import uuid
from enum import IntEnum
from typing import List, Optional, Set
from .sid import SID
class ACEFlag(IntEnum):
""" ACE type-specific control flags. """
OBJECT_INHERIT = 0x01
CONTAINER_INHERIT = 0x02
NO_PROPAGATE_INHERIT = 0x04
INHERIT_ONLY = 0x08
INHERITED = 0x10
SUCCESSFUL_ACCESS = 0x40
FAILED_ACCESS = 0x80
@property
def short_name(self) -> str:
""" The SDDL short name of the flag. """
short_names = {
"OBJECT_INHERIT": "OI",
"CONTAINER_INHERIT": "CI",
"NO_PROPAGATE_INHERIT": "NP",
"INHERIT_ONLY": "IO",
"INHERITED": "ID",
"SUCCESSFUL_ACCESS": "SA",
"FAILED_ACCESS": "FA",
}
return short_names[self.name]
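# Flags are single bits, so a set of them encodes to one control byte by
# summation, which is exactly how ACE.to_binary() packs them (values below
# are illustrative):
# flags = {ACEFlag.OBJECT_INHERIT, ACEFlag.CONTAINER_INHERIT}
# sum(flags)  # -> 0x03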
class ACEType(IntEnum):
""" Type of the ACE. """
ACCESS_ALLOWED = 0
ACCESS_DENIED = 1
SYSTEM_AUDIT = 2
SYSTEM_ALARM = 3
ACCESS_ALLOWED_COMPOUND = 4
ACCESS_ALLOWED_OBJECT = 5
ACCESS_DENIED_OBJECT = 6
SYSTEM_AUDIT_OBJECT = 7
SYSTEM_ALARM_OBJECT = 8
ACCESS_ALLOWED_CALLBACK = 9
ACCESS_DENIED_CALLBACK = 10
ACCESS_ALLOWED_CALLBACK_OBJECT = 11
ACCESS_DENIED_CALLBACK_OBJECT = 12
SYSTEM_AUDIT_CALLBACK = 13
SYSTEM_ALARM_CALLBACK = 14
SYSTEM_AUDIT_CALLBACK_OBJECT = 15
SYSTEM_ALARM_CALLBACK_OBJECT = 16
SYSTEM_MANDATORY_LABEL = 17
SYSTEM_RESOURCE_ATTRIBUTE = 18
SYSTEM_SCOPED_POLICY_ID = 19
@property
def short_name(self) -> str:
""" The SDDL short name of the type. """
short_names = {
"ACCESS_ALLOWED": "A",
"ACCESS_DENIED": "D",
"SYSTEM_AUDIT": "AU",
"SYSTEM_ALARM": "AL",
"ACCESS_ALLOWED_COMPOUND": "",
"ACCESS_ALLOWED_OBJECT": "OA",
"ACCESS_DENIED_OBJECT": "OD",
"SYSTEM_AUDIT_OBJECT": "OU",
"SYSTEM_ALARM_OBJECT": "OL",
"ACCESS_ALLOWED_CALLBACK": "XA",
"ACCESS_DENIED_CALLBACK": "XD",
"ACCESS_ALLOWED_CALLBACK_OBJECT": "ZA",
"ACCESS_DENIED_CALLBACK_OBJECT": "ZD",
"SYSTEM_AUDIT_CALLBACK": "XU",
"SYSTEM_ALARM_CALLBACK": "XL",
"SYSTEM_AUDIT_CALLBACK_OBJECT": "ZU",
"SYSTEM_ALARM_CALLBACK_OBJECT": "ZL",
"SYSTEM_MANDATORY_LABEL": "ML",
"SYSTEM_RESOURCE_ATTRIBUTE": "RA",
"SYSTEM_SCOPED_POLICY_ID": "SP",
}
return short_names[self.name]
@property
def is_object_type(self) -> bool:
""" Flag for ACE types with objects. """
return self in (
ACEType.ACCESS_ALLOWED_OBJECT,
ACEType.ACCESS_DENIED_OBJECT,
ACEType.SYSTEM_AUDIT_OBJECT,
ACEType.SYSTEM_ALARM_OBJECT,
ACEType.ACCESS_ALLOWED_CALLBACK_OBJECT,
ACEType.ACCESS_DENIED_CALLBACK_OBJECT,
ACEType.SYSTEM_AUDIT_CALLBACK_OBJECT,
ACEType.SYSTEM_ALARM_CALLBACK_OBJECT,
)
class ACERight(IntEnum):
""" The rights of the ACE. """
GENERIC_READ = 0x80000000
    GENERIC_WRITE = 0x40000000
GENERIC_EXECUTE = 0x20000000
GENERIC_ALL = 0x10000000
MAXIMUM_ALLOWED = 0x02000000
ACCESS_SYSTEM_SECURITY = 0x01000000
SYNCHRONIZE = 0x00100000
WRITE_OWNER = 0x00080000
WRITE_DACL = 0x00040000
READ_CONTROL = 0x00020000
DELETE = 0x00010000
DS_CONTROL_ACCESS = 0x00000100
DS_CREATE_CHILD = 0x00000001
DS_DELETE_CHILD = 0x00000002
ACTRL_DS_LIST = 0x00000004
DS_SELF = 0x00000008
DS_READ_PROP = 0x00000010
DS_WRITE_PROP = 0x00000020
DS_DELETE_TREE = 0x00000040
DS_LIST_OBJECT = 0x00000080
@property
def short_name(self) -> str:
""" The SDDL short name of the access right. """
short_names = {
"GENERIC_READ": "GR",
"GENERIC_WRITE": "GW",
"GENERIC_EXECUTE": "GX",
"GENERIC_ALL": "GA",
"MAXIMUM_ALLOWED": "MA",
"ACCESS_SYSTEM_SECURITY": "AS",
"SYNCHRONIZE": "SY",
"WRITE_OWNER": "WO",
"WRITE_DACL": "WD",
"READ_CONTROL": "RC",
"DELETE": "SD",
"DS_CONTROL_ACCESS": "CR",
"DS_CREATE_CHILD": "CC",
"DS_DELETE_CHILD": "DC",
"ACTRL_DS_LIST": "LC",
"DS_SELF": "SW",
"DS_READ_PROP": "RP",
"DS_WRITE_PROP": "WP",
"DS_DELETE_TREE": "DT",
"DS_LIST_OBJECT": "LO",
}
return short_names[self.name]
class ACLRevision(IntEnum):
""" The ACL revision. """
ACL_REVISION = 0x02
ACL_REVISION_DS = 0x04
class ACE:
"""
    A class representing an access control entry (ACE), which encodes the
    rights afforded to a principal.
:param ACEType ace_type: the type of the ACE.
:param Set[ACEFlag] flags: the set of flags for the ACE.
:param int mask: the access mask to encode the user rights as an int.
:param SID trustee_sid: the SID of the trustee.
:param uuid.UUID|None object_type: a UUID that identifies a property
set, property, extended right, or type of child object.
:param uuid.UUID|None inherited_object_type: a UUID that identifies the
type of child object that can inherit the ACE.
:param bytes application_data: optional application data.
"""
def __init__(
self,
ace_type: ACEType,
flags: Set[ACEFlag],
mask: int,
trustee_sid: SID,
object_type: Optional[uuid.UUID],
inherited_object_type: Optional[uuid.UUID],
application_data: bytes,
) -> None:
self.__type = ace_type
self.__flags = flags
self.__mask = mask
self.__object_type = object_type
self.__inherited_object_type = inherited_object_type
self.__trustee_sid = trustee_sid
self.__application_data = application_data
@classmethod
def from_binary(cls, data: bytes) -> "ACE":
"""
Create an ACE object from a binary blob.
:param bytes data: a little-endian byte ordered byte input.
:returns: A new ACE instance.
:rtype: ACE
:raises TypeError: when the parameter is not bytes.
:raises ValueError: when the input cannot be parsed as an ACE
object.
"""
try:
if not isinstance(data, bytes):
raise TypeError("The `data` parameter must be bytes")
object_type = None
inherited_object_type = None
application_data = None
ace_type, flags, size, mask = struct.unpack("<BBHL", data[:8])
pos = 8
if ACEType(ace_type).is_object_type:
obj_flag = struct.unpack("<I", data[8:12])[0]
pos += 4
if obj_flag & 0x00000001:
object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
pos += 16
if obj_flag & 0x00000002:
inherited_object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
pos += 16
trustee_sid = SID(bytes_le=data[pos:])
pos += trustee_sid.size
application_data = data[pos:size]
this = cls(
ACEType(ace_type),
{flg for flg in ACEFlag if flags & flg},
mask,
trustee_sid,
object_type,
inherited_object_type,
application_data,
)
return this
except struct.error as err:
raise ValueError(f"Not a valid binary ACE, {err}")
def __str__(self):
""" Return the SDDL string representation of the ACE object. """
flags = "".join(
flg.short_name for flg in sorted(self.flags, key=lambda f: f.value)
)
rights = "".join(
rgt.short_name for rgt in sorted(self.rights, key=lambda r: r.value)
)
object_guid = self.object_type if self.object_type else ""
inherit_object_guid = (
self.inherited_object_type if self.inherited_object_type else ""
)
sid = (
self.trustee_sid.sddl_alias
if self.trustee_sid.sddl_alias
else str(self.trustee_sid)
)
return f"({self.type.short_name};{flags};{rights};{object_guid};{inherit_object_guid};{sid})"
def to_binary(self) -> bytes:
"""
Convert ACE object to binary form with little-endian byte order.
:returns: Bytes of the binary ACE instance
:rtype: bytes
"""
size = self.size
data = bytearray(size)
struct.pack_into(
"<BBHL", data, 0, self.type.value, sum(self.flags), size, self.mask
)
pos = 8
if self.type.is_object_type:
obj_flag = 0x00000001 if self.object_type else 0
obj_flag |= 0x00000002 if self.inherited_object_type else 0
struct.pack_into("<L", data, pos, obj_flag)
pos += 4
if self.object_type:
data[pos : pos + 16] = self.object_type.bytes_le
pos += 16
if self.inherited_object_type:
data[pos : pos + 16] = self.inherited_object_type.bytes_le
pos += 16
data[pos : pos + self.trustee_sid.size] = self.trustee_sid.bytes_le
pos += self.trustee_sid.size
data[pos : pos + size] = self.application_data
return bytes(data)
@property
def type(self) -> ACEType:
""" The type of the ACE. """
return self.__type
@property
def flags(self) -> Set[ACEFlag]:
""" The flags of the ACE. """
return self.__flags
@property
def size(self) -> int:
""" The binary size of ACE in bytes. """
size = 8
if self.type.is_object_type:
size += 4
if self.object_type:
size += 16
if self.inherited_object_type:
size += 16
size += self.trustee_sid.size
size += len(self.application_data)
return size
@property
def mask(self) -> int:
""" The acces mask """
return self.__mask
@property
def rights(self) -> Set[ACERight]:
""" The set of ACERights based on the access mask."""
return {rgt for rgt in ACERight if self.mask & rgt}
@property
def object_type(self) -> Optional[uuid.UUID]:
""" The uuid of the object type. """
return self.__object_type
@property
def inherited_object_type(self) -> Optional[uuid.UUID]:
""" The uuid of the inherited object type. """
return self.__inherited_object_type
@property
def trustee_sid(self) -> SID:
""" The sid of the trustee. """
return self.__trustee_sid
@property
def application_data(self) -> bytes:
""" The possible application data. """
return self.__application_data
class ACL:
"""
The access control list (ACL) is used to specify a list of individual
access control entries (ACEs). An ACL and an array of ACEs comprise a
complete access control list.
:param ACLRevision revision: the revision of the ACL.
:param List[ACE] aces: list of :class:`ACE`.
"""
def __init__(self, revision: ACLRevision, aces: List[ACE]) -> None:
self.__revision = revision
self.__aces = aces
@classmethod
def from_binary(cls, data: bytes) -> "ACL":
"""
Create an ACL object from a binary blob.
:param bytes data: a little-endian byte ordered byte input.
:returns: A new ACL instance.
:rtype: ACL
:raises TypeError: when the parameter is not bytes.
:raises ValueError: when the input cannot be parsed as an ACL
object.
"""
try:
if not isinstance(data, bytes):
raise TypeError("The `data` parameter must be bytes")
# Unwanted values are the reserved sbz1, size and sbz2.
rev, _, _, count, _ = struct.unpack("<BBHHH", data[:8])
pos = 8
aces = []
for _ in range(count):
ace = ACE.from_binary(data[pos:])
aces.append(ace)
pos += ace.size
this = cls(ACLRevision(rev), aces)
return this
except struct.error as err:
raise ValueError(f"Not a valid binary ACL, {err}")
def to_binary(self) -> bytes:
"""
Convert ACL object to binary form with little-endian byte order.
:returns: Bytes of the binary ACL instance
:rtype: bytes
"""
        size = self.size
        data = bytearray(8)
        struct.pack_into("<BBHHH", data, 0, self.revision, 0, size, len(self.aces), 0)
        for ace in self.aces:
            data.extend(ace.to_binary())
        return bytes(data)
@property
def revision(self) -> ACLRevision:
""" The revision of ACL. """
return self.__revision
@property
def size(self) -> int:
""" The binary size in bytes. """
return 8 + sum(ace.size for ace in self.aces)
@property
def aces(self) -> List[ACE]:
""" The list of :class:`ACE` objects. """
return self.__aces
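# Round-trip sketch (`blob` is a hypothetical little-endian binary ACL, e.g.
# taken from a security descriptor; equality assumes the reserved sbz fields
# in the source were zero, as the spec requires):
# acl = ACL.from_binary(blob)
# assert acl.to_binary() == blob[:acl.size]
# print([str(ace) for ace in acl.aces])  # SDDL fragments like "(A;OICI;GR;;;SY)"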
|
from django import forms
from djwed.wedding.models import *
from djwed.wedding.admin_actions import *
from django.contrib import admin
class RequireOneFormSet(forms.models.BaseInlineFormSet):
"""Require at least one form in the formset to be completed."""
def clean(self):
"""Check that at least one form has been completed."""
super(RequireOneFormSet, self).clean()
if not self.is_valid():
return
for cleaned_data in self.cleaned_data:
# form has data and we aren't deleting it.
if cleaned_data and not cleaned_data.get('DELETE', False):
# we can break out after the first complete form
return
raise forms.ValidationError("At least one %s is required." %
(self.model._meta.verbose_name,))
class InviteeNotesInline(admin.TabularInline):
model = InviteeNotes
extra = 0
verbose_name_plural = "invitee notes"
class RSVPInline(admin.TabularInline):
model = RSVP
extra = 2
class GuestInline(admin.StackedInline):
model = Guest
extra = 1
class FoodOptionInline(admin.StackedInline):
model = FoodOption
extra = 3
class CommentInline(admin.StackedInline):
model = Comment
extra = 0
exclude = ('rsvp',)
readonly_fields = ('text',)
verbose_name_plural = "comments from invitees"
class GiftThankYouInline(admin.TabularInline):
model = ThankYou
extra = 0
verbose_name = "Source"
verbose_name_plural = "Sources"
formset = RequireOneFormSet
class InviteeThankYouInline(admin.TabularInline):
model = ThankYou
extra = 0
class InviteeAdmin(admin.ModelAdmin):
#fieldsets = [
# (None, {'fields': ['question']}),
# ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
#]
inlines = [GuestInline,InviteeNotesInline,CommentInline,InviteeThankYouInline]
list_display = ('full_name', 'tags', 'full_address', 'state','country')
list_editable = ('tags',)
list_filter = ['side', 'association','country','state']
search_fields = ['full_name_override','invite_code','guest__first_name', 'guest__last_name', 'guest__nickname']
actions = [
export_as_csv_action("Export addresses as CSV",
fields=['full_name', 'full_address']),
]
#date_hierarchy = 'pub_date'
class LongFoodChoiceField(forms.ModelChoiceField):
#widget = forms.widgets.RadioSelect()
def label_from_instance(self, obj):
return obj.long_desc
class GuestAdmin(admin.ModelAdmin):
inlines = [RSVPInline,]
list_display = ('full_name', 'email', 'tags')
list_filter = ['rsvp__status', 'role', 'invitee__side', 'invitee__association']
search_fields = ['first_name', 'last_name']
list_editable = ('email', 'tags')
class RSVPAdminForm(forms.ModelForm):
    class Meta:
        model = RSVP
def clean(self, *args, **kwargs):
sret = super(RSVPAdminForm, self).clean(*args,**kwargs)
if self.cleaned_data['food_selection'] and self.cleaned_data['food_selection'].venue != self.cleaned_data['venue']:
raise ValidationError('Food selected from another venue')
if self.cleaned_data['venue'].site != u'MA' and self.cleaned_data['bus_selection']:
raise ValidationError('Bus selection for a site with no bus')
rsvp_filter = RSVP.objects.filter(venue = self.cleaned_data['venue'],
guest = self.cleaned_data['guest'])
if rsvp_filter.count()>1 or (rsvp_filter.count() == 1
and rsvp_filter.all()[0] != self.instance):
raise ValidationError('Only one RSVP allowed per person')
return sret
class RSVPAdmin(admin.ModelAdmin):
#inlines = [GuestInline,]
#food_selection = LongFoodChoiceField([], required=False, empty_label = "--- Please choose from a dinner selection below ---")
list_display = (
'guest_site',
'venue',
'status',
'food_selection',
'bus_selection',
'last_updated',
'prelim',
'guest_invitee',
'last_update_source',
'guest',
'table_assign',
)
search_fields = [
'guest__first_name',
'guest__last_name',
'guest__invitee__guest__last_name',
'guest__invitee__invite_code',
]
list_editable = (
'status',
'food_selection',
'bus_selection',
'prelim',
'last_update_source',
'table_assign',
)
form = RSVPAdminForm
list_filter = ('venue','status', 'guest__invitee__side',
'guest__invitee__association', 'guest__invitee__country',
'guest__invitee__state',
)
def guest_site(self,rsvp):
return u"%s (%s)"%(rsvp.guest.full_name(), unicode(rsvp.venue.site))
guest_site.short_description = "Guest (Site)"
def guest_invitee(self,rsvp):
return rsvp.guest.invitee
guest_invitee.short_description = "Invitee"
def guest_invitee_association(self,rsvp):
return rsvp.guest.invitee.association
guest_invitee_association.short_description = "Association"
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "guest":
kwargs["queryset"] = Guest.objects.all().order_by('last_name','first_name')
return db_field.formfield(**kwargs)
return super(RSVPAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class InviteeNotesAdmin(admin.ModelAdmin):
search_fields = ['invitee__guest__first_name',
'invitee__guest__last_name','invitee__guest__nickname']
list_display = [ 'invitee',
'likely_site',
'ma_likelihood',
'ca_likelihood',
'or_likelihood',
'savedate',
'batch',
'invitee_rsvp_count',
'adults',
'children',
'invitee_country',
]
list_editable = ['ma_likelihood',
'ca_likelihood',
'savedate',
'batch',
]
def invitee_rsvp_count(self,inote):
counts = inote.invitee.rsvp_yes_counts()
return ', '.join('%s: %d' % (venue, counts[venue])
for venue in sorted(counts.keys()))
invitee_rsvp_count.short_description = "RSVP Yes"
def invitee_country(self,inote):
return str(inote.invitee.country)
invitee_country.short_description = "Country"
class CommentAdmin(admin.ModelAdmin):
list_filter = ['type']
search_fields = ['invitee__guest__first_name','text',
'invitee__guest__last_name','invitee__guest__nickname']
list_display = ['id','invitee','type','last_updated','text']
class VenueAdmin(admin.ModelAdmin):
inlines = [FoodOptionInline,]
class PageSnippetAdmin(admin.ModelAdmin):
list_display = ['key','title','last_updated']
class GiftAdmin(admin.ModelAdmin):
search_fields = [
'sources__guest__first_name',
'sources__guest__nickname',
'sources__guest__last_name',
'notes',
'description',
]
list_filter = ['status','registry','assignment']
list_display = ['source_names','received','description','notes',
'assignment','registry','status']
list_editable = ('status', 'assignment')
inlines = [GiftThankYouInline,]
radio_fields = {
'assignment': admin.HORIZONTAL,
'registry': admin.VERTICAL,
'status': admin.HORIZONTAL,
}
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "source" and request.META['REQUEST_METHOD'] != 'POST':
kwargs["queryset"] = Invitee.objects.all().order_by('guest__last_name','guest__first_name')
return db_field.formfield(**kwargs)
return super(GiftAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def source_names(self, gift):
return u"; ".join(unicode(inv) for inv in gift.sources.all())
source_names.short_description = "Sources"
class ThankYouAdmin(admin.ModelAdmin):
list_display = [
'invitee',
'status',
'sent',
]
list_editable = ['status', 'sent']
list_filter = [
'status',
'sent',
'gift__assignment',
'gift__received',
'invitee__side',
]
search_fields = [
'invitee__guest__first_name',
'invitee__guest__last_name',
'invitee__guest__nickname',
'gift__description',
'gift__notes',
]
class TableAdmin(admin.ModelAdmin):
search_fields = ['rsvp__guest__first_name','name','number','notes',
'rsvp__guest__last_name','invitee__guest__nickname']
list_display = ['number','name','venue','table_count','table_guests','notes','position']
list_editable = ('name','notes')
list_filter = ['venue',]
def table_count(self,table):
return str(table.rsvp_set.count())
table_count.short_description = "# people"
def table_guests(self,table):
guests = []
for r in table.rsvp_set.all():
guests.append(unicode(r.guest))
guests.sort()
return u" , \n".join(guests)
table_guests.short_description = "guests"
class RSVPOptionAdmin(admin.ModelAdmin):
list_display = ['short_desc', 'likelihood', 'rsvp_count', 'long_desc']
def rsvp_count(self, option):
return str(option.rsvp_set.count())
rsvp_count.short_description = "# people"
admin.site.register(Invitee, InviteeAdmin)
admin.site.register(InviteeNotes, InviteeNotesAdmin)
admin.site.register(Guest, GuestAdmin)
admin.site.register(Venue, VenueAdmin)
admin.site.register(PageSnippet, PageSnippetAdmin)
admin.site.register(RSVP, RSVPAdmin)
admin.site.register(RSVPOption, RSVPOptionAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Gift, GiftAdmin)
admin.site.register(ThankYou, ThankYouAdmin)
admin.site.register(Table, TableAdmin)
|
import calendar
try:
    from collections.abc import Iterable
except ImportError:  # collections.abc needs Python 3.3+; fall back for Python 2
    from collections import Iterable
from time import strptime
from six import string_types
from lxml import etree
from itertools import chain
def remove_namespace(tree):
"""
Strip namespace from parsed XML
"""
for node in tree.iter():
try:
has_namespace = node.tag.startswith("{")
except AttributeError:
continue # node.tag is not a string (node is a comment or similar)
if has_namespace:
node.tag = node.tag.split("}", 1)[1]
def read_xml(path, nxml=False):
"""
Parse tree from given XML path
"""
try:
tree = etree.parse(path)
if ".nxml" in path or nxml:
remove_namespace(tree) # strip namespace when reading an XML file
    except Exception:
try:
tree = etree.fromstring(path)
except Exception:
print(
"Error: it was not able to read a path, a file-like object, or a string as an XML"
)
raise
return tree
def stringify_children(node):
"""
Filters and removes possible Nones in texts and tails
ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
"""
parts = (
[node.text]
+ list(chain(*([c.text, c.tail] for c in node.getchildren())))
+ [node.tail]
)
return "".join(filter(None, parts))
def stringify_affiliation(node):
"""
Filters and removes possible Nones in texts and tails
ref: http://stackoverflow.com/questions/4624062/get-all-text-inside-a-tag-in-lxml
"""
parts = (
[node.text]
+ list(
chain(
*(
[c.text if (c.tag != "label" and c.tag != "sup") else "", c.tail]
for c in node.getchildren()
)
)
)
+ [node.tail]
)
return " ".join(filter(None, parts))
def stringify_affiliation_rec(node):
"""
Flatten and join list to string
ref: http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python
"""
parts = _recur_children(node)
parts_flatten = list(_flatten(parts))
return " ".join(parts_flatten).strip()
def _flatten(l):
"""
Flatten list into one dimensional
"""
for el in l:
if isinstance(el, Iterable) and not isinstance(el, string_types):
for sub in _flatten(el):
yield sub
else:
yield el
def _recur_children(node):
"""
Recursive through node to when it has multiple children
"""
if len(node.getchildren()) == 0:
parts = (
([node.text or ""] + [node.tail or ""])
if (node.tag != "label" and node.tag != "sup")
else ([node.tail or ""])
)
return parts
else:
parts = (
[node.text or ""]
+ [_recur_children(c) for c in node.getchildren()]
+ [node.tail or ""]
)
return parts
def month_or_day_formater(month_or_day):
"""
Parameters
----------
month_or_day: str or int
must be one of the following:
(i) month: a three letter month abbreviation, e.g., 'Jan'.
(ii) day: an integer.
Returns
-------
numeric: str
a month of the form 'MM' or a day of the form 'DD'.
Note: returns None if:
(a) the input could not be mapped to a known month abbreviation OR
(b) the input was not an integer (i.e., a day).
"""
if month_or_day.replace(".", "") in filter(None, calendar.month_abbr):
to_format = strptime(month_or_day.replace(".", ""), "%b").tm_mon
elif month_or_day.strip().isdigit() and "." not in str(month_or_day):
to_format = int(month_or_day.strip())
else:
return None
return ("0" if to_format < 10 else "") + str(to_format)
def pretty_print(node):
"""
Pretty print a given lxml node
"""
print(etree.tostring(node, pretty_print=True).decode("utf-8"))
|
import time
import random
class Battle:
def __init__(self, user1, user2):
self.user1 = user1
self.user2 = user2
self.turn = user1
self.notTurn = user2
self.accepted = False
self.finished = False
self.auto = False
self.turnCount = 1
def fight(self, spell):
attacker = self.turn.getActivePokemon()
defender = self.notTurn.getActivePokemon()
message = attacker.fight(spell, defender)
if defender.life <= 0:
message += defender.name + " n'a plus de points de vie. "
if self.notTurn.hasAlivePokemon():
message += self.notTurn.username + " doit invoquer un nouveau pokemon. "
else:
message += self.notTurn.username + " a perdu. " + self.turn.username + " a gagne. "
message += attacker.name + " gagne " + str(attacker.calcGainedExp(defender)) + " points d'experience. "
old = attacker.level
attacker.gainExp(defender)
if attacker.level != old:
message += attacker.name + " passe niveau " + str(attacker.level) + "!"
self.finished = True
self.turn, self.notTurn = self.notTurn, self.turn
self.turnCount += 1
return message
def itemUsed(self):
self.turn, self.notTurn = self.notTurn, self.turn
def nextStep(self):
if self.finished:
self.user1.battle = None
self.user2.battle = None
return False
elif self.auto and self.turnCount % 2 == 0:
time.sleep(2)
            return self.fight(random.choice(self.turn.getActivePokemon().spells).name)
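# A rough turn-loop sketch (the user objects and their pokemon API are assumed
# from elsewhere in the codebase; names here are hypothetical):
# battle = Battle(alice, bob)
# battle.accepted = True
# while not battle.finished:
#     print(battle.fight("tackle"))  # fight() swaps turn/notTurn internally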
|
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
import misaka
from groups.models import Group
from django.contrib.auth import get_user_model
User = get_user_model()
class Post(models.Model):
user = models.ForeignKey(User, related_name='posts')
    created_at = models.DateTimeField(auto_now_add=True)  # set once at creation; auto_now would bump it on every save
message = models.TextField()
message_html = models.TextField(editable=False)
group = models.ForeignKey(Group, related_name='posts', null=True, blank=True)
def __str__(self):
return self.message
def save(self, *args, **kwargs):
self.message_html = misaka.html(self.message)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('posts:single', kwargs={'username': self.user.username, 'pk': self.pk})
class Meta:
ordering = ['-created_at']
unique_together = ['user', 'message']
|
import mysql.connector
import time
import datetime
conn = mysql.connector.connect(host="localhost",user="spike",password="valentine", database="drupal")
cann = mysql.connector.connect(host="localhost",user="spike",password="valentine", database="content_delivery_weather")
cursor = conn.cursor()
cursar = cann.cursor()
cursor.execute("""SELECT uid, mail FROM users""")
rows = cursor.fetchall()
for row in rows:
if row[0] != 0:
print('{0} : {1} '.format(row[0], row[1]))
#print('UPDATE new_v4_users_probes_edit SET email = {0} WHERE uid = {1}'.format(row[1], row[0]))
cursar.execute("""UPDATE new_v4_users_probes_edit SET email = %s WHERE userid = %s""",(row[1], row[0]))
cursar.execute("""SELECT probename, probeid FROM new_v4_sonde""")
rows = cursar.fetchall()
for row in rows:
cursar.execute("""SHOW TABLES LIKE %s""",("%" + row[0] + "%",))
rowsbis = cursar.fetchall()
for rowbis in rowsbis:
result = rowbis[0].split("_")
month = 1 + int(result[4])
s = "01/" + str(month) + "/" + result[3]
timestamp = time.mktime(datetime.datetime.strptime(s, "%d/%m/%Y").timetuple())
print('{0} : {1} year: {2} month: {3} timestamp: {4}'.format(row[0], rowbis[0], result[3], result[4], round(timestamp,0)))
cursar.execute("""SELECT firsttime FROM new_v4_sonde WHERE probeid = %s""",(row[1],))
rowsbisbis = cursar.fetchall()
for rowbisbis in rowsbisbis:
            if rowbisbis[0] is None:
cursar.execute("""UPDATE new_v4_sonde SET firsttime = %s WHERE probeid = %s""",(timestamp,row[1]))
print('firsttime: {0}'.format(rowbisbis[0],))
cann.commit()  # persist the UPDATEs; mysql.connector does not autocommit by default
conn.close()
cann.close()
|
""" Start the containers """
import argparse
from lib.docker_compose_tools import set_up
def main():
""" Start the containers """
parser = argparse.ArgumentParser(description="Set up testing environment.")
parser.add_argument("--pg", help="PostgreSQL version")
parser.add_argument("--es", help="Elasticsearch version")
args = parser.parse_args()
pg_version = args.pg
es_version = args.es
print(
"Starting environment with PostgreSQL {pg_version} with Elasticsearch {es_version}...".format(
pg_version=pg_version, es_version=es_version
)
)
set_up(pg_version, es_version)
if __name__ == "__main__":
main()
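
# Example invocation (the script filename is hypothetical):
#   python start_env.py --pg 13 --es 7.17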
|
"""
Shopify Trois
---------------
Shopify API for Python 3
"""
from setuptools import setup
setup(
name='shopify-trois',
version='1.1-dev',
url='http://masom.github.io/shopify-trois',
license='MIT',
author='Martin Samson',
author_email='pyrolian@gmail.com',
maintainer='Martin Samson',
maintainer_email='pyrolian@gmail.com',
description='Shopify API for Python 3',
long_description=__doc__,
packages=[
'shopify_trois', 'shopify_trois.models', 'shopify_trois.engines',
'shopify_trois.engines.http'
],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'requests>=1.2.3'
],
test_suite='nose.collector',
tests_require=[
'pytest', 'nose', 'mock'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
"""
RPG: Operation
These are any operations we want to carry out from our YAML files. Operations
are strings that are tied to Python code, to carry out things that aren't
easy to express directly as YAML tags.
"""
from rpg_log import Log
import rpg_combat
def HandleOperation(game, operation, data):
"""Handle the operation.
Args:
game: Game object
operation: string, name of the operation to look up
data: dict, data at the level the operation was specified in, which may
contain information the operation needs to operate. Operation specific.
"""
# Pay for a room's night sleep
if operation == 'RoomSleepPay':
Log('RoomSleepPay: You are rested!')
# Max up the player's current health
#NOTE(g): Uses a percentage based increase from the game data. If not
# present, assume full recovery.
modifier = game.data['game'].get('sleep_regeneration_percent', 1.0)
# Add the modified version of the full health
game.player.health_current += game.player.attributes['health'] * modifier
# Max out at full health
if game.player.health_current > game.player.attributes['health']:
game.player.health_current = game.player.attributes['health']
# No longer fatigued (running and such)
game.player.fatigued = False
# Combat with the Player
elif operation == 'CombatPlayer':
if game.dialogue:
# If a map is specified for the encouter, then fight
map = data.get('map', None)
# Set combat to be with the given actor
game.combat = rpg_combat.Combat(game, [game.dialogue.actor], map=map)
# Clear the dialogue. The time for talking is OVER!
game.dialogue = None
else:
            Log('Operation: CombatPlayer: Not initiated from Dialogue. Unknown actor.')
# Close the Dialogue
    elif operation == 'CloseDialogue':
game.dialogue = None
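# A hypothetical YAML fragment that would route into HandleOperation above
# (the surrounding schema and loader are assumed, not defined in this module):
#
#   encounter:
#     operation: CombatPlayer
#     map: arena_small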
|
"""
Data exporter (Excel, CSV...)
"""
import io
import math
from datetime import datetime
from xlsxwriter.workbook import Workbook
import tablib
from utils.tools import get_product_size
def get_customers(customer_list=None, file_format='csv'):
"""Generate customer data file for download."""
if customer_list is None:
customer_list = []
data = tablib.Dataset()
data.headers = ('客戶代碼', '客戶名稱')
for c in customer_list:
data.append((c.c_code, c.c_name))
if file_format == 'csv':
return data.csv
return data
def get_maintenance_log(log_list=None, file_format='csv'):
"""Generate maintenance log to csv file for download."""
if log_list is None:
log_list = []
data = tablib.Dataset()
data.headers = ('機台', '維修項目', '開始時間',
'員工', '結束時間', '員工',
'總計時間')
for log in log_list:
m_code = log['m_code'].replace('<br>', '\n')
data.append((log['machine_id'], m_code, log['start_time'],
log['who_start'], log['end_time'], log['who_end'],
log['total_time'][0])
)
if file_format == 'csv':
return data.csv
return data
def get_w_m_performance_report(file_format='xls'):
"""Generate excel file for download by worker and machine performance."""
row_number = 11
data = tablib.Dataset()
data.append(['個人效率期間表 ({})'.format(
datetime.now().strftime("%Y/%m/%d"))] + [''] * (row_number - 1))
data.append(['工號', '姓名', '日期', '標準量', '效率標準量',
'實質生產量', '總稼動時間', '總停機時間', '稼動 %', '數量效率 %',
'平均效率 %'])
if file_format == 'xls':
return data.xls
return data
def get_loss_rate_report(report_data, file_format='csv'):
"""Generate csv file for download by machine loss rate."""
data = tablib.Dataset()
data.headers = ('機台', '機型', '良品數', '不良品數', '損耗率(%)',
'損耗金額(RMB)', '損耗率排名')
rank = 0
old_loss_rate = None
for r in sorted(report_data, key=lambda k: k['loss_rate'], reverse=True):
if old_loss_rate != r['loss_rate']:
rank += 1
old_loss_rate = r['loss_rate']
record = [r['machine_id'], r['machine_type'], r['count_qty'],
r['event_qty'], r['loss_rate'], r['total_loss_money'],
rank]
data.append(record)
if file_format == 'csv':
return data.csv
return data
def get_loss_rate_detail_report(report_data, file_format='csv'):
"""Generate csv file for download by machine loss rate detail."""
data = tablib.Dataset()
data.headers = ('日期', '良品數', '不良品數', '損耗率(%)',
'損耗金額(RMB)')
for r in sorted(report_data, key=lambda k: k['record_date']):
record = [r['record_date'], r['count_qty'], r['event_qty'],
r['loss_rate'], r['total_loss_money']]
data.append(record)
if file_format == 'csv':
return data.csv
return data
def get_uptime_report(report_data='', file_format='xls'):
"""Generate excel file for download by uptime information."""
data = tablib.Dataset()
data.append_separator('製造部各工程稼動率一覽表')
data.append(['月份:10', '星期', '', '', '', '', '',
'目標', '', '', '', ''])
data.append(['', '', '加締卷取(%)', '組立(%)', '老化(%)',
'CUTTING(%)', 'TAPPING(%)', '加締卷取',
'組立', '老化', 'CUTTING', 'TAPPING'])
if file_format == 'xls':
return data.xls
return data
def get_work_order_report(report_data, file_format='csv'):
"""Generate csv file for download by work order."""
# data = tablib.Dataset()
# data.headers = ('製令編號', '料號', '客戶', '產品規格',
# '投入數', '應繳庫數',
# '加締捲取', '組立', '老化', '選別', '加工切角')
# for r in sorted(report_data, key=lambda k: k['order_no']):
# try:
# intput_count = int(r['input_count'])
# except (TypeError, ValueError):
# intput_count = -1
# record = [r['order_no'], r['part_no'], r['customer'], r['product'],
# intput_count, math.floor(intput_count / 1.03),
# r['step1_status'], r['step2_status'], r['step3_status'],
# r['step4_status'], r['step5_status']]
# data.append(record)
# if file_format == 'csv':
# return data.csv
# return data
output = io.BytesIO()
if file_format == 'xls':
workbook = Workbook(output, {'in_memory': True})
worksheet = workbook.add_worksheet()
# merge_format = workbook.add_format({
# 'bold': 1,
# 'border': 1,
# 'align': 'center',
# 'valign': 'vcenter'})
worksheet.merge_range('A1:A3', '製令編號')
worksheet.merge_range('B1:B3', '料號')
worksheet.merge_range('C1:C3', '客戶')
worksheet.merge_range('D1:D3', '產品規格')
worksheet.merge_range('E1:E3', '投入數')
worksheet.merge_range('F1:F3', '應繳庫數')
worksheet.write('G1', '加締捲取')
worksheet.write('H1', '組立')
worksheet.write('I1', '老化')
worksheet.write('J1', '選別')
worksheet.write('K1', '加工切角')
for col_name in ('G', 'H', 'I', 'J', 'K'):
worksheet.write(col_name + '2', '機器')
worksheet.write(col_name + '3', '良品數')
row = 4
for r in sorted(report_data, key=lambda k: k['order_no']):
            try:
                input_count = int(r['input_count'])
            except (TypeError, ValueError):
                input_count = -1
worksheet.merge_range('A{}:A{}'.format(row, row + 2),
r['order_no'])
worksheet.merge_range('B{}:B{}'.format(row, row + 2), r['part_no'])
worksheet.merge_range('C{}:C{}'.format(row, row + 2),
r['customer'])
worksheet.merge_range('D{}:D{}'.format(row, row + 2), r['product'])
            worksheet.merge_range('E{}:E{}'.format(row, row + 2), input_count)
            worksheet.merge_range('F{}:F{}'.format(row, row + 2),
                                  math.floor(input_count / 1.03))
for process in range(1, 6):
row_tag = chr(71 + process - 1)
worksheet.write_string('{}{}'.format(row_tag, row),
r['step{}_status'.format(process)])
machine = r['step{}_machine'.format(process)]
count = r['step{}_count'.format(process)]
worksheet.write_string('{}{}'.format(row_tag, row + 1),
machine if machine else '')
worksheet.write_string('{}{}'.format(row_tag, row + 2),
str(count) if count else '')
row += 3
workbook.close()
output.seek(0)
return output.read()
def get_order_report(report_data, file_format='csv'):
"""Generate csv file for download by machine loss rate detail."""
data = tablib.Dataset()
data.headers = ('製令編號', '客戶', '規格', '投入數', '需求數',
'加締捲曲', '組立', '老化', '選別', '加工切腳')
for r in sorted(report_data, key=lambda k: k['order_no']):
record = [r['order_no'], r['customer'], get_product_size(r['part_no']),
r['input_count'], r['require_count'],
r['step1_prod_qty'], r['step2_prod_qty'],
r['step3_prod_qty'], r['step4_prod_qty'],
r['step5_prod_qty']]
data.append(record)
if file_format == 'csv':
return data.csv
return data
|
{'level_mc': {'_txt': {'text': '6'},
'currentLabel': 'up',
'progress_mc': {'currentLabel': '_0'}}}
|
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProviderOperationsOperations(object):
"""ProviderOperationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.devtestlabs.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ProviderOperationResult"]
"""Result of the request to list REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderOperationResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.devtestlabs.models.ProviderOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ProviderOperationResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.DevTestLab/operations'} # type: ignore
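# Usage sketch (not part of the generated file): the operation group is
# typically reached through a DevTestLabsClient instance, e.g.
#
#     from azure.mgmt.devtestlabs import DevTestLabsClient
#     client = DevTestLabsClient(credential, subscription_id)
#     for op in client.provider_operations.list():
#         print(op)
#
# ItemPaged follows next_link transparently while being iterated.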
|
import os
from kivy.lang import Builder
from kivy.properties import NumericProperty, StringProperty
from kivy.uix.anchorlayout import AnchorLayout
from cobiv.modules.core.hud import Hud
Builder.load_file(os.path.abspath(os.path.join(os.path.dirname(__file__), 'progresshud.kv')))
class ProgressHud(Hud, AnchorLayout):
value = NumericProperty(0)
caption = StringProperty("")
def __init__(self, **kwargs):
super(ProgressHud, self).__init__(**kwargs)
|
"""Non-application-specific convenience methods for GPkit"""
import numpy as np
def te_exp_minus1(posy, nterm):
"""Taylor expansion of e^{posy} - 1
Arguments
---------
posy : gpkit.Posynomial
Variable or expression to exponentiate
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of e^{posy} - 1, carried to nterm terms
"""
res = 0
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= i
res += posy**i / factorial_denom
return res
def te_secant(var, nterm):
"""Taylor expansion of secant(var).
Arguments
---------
var : gpkit.monomial
Variable or expression argument
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of secant(x), carried to nterm terms
"""
    # Absolute values of the first 12 even-indexed Euler numbers (|E_2| .. |E_24|)
E2n = np.asarray([1.0,
5,
61,
1385,
50521,
2702765,
199360981,
19391512145,
2404879675441,
370371188237525,
69348874393137901,
15514534163557086905])
if nterm > 12:
n_extend = np.asarray(range(13, nterm+1))
E2n_add = (8 * np.sqrt(n_extend/np.pi)
* (4*n_extend/(np.pi * np.exp(1)))**(2*n_extend))
E2n = np.append(E2n, E2n_add)
res = 1
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= ((2*i)*(2*i-1))
res = res + var**(2*i) * E2n[i-1] / factorial_denom
return res
def te_tangent(var, nterm):
"""Taylor expansion of tangent(var).
Arguments
---------
var : gpkit.monomial
Variable or expression argument
nterm : int
Number of non-constant terms in resulting Taylor expansion
Returns
-------
gpkit.Posynomial
Taylor expansion of tangent(x), carried to nterm terms
"""
if nterm > 15:
raise NotImplementedError("Tangent expansion not implemented above"
" 15 terms")
    # The first 15 even-indexed Bernoulli numbers (B_2 through B_30)
B2n = np.asarray([1/6,
-1/30,
1/42,
-1/30,
5/66,
-691/2730,
7/6,
-3617/510,
43867/798,
-174611/330,
854513/138,
-236364091/2730,
8553103/6,
-23749461029/870,
8615841276005/14322])
res = 0
factorial_denom = 1
for i in range(1, nterm + 1):
factorial_denom *= ((2*i)*(2*i-1))
res += ((-1)**(i-1) * 2**(2*i) * (2**(2*i) - 1) *
B2n[i-1] / factorial_denom * var**(2*i-1))
return res
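# A quick numeric sanity check of the expansions above, using plain floats in
# place of gpkit expressions (floats support the same ** and * operations
# these functions rely on):
if __name__ == "__main__":
    x = 0.1
    print(te_exp_minus1(x, 5), np.exp(x) - 1)  # both ~0.1051709
    print(te_secant(x, 3), 1 / np.cos(x))      # both ~1.0050209
    print(te_tangent(x, 3), np.tan(x))         # both ~0.1003347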
|
from django.contrib import admin
from modeltranslation.admin import TabbedTranslationAdmin
from .models import Person, Office, Tag
class PersonAdmin(TabbedTranslationAdmin):
list_display = ('name', 'surname', 'security_level', 'gender')
list_filter = ('security_level', 'tags', 'office', 'name', 'gender')
actions = ['copy_100']
def copy_100(self, request, queryset):
for item in queryset.all():
item.populate()
copy_100.short_description = 'Copy 100 objects with random data'
class PersonTabularInline(admin.TabularInline):
    model = Person
    extra = 0
class OfficeAdmin(admin.ModelAdmin):
    inlines = (PersonTabularInline,)
list_display = ('office', 'address')
class TagAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(Person, PersonAdmin)
admin.site.register(Office, OfficeAdmin)
admin.site.register(Tag, TagAdmin)
|
import atexit
from flask import Flask, jsonify, g, request, make_response
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_jwt_extended import JWTManager, set_access_cookies, jwt_required, unset_jwt_cookies
from apscheduler.schedulers.background import BackgroundScheduler
import logging
logging.basicConfig()
app = Flask(__name__)
app.config.from_pyfile('./config/config.py')
db = SQLAlchemy(app)
CORS(app, supports_credentials=True)
migrate = Migrate(app, db)
jwt = JWTManager(app)
from tasks.next_reservation_check import check_all_users
from DB.User import User
from endpoints import reservation, user
scheduler = BackgroundScheduler(daemon=True)
scheduler.add_job(check_all_users,'interval', hours=4)
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
@app.route('/')
def index():
return "Hello, this is an API, Swagger documentation will follow here..."
@app.route('/token')
def get_auth_token():
if not request.authorization:
response = make_response(jsonify({'error':'Login required'}))
response.headers.set('WWW-Authenticate', 'Basic realm="patklaey.ch"')
return response, 401
if not verify_password(request.authorization.username, request.authorization.password):
response = jsonify({'error':'Invalid username or password'})
return response, 401
token = g.user.generate_auth_token()
response = jsonify({'token': token})
set_access_cookies(response, token)
return response, 200
def verify_password(username, password):
user = User.query.filter_by(username=username, active=True).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
@app.route('/logout', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
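# Example requests against this API, assuming it runs on localhost:5000 and
# that the username/password below exist in the user table (placeholders):
#
#   curl -u alice:secret http://localhost:5000/token    # sets JWT cookies, returns {"token": ...}
#   curl -X POST http://localhost:5000/logout           # clears the JWT cookies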
|
from __future__ import absolute_import, division, print_function
from abc import abstractmethod, ABCMeta
from collections import OrderedDict
from ....core.tools.logging import log
from ..component import FittingComponent
class ModelGenerator(FittingComponent):
    """
    Abstract base class for generating sets of model parameter values
    within the configured parameter ranges.
    """
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(ModelGenerator, self).__init__()
# The dictionary with the parameter ranges
self.ranges = OrderedDict()
# The dictionary with the list of the model parameters
self.parameters = OrderedDict()
# -----------------------------------------------------------------
@property
def parameter_names(self):
"""
This function ...
:return:
"""
        return list(self.ranges.keys())
# -----------------------------------------------------------------
@property
def nparameters(self):
"""
This function ...
:return:
"""
return len(self.ranges)
# -----------------------------------------------------------------
@property
def nmodels(self):
"""
This function ...
:return:
"""
        return len(self.parameters[list(self.ranges.keys())[0]])
# -----------------------------------------------------------------
@property
def parameter_minima(self):
"""
This function ...
:return:
"""
minima = []
for name in self.ranges: minima.append(self.ranges[name].min)
# Return the minimal parameter values
return minima
# -----------------------------------------------------------------
@property
def parameter_maxima(self):
"""
This function ...
:return:
"""
maxima = []
for name in self.ranges: maxima.append(self.ranges[name].max)
# Return the maximal parameter values
return maxima
# -----------------------------------------------------------------
def add_parameter(self, name, par_range):
"""
This function ...
:param name:
:param par_range:
:return:
"""
self.ranges[name] = par_range
# -----------------------------------------------------------------
def run(self):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup()
# 2. Load the necessary input
self.load_input()
# 3. Initialize the animations
self.initialize_animations()
# 4. Generate the model parameters
self.generate()
# -----------------------------------------------------------------
def setup(self):
"""
This function ...
:return:
"""
# Call the setup of the base class
super(ModelGenerator, self).setup()
# -----------------------------------------------------------------
def load_input(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def initialize_animations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Initializing the animations ...")
        # Initialize the scatter animation (ScatterAnimation and
        # DistributionAnimation are expected to come from the package's
        # animation module; their imports are missing from this file)
self.scatter_animation = ScatterAnimation(self.ranges["FUV young"], self.ranges["FUV ionizing"],
self.ranges["Dust mass"])
self.scatter_animation.x_label = "FUV luminosity of young stars"
self.scatter_animation.y_label = "FUV luminosity of ionizing stars"
self.scatter_animation.z_label = "Dust mass"
# Initialize the young FUV luminosity distribution animation
self.fuv_young_animation = DistributionAnimation(self.ranges["FUV young"][0], self.ranges["FUV young"][1],
"FUV luminosity of young stars", "New models")
self.fuv_young_animation.add_reference_distribution("Previous models", self.distributions["FUV young"])
# Initialize the ionizing FUV luminosity distribution animation
self.fuv_ionizing_animation = DistributionAnimation(self.ranges["FUV ionizing"][0],
self.ranges["FUV ionizing"][1],
"FUV luminosity of ionizing stars", "New models")
self.fuv_ionizing_animation.add_reference_distribution("Previous models", self.distributions["FUV ionizing"])
# Initialize the dust mass distribution animation
self.dust_mass_animation = DistributionAnimation(self.ranges["Dust mass"][0], self.ranges["Dust mass"][1],
"Dust mass", "New models")
self.dust_mass_animation.add_reference_distribution("Previous models", self.distributions["Dust mass"])
# -----------------------------------------------------------------
@abstractmethod
def generate(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
def update_animations(self, young_luminosity, ionizing_luminosity, dust_mass):
"""
This function ...
:param young_luminosity:
:param ionizing_luminosity:
:param dust_mass:
:return:
"""
# Add the point (and thus a frame) to the animation of parameter points
self.scatter_animation.add_point(young_luminosity, ionizing_luminosity, dust_mass)
# Update the distribution animations
if self.nmodels > 1:
            # Add a frame to the animation of the distribution of the FUV luminosity of young stars
self.fuv_young_animation.add_value(young_luminosity)
# Add a frame to the animation of the distribution of the FUV luminosity of ionizing stars
self.fuv_ionizing_animation.add_value(ionizing_luminosity)
# Add a frame to the animation of the distribution of the dust mass
self.dust_mass_animation.add_value(dust_mass)
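# -----------------------------------------------------------------
# A minimal concrete generator, added purely as an illustration (hypothetical,
# not part of the original module): it fills self.parameters by sampling each
# parameter uniformly within its range, assuming the range objects expose
# .min and .max as the parameter_minima/parameter_maxima properties above
# already do.
import random
class UniformModelGenerator(ModelGenerator):
    def generate(self, nmodels=10):
        # Draw 'nmodels' uniform samples for every registered parameter
        for name in self.ranges:
            par_range = self.ranges[name]
            self.parameters[name] = [random.uniform(par_range.min, par_range.max) for _ in range(nmodels)]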
|
from django.apps import AppConfig
class DonateAppConfig(AppConfig):
name = 'readthedocs.donate'
verbose_name = 'Donate'
def ready(self):
import readthedocs.donate.signals # noqa
|
from flask import Flask, Response, request
from twilio.util import TwilioCapability
app = Flask(__name__)
@app.route('/token', methods=['GET'])
def get_capability_token():
"""Respond to incoming requests."""
# Find these values at twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
capability = TwilioCapability(account_sid, auth_token)
# Twilio Application Sid
application_sid = 'APXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    capability.allow_client_outgoing(application_sid)
    # request.form is empty on a GET request; request.values also covers query-string parameters
    capability.allow_client_incoming(request.values["ClientName"])
token = capability.generate()
return Response(token, mimetype='application/jwt')
if __name__ == "__main__":
app.run(debug=True)
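# Example request, assuming the app runs locally (the SIDs and token above are
# placeholders from the Twilio console):
#
#   curl "http://localhost:5000/token?ClientName=alice"
#
# The response body is the capability token, served as application/jwt.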
|
import os.path
import os
import argparse
import pickle
from util import *
from collections import defaultdict
import base64
import logging
import sys
import json
import shelve
from LangProc import docTerms
class DBMIndex:
pass
class BaseIndexer():
def __init__(self):
self.invertedIndex = defaultdict(list)
self.forwardIndex = dict()
        self.idToUrl = dict()  # map numeric doc id -> url (urls are too long to key on directly)
        self.docCount = 0
class ShelveIndexer():
def __init__(self):
self.invertedIndex = None
self.forwardIndex = None
        self.idToUrl = None  # map numeric doc id -> url (urls are too long to key on directly)
        self.urlToId = dict()
        self.docCount = 0
def addDocument(self,url,parsedText):
assert url.encode("utf8") not in self.urlToId
self.docCount += 1
currentId = self.docCount
self.urlToId[url.encode("utf8")] = currentId
        self.idToUrl[str(currentId)] = url
self.forwardIndex[str(currentId)] = parsedText
for position,term in enumerate(parsedText):
stem = term.stem.encode("utf8")
documents = self.invertedIndex[stem] if stem in self.invertedIndex else []
documents.append((position,currentId))
self.invertedIndex[stem] = documents
def startIndexer(self,indexDir):
self.invertedIndex = shelve.open(os.path.join(indexDir,"invertedIndex"),'c')
self.forwardIndex = shelve.open(os.path.join(indexDir,"forwardIndex"),'c')
self.idToUrl = shelve.open(os.path.join(indexDir,"idToUrl"),'c')
def finishIndexer(self):
self.invertedIndex.close()
self.forwardIndex.close()
self.idToUrl.close()
def loadIndexer(self,indexDir):
self.invertedIndex = shelve.open(os.path.join(indexDir,"invertedIndex"),'r')
self.forwardIndex = shelve.open(os.path.join(indexDir,"forwardIndex"),'r')
self.idToUrl = shelve.open(os.path.join(indexDir,"idToUrl"),'r')
def getDocumentOfQuery(self,query):
return self.invertedIndex.get(query.stem.encode("utf8"),[])
def getDocumentOfId(self,id):
return self.forwardIndex.get(str(id),[])
def getUrl(self,id): # here we load all data from files thus the type is string !
return self.idToUrl[str(id)]
class MemoryIndexer():
def __init__(self):
self.invertedIndex = defaultdict(list)
self.forwardIndex = dict()
        self.idToUrl = dict()  # map numeric doc id -> url (urls are too long to key on directly)
        self.docCount = 0
    # TODO: remove these assumptions:
    # - addDocument() is never called twice for the same document
    # - every document has a unique url
    # - parsedText is a list of terms
def addDocument(self,url,parsedText):
self.docCount += 1
currentId = self.docCount
        self.idToUrl[currentId] = url
self.forwardIndex[currentId] = parsedText
for position,term in enumerate(parsedText):
self.invertedIndex[term].append((position,currentId))
    # dump the indexes to disk with pickle
def dumpToDisk(self,IndexDir):
def pickleDumpToFile(source,fileName):
file = open(os.path.join(IndexDir,fileName),"w")
pickle.dump(source,file)
pickleDumpToFile(self.idToUrl,"idToUrl")
pickleDumpToFile(self.invertedIndex,"inverted")
pickleDumpToFile(self.forwardIndex,"forward")
def loadFromDisk(self,indexDir):
def pickleLoadFromFile(fileName):
file = open(os.path.join(indexDir,fileName),"r")
return pickle.load(file)
self.invertedIndex=pickleLoadFromFile("inverted")
self.idToUrl=pickleLoadFromFile("idToUrl")
self.forwardIndex=pickleLoadFromFile("forward")
def getDocumentOfQuery(self,query):
return self.invertedIndex.get(query,[])
def getDocumentOfId(self,id):
return self.forwardIndex.get(id,[])
    def getUrl(self, id):
        # MemoryIndexer keys idToUrl by the integer id assigned in addDocument
        return self.idToUrl[id]
class Searcher():
    def __init__(self, indexDir, implementation=ShelveIndexer):
        self.index = implementation()
        self.index.loadIndexer(indexDir)
def findDocument_AND(self,queryStr):
documentIdList = defaultdict(lambda:0)
for term in queryStr:
for id in set([item[1] for item in self.index.getDocumentOfQuery(term)]):
documentIdList[id] += 1
return [docId for docId,cnt in documentIdList.iteritems() if cnt ==len(queryStr)]
    def getUrl(self, id):
        return self.index.getUrl(id)
    def getSnippets(self, queryStr, id):
        # Sliding-window search for the smallest document span that contains
        # every query term (the "minimal window" snippet heuristic).
        currentWindow = [-1] * len(queryStr)
        minWindow = []
        minSize = sys.maxint
        bestIdenticalTermSize = 0
        for pos, term in enumerate(self.index.getDocumentOfId(id)):
            if term in queryStr:
                currentWindow[queryStr.index(term)] = pos
                if -1 not in currentWindow:
                    start = min(currentWindow)
                    end = pos
                    identicalTermSize = len(set(self.index.getDocumentOfId(id)[start:end + 1]))
                    # Prefer smaller windows; break near-ties in favour of
                    # windows that contain more distinct terms.
                    if (minSize > end - start + 1) or (identicalTermSize > bestIdenticalTermSize and minSize + 2 >= end - start + 1):
                        minWindow = currentWindow[:]
                        minSize = end - start + 1
                        bestIdenticalTermSize = identicalTermSize
        docLength = len(self.index.getDocumentOfId(id))
        snippetsStart = max(min(minWindow) - 10, 0)
        snippetsEnd = min(docLength, max(minWindow) + 1 + 10)
        # Return (originalWord, isQueryTerm) tuples so the caller can
        # highlight the query terms inside the snippet.
        return [(term.originalWord, term in queryStr) for term in self.index.getDocumentOfId(id)[snippetsStart:snippetsEnd]]
'''
def createIndexDir(storedDocumentDir,indexDir):
indexer = MemoryIndexer()
indexCount = 0
for fileName in os.listdir(storedDocumentDir):
indexCount +=1
if indexCount % 100 ==0:
logging.info(u"Indexed {} documents".format(indexCount))
logging.info(u"Adding Document: {}".format(base64.b16decode(fileName)))
openFile = open(os.path.join(storedDocumentDir,fileName))
parsedText = docTerms(parseRedditPost(openFile.read()))
indexer.addDocument(base64.b16decode(fileName),parsedText)
indexer.dumpToDisk(indexDir)
'''
def createIndexDirApi(storedDocumentDir, indexDir, implementation=ShelveIndexer):
    indexer = implementation()
    indexer.startIndexer(indexDir)
indexCount = 0
for fileName in os.listdir(storedDocumentDir):
#logging.info(u"Adding Document: {}".format(base64.b16decode(fileName)))
openFile = open(os.path.join(storedDocumentDir,fileName))
try:
jsonFile = json.load(openFile)
parsedText = docTerms(jsonFile['text'])
indexer.addDocument(jsonFile['url'],parsedText)
indexCount +=1
if indexCount % 100 ==0:
logging.info(u"Indexed {} documents".format(indexCount))
except Exception as e:
logging.exception(e)
openFile.close()
indexer.finishIndexer()
def main():
logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser(description="Index /r/learnprogramming")
    parser.add_argument("--storedDocumentDir", dest="storedDocumentDir", required=True)
    parser.add_argument("--indexDir", dest="indexDir", required=True)
    args = parser.parse_args()
createIndexDirApi(args.storedDocumentDir,args.indexDir)
if __name__ == "__main__": # if invoke from command line
main()
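# Command-line usage sketch (file and directory names are placeholders):
#
#   python indexer.py --storedDocumentDir crawled_docs/ --indexDir index/
#
# Afterwards a Searcher can be pointed at the same directory; docTerms (from
# LangProc) turns raw query text into the term objects the index stores:
#
#   searcher = Searcher("index/")
#   ids = searcher.findDocument_AND(docTerms("some query"))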
|
import mobula.layers as L
import numpy as np
def test_acc():
X = np.array([[0, 1, 2],
[1, 2, 0],
[0, 1, 2],
[1, 2, 0]])
Y = np.array([1, 0, 2, 1]).reshape((-1, 1))
# top-k
# 1 [False, False, True, True]
# 2 [True, True, True, True]
target = [np.array([False, False, True, True]), np.array([True, True, True, True])]
[data, label] = L.Data([X, Y])
for i in range(2):
l = L.Accuracy(data, label = label, top_k = 1 + i)
l.reshape()
l.forward()
assert l.Y == np.mean(target[i])
|
from distutils.core import setup
import strfrag
setup(
name='Strfrag',
version=strfrag.__version__,
description=('StringFragment type to represent parts of str objects; '
'used to avoid copying strings during processing'),
url="https://github.com/ludios/Strfrag",
author="Ivan Kozik",
author_email="ivan@ludios.org",
classifiers=[
'Programming Language :: Python :: 2',
'Development Status :: 3 - Alpha',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
],
py_modules=['strfrag', 'test_strfrag'],
)
|