| code (string, 3–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (string, 1 value) | license (string, 15 values) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Blockstore
~~~~~
copyright: (c) 2014 by Halfmoon Labs, Inc.
copyright: (c) 2015 by Blockstack.org
This file is part of Blockstore
Blockstore is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstore is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstore. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import logging
import os
import sys
import subprocess
import signal
import json
import datetime
import traceback
import httplib
import time
import socket
import math
from ConfigParser import SafeConfigParser
import pybitcoin
from txjsonrpc.netstring import jsonrpc
from lib import nameset as blockstore_state_engine
from lib import get_db_state
from lib.config import REINDEX_FREQUENCY, TESTSET, DEFAULT_DUST_FEE
from lib import *
import virtualchain
log = virtualchain.session.log
# global variables, for use with the RPC server and the twisted callback
blockstore_opts = None
bitcoind = None
bitcoin_opts = None
utxo_opts = None
blockchain_client = None
blockchain_broadcaster = None
indexer_pid = None
def get_bitcoind( new_bitcoind_opts=None, reset=False, new=False ):
"""
Get or instantiate our bitcoind client.
Optionally re-set the bitcoind options.
"""
global bitcoind
global bitcoin_opts
if reset:
bitcoind = None
elif not new and bitcoind is not None:
return bitcoind
if new or bitcoind is None:
if new_bitcoind_opts is not None:
bitcoin_opts = new_bitcoind_opts
new_bitcoind = None
try:
new_bitcoind = virtualchain.connect_bitcoind( bitcoin_opts )
if new:
return new_bitcoind
else:
# save for subsequent reuse
bitcoind = new_bitcoind
return bitcoind
except Exception, e:
log.exception( e )
return None
def get_bitcoin_opts():
"""
Get the bitcoind connection arguments.
"""
global bitcoin_opts
return bitcoin_opts
def get_utxo_opts():
"""
Get UTXO provider options.
"""
global utxo_opts
return utxo_opts
def get_blockstore_opts():
"""
Get blockstore configuration options.
"""
global blockstore_opts
return blockstore_opts
def set_bitcoin_opts( new_bitcoin_opts ):
"""
    Set new global bitcoind options.
"""
global bitcoin_opts
bitcoin_opts = new_bitcoin_opts
def set_utxo_opts( new_utxo_opts ):
"""
    Set new global chain.com (UTXO provider) options.
"""
global utxo_opts
utxo_opts = new_utxo_opts
def get_pidfile_path():
"""
Get the PID file path.
"""
working_dir = virtualchain.get_working_dir()
pid_filename = blockstore_state_engine.get_virtual_chain_name() + ".pid"
return os.path.join( working_dir, pid_filename )
def get_tacfile_path():
"""
Get the TAC file path for our service endpoint.
Should be in the same directory as this module.
"""
working_dir = os.path.abspath(os.path.dirname(__file__))
tac_filename = blockstore_state_engine.get_virtual_chain_name() + ".tac"
return os.path.join( working_dir, tac_filename )
def get_logfile_path():
"""
Get the logfile path for our service endpoint.
"""
working_dir = virtualchain.get_working_dir()
logfile_filename = blockstore_state_engine.get_virtual_chain_name() + ".log"
return os.path.join( working_dir, logfile_filename )
def get_state_engine():
"""
Get a handle to the blockstore virtual chain state engine.
"""
return get_db_state()
def get_index_range():
"""
Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
cryptocurrency node knows about.
"""
bitcoind_session = get_bitcoind( new=True )
first_block = None
last_block = None
while last_block is None:
first_block, last_block = virtualchain.get_index_range( bitcoind_session )
if last_block is None:
            # try to reconnect
time.sleep(1)
log.error("Reconnect to bitcoind")
bitcoind_session = get_bitcoind( new=True )
continue
else:
return first_block, last_block - NUM_CONFIRMATIONS
def die_handler_server(signal, frame):
"""
Handle Ctrl+C for server subprocess
"""
log.info('Exiting blockstored server')
stop_server()
sys.exit(0)
def die_handler_indexer(signal, frame):
"""
    Handle Ctrl+C for the indexer process
"""
db = get_state_engine()
virtualchain.stop_sync_virtualchain( db )
sys.exit(0)
def json_traceback():
exception_data = traceback.format_exc().splitlines()
return {
"error": exception_data[-1],
"traceback": exception_data
}
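# Illustrative shape of json_traceback()'s return value (values made up):
#   {"error": "ZeroDivisionError: integer division or modulo by zero",
#    "traceback": ["Traceback (most recent call last):", "...", "ZeroDivisionError: ..."]}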
def get_utxo_provider_client():
"""
Get or instantiate our blockchain UTXO provider's client.
Return None if we were unable to connect
"""
# acquire configuration (which we should already have)
blockstore_opts, blockchain_opts, utxo_opts, dht_opts = configure( interactive=False )
try:
blockchain_client = connect_utxo_provider( utxo_opts )
return blockchain_client
    except Exception, e:
log.exception(e)
return None
def get_tx_broadcaster():
"""
Get or instantiate our blockchain UTXO provider's transaction broadcaster.
fall back to the utxo provider client, if one is not designated
"""
# acquire configuration (which we should already have)
blockstore_opts, blockchain_opts, utxo_opts, dht_opts = configure( interactive=False )
# is there a particular blockchain client we want for importing?
if 'tx_broadcaster' not in blockstore_opts:
return get_utxo_provider_client()
broadcaster_opts = default_utxo_provider_opts( blockstore_opts['tx_broadcaster'] )
try:
blockchain_broadcaster = connect_utxo_provider( broadcaster_opts )
return blockchain_broadcaster
    except Exception, e:
log.exception(e)
return None
def get_name_cost( name ):
"""
Get the cost of a name, given the fully-qualified name.
Do so by finding the namespace it belongs to (even if the namespace is being imported).
Return None if the namespace has not been declared
"""
db = get_state_engine()
namespace_id = get_namespace_from_name( name )
if namespace_id is None or len(namespace_id) == 0:
return None
namespace = db.get_namespace( namespace_id )
if namespace is None:
# maybe importing?
namespace = db.get_namespace_reveal( namespace_id )
if namespace is None:
# no such namespace
return None
name_fee = price_name( get_name_from_fq_name( name ), namespace )
return name_fee
class BlockstoredRPC(jsonrpc.JSONRPC):
"""
    Blockstored not-quite-JSON-RPC server.
We say "not quite" because the implementation serves data
via Netstrings, not HTTP, and does not pay attention to
the 'id' or 'version' fields in the JSONRPC spec.
This endpoint does *not* talk to a storage provider, but only
serves back information from the blockstore virtual chain.
The client is responsible for resolving this information
to data, via an ancillary storage provider.
"""
def jsonrpc_ping(self):
reply = {}
reply['status'] = "alive"
return reply
def jsonrpc_lookup(self, name):
"""
Lookup the profile for a name.
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockstore_state_engine = get_state_engine()
name_record = blockstore_state_engine.get_name( name )
        if name_record is None:
return {"error": "Not found."}
else:
return name_record
def jsonrpc_getinfo(self):
"""
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
bitcoind = get_bitcoind()
info = bitcoind.getinfo()
reply = {}
reply['blocks'] = info['blocks']
db = get_state_engine()
reply['consensus'] = db.get_current_consensus()
return reply
def jsonrpc_preorder(self, name, register_addr, privatekey):
""" Preorder a name
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockchain_client_inst = get_utxo_provider_client()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
db = get_state_engine()
consensus_hash = db.get_current_consensus()
if not consensus_hash:
# consensus hash must exist
return {"error": "Nameset snapshot not found."}
if db.is_name_registered( name ):
# name can't be registered
return {"error": "Name already registered"}
namespace_id = get_namespace_from_name( name )
if not db.is_namespace_ready( namespace_id ):
# namespace must be ready; otherwise this is a waste
return {"error": "Namespace is not ready"}
name_fee = get_name_cost( name )
log.debug("The price of '%s' is %s satoshis" % (name, name_fee))
try:
resp = preorder_name(str(name), str(register_addr), str(consensus_hash), str(privatekey), blockchain_client_inst, name_fee, testset=TESTSET)
except:
return json_traceback()
log.debug('preorder <%s, %s>' % (name, privatekey))
return resp
def jsonrpc_register(self, name, register_addr, privatekey, renewal_fee=None):
""" Register a name
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockchain_client_inst = get_utxo_provider_client()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
log.info("name: %s" % name)
db = get_state_engine()
if db.is_name_registered( name ) and renewal_fee is None:
# *must* be given, so we don't accidentally charge
return {"error": "Name already registered"}
try:
resp = register_name(str(name), str(register_addr), str(privatekey), blockchain_client_inst, renewal_fee=renewal_fee, testset=TESTSET)
except:
return json_traceback()
return resp
def jsonrpc_update(self, name, data_hash, privatekey):
"""
Update a name with new data.
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
log.debug('update <%s, %s, %s>' % (name, data_hash, privatekey))
blockchain_client_inst = get_utxo_provider_client()
db = get_state_engine()
consensus_hash = db.get_current_consensus()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
try:
resp = update_name(str(name), str(data_hash), str(consensus_hash), str(privatekey), blockchain_client_inst, testset=TESTSET)
except:
return json_traceback()
return resp
def jsonrpc_transfer(self, name, address, keep_data, privatekey):
""" Transfer a name
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockchain_client_inst = get_utxo_provider_client()
db = get_state_engine()
consensus_hash = db.get_current_consensus()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
try:
resp = transfer_name(str(name), str(address), bool(keep_data), str(consensus_hash), str(privatekey), blockchain_client_inst, testset=TESTSET)
except:
return json_traceback()
log.debug('transfer <%s, %s, %s>' % (name, address, privatekey))
return resp
def jsonrpc_renew(self, name, privatekey):
""" Renew a name
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
# renew the name for the caller
db = get_state_engine()
name_rec = db.get_name( name )
if name_rec is None:
return {"error": "Name is not registered"}
# renew to the caller
register_addr = name_rec['address']
renewal_fee = get_name_cost( name )
return self.jsonrpc_register( name, register_addr, privatekey, renewal_fee=renewal_fee )
def jsonrpc_revoke( self, name, privatekey ):
""" Revoke a name and all of its data.
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockchain_client_inst = get_utxo_provider_client()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
try:
resp = revoke_name(str(name), str(privatekey), blockchain_client_inst, testset=TESTSET)
except:
return json_traceback()
log.debug("revoke <%s>" % name )
return resp
def jsonrpc_name_import( self, name, recipient_address, update_hash, privatekey ):
"""
Import a name into a namespace.
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockchain_client_inst = get_utxo_provider_client()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
broadcaster_client_inst = get_tx_broadcaster()
if broadcaster_client_inst is None:
return {"error": "Failed to connect to blockchain transaction broadcaster"}
db = get_state_engine()
try:
resp = name_import( str(name), str(recipient_address), str(update_hash), str(privatekey), blockchain_client_inst, blockchain_broadcaster=broadcaster_client_inst, testset=TESTSET )
except:
return json_traceback()
log.debug("import <%s>" % name )
return resp
def jsonrpc_namespace_preorder( self, namespace_id, register_addr, privatekey ):
"""
Define the properties of a namespace.
Between the namespace definition and the "namespace begin" operation, only the
user who created the namespace can create names in it.
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
db = get_state_engine()
blockchain_client_inst = get_utxo_provider_client()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
consensus_hash = db.get_current_consensus()
namespace_fee = price_namespace( namespace_id )
log.debug("Namespace '%s' will cost %s satoshis" % (namespace_id, namespace_fee))
try:
resp = namespace_preorder( str(namespace_id), str(register_addr), str(consensus_hash), str(privatekey), blockchain_client_inst, namespace_fee, testset=TESTSET )
except:
return json_traceback()
log.debug("namespace_preorder <%s>" % (namespace_id))
return resp
def jsonrpc_namespace_reveal( self, namespace_id, register_addr, lifetime, coeff, base, bucket_exponents, nonalpha_discount, no_vowel_discount, privatekey ):
"""
Reveal and define the properties of a namespace.
Between the namespace definition and the "namespace begin" operation, only the
user who created the namespace can create names in it.
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockchain_client_inst = get_utxo_provider_client()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
try:
resp = namespace_reveal( str(namespace_id), str(register_addr), int(lifetime),
int(coeff), int(base), list(bucket_exponents),
int(nonalpha_discount), int(no_vowel_discount),
str(privatekey), blockchain_client_inst, testset=TESTSET )
except:
return json_traceback()
log.debug("namespace_reveal <%s, %s, %s, %s, %s, %s, %s>" % (namespace_id, lifetime, coeff, base, bucket_exponents, nonalpha_discount, no_vowel_discount))
return resp
def jsonrpc_namespace_ready( self, namespace_id, privatekey ):
"""
Declare that a namespace is open to accepting new names.
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
blockchain_client_inst = get_utxo_provider_client()
if blockchain_client_inst is None:
return {"error": "Failed to connect to blockchain UTXO provider"}
try:
resp = namespace_ready( str(namespace_id), str(privatekey), blockchain_client_inst, testset=TESTSET )
except:
return json_traceback()
log.debug("namespace_ready %s" % namespace_id )
return resp
def jsonrpc_get_name_cost( self, name ):
"""
Return the cost of a given name, including fees
Return value is in satoshis
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
if len(name) > LENGTHS['blockchain_id_name']:
return {"error": "Name too long"}
ret = get_name_cost( name )
if ret is None:
return {"error": "Unknown/invalid namespace"}
return {"satoshis": int(math.ceil(ret))}
def jsonrpc_get_namespace_cost( self, namespace_id ):
"""
Return the cost of a given namespace, including fees.
Return value is in satoshis
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
if len(namespace_id) > LENGTHS['blockchain_id_namespace_id']:
return {"error": "Namespace ID too long"}
ret = price_namespace(namespace_id)
return {"satoshis": int(math.ceil(ret))}
def jsonrpc_lookup_namespace( self, namespace_id ):
"""
Return the readied namespace with the given namespace_id
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
db = get_state_engine()
ns = db.get_namespace( namespace_id )
if ns is None:
return {"error": "No such ready namespace"}
else:
return ns
def jsonrpc_get_all_names( self, offset, count ):
"""
Return all names
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
db = get_state_engine()
return db.get_all_names( offset=offset, count=count )
def jsonrpc_get_names_in_namespace( self, namespace_id, offset, count ):
"""
Return all names in a namespace
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
db = get_state_engine()
return db.get_names_in_namespace( namespace_id, offset=offset, count=count )
def jsonrpc_get_consensus_at( self, block_id ):
"""
Return the consensus hash at a block number
"""
# are we doing our initial indexing?
if is_indexing():
return {"error": "Indexing blockchain"}
db = get_state_engine()
return db.get_consensus_at( block_id )
def run_indexer():
"""
Continuously reindex the blockchain, but as a subprocess.
"""
# set up this process
signal.signal( signal.SIGINT, die_handler_indexer )
signal.signal( signal.SIGQUIT, die_handler_indexer )
signal.signal( signal.SIGTERM, die_handler_indexer )
bitcoind_opts = get_bitcoin_opts()
_, last_block_id = get_index_range()
blockstore_state_engine = get_state_engine()
while True:
time.sleep( REINDEX_FREQUENCY )
virtualchain.sync_virtualchain( bitcoind_opts, last_block_id, blockstore_state_engine )
_, last_block_id = get_index_range()
return
def stop_server():
"""
Stop the blockstored server.
"""
global indexer_pid
# Quick hack to kill a background daemon
pid_file = get_pidfile_path()
try:
fin = open(pid_file, "r")
except Exception, e:
return
else:
pid_data = fin.read()
fin.close()
os.remove(pid_file)
pid = int(pid_data)
try:
os.kill(pid, signal.SIGKILL)
except Exception, e:
return
if indexer_pid is not None:
try:
os.kill(indexer_pid, signal.SIGTERM)
except Exception, e:
return
# stop building new state if we're in the middle of it
db = get_state_engine()
virtualchain.stop_sync_virtualchain( db )
set_indexing( False )
def get_indexing_lockfile():
"""
Return path to the indexing lockfile
"""
return os.path.join( virtualchain.get_working_dir(), "blockstore.indexing" )
def is_indexing():
"""
Is the blockstore daemon synchronizing with the blockchain?
"""
indexing_path = get_indexing_lockfile()
if os.path.exists( indexing_path ):
return True
else:
return False
def set_indexing( flag ):
"""
Set a flag in the filesystem as to whether or not we're indexing.
"""
indexing_path = get_indexing_lockfile()
if flag:
try:
fd = open( indexing_path, "w+" )
fd.close()
return True
except:
return False
else:
try:
os.unlink( indexing_path )
return True
except:
return False
def run_server( foreground=False ):
"""
Run the blockstored RPC server, optionally in the foreground.
"""
global indexer_pid
bt_opts = get_bitcoin_opts()
tac_file = get_tacfile_path()
access_log_file = get_logfile_path() + ".access"
indexer_log_file = get_logfile_path() + ".indexer"
pid_file = get_pidfile_path()
start_block, current_block = get_index_range()
argv0 = os.path.normpath( sys.argv[0] )
if os.path.exists("./%s" % argv0 ):
indexer_command = ("%s indexer" % (os.path.join( os.getcwd(), argv0))).split()
else:
        # hope it's in the $PATH
indexer_command = ("%s indexer" % argv0).split()
logfile = None
if not foreground:
api_server_command = ('twistd --pidfile=%s --logfile=%s -noy %s' % (pid_file,
access_log_file,
tac_file)).split()
try:
if os.path.exists( indexer_log_file ):
logfile = open( indexer_log_file, "a" )
else:
logfile = open( indexer_log_file, "a+" )
except OSError, oe:
log.error("Failed to open '%s': %s" % (indexer_log_file, oe.strerror))
sys.exit(1)
# become a daemon
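        # Classic double-fork daemonization: the first fork lets this parent
        # exit, setsid() detaches from the controlling terminal, and the
        # second fork guarantees the daemon can never reacquire a terminal;
        # the grandchild is re-parented to init.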
child_pid = os.fork()
if child_pid == 0:
# child! detach, setsid, and make a new child to be adopted by init
sys.stdin.close()
os.dup2( logfile.fileno(), sys.stdout.fileno() )
os.dup2( logfile.fileno(), sys.stderr.fileno() )
os.setsid()
daemon_pid = os.fork()
if daemon_pid == 0:
# daemon!
os.chdir("/")
elif daemon_pid > 0:
# parent!
sys.exit(0)
else:
# error
sys.exit(1)
elif child_pid > 0:
# parent
# wait for child
pid, status = os.waitpid( child_pid, 0 )
sys.exit(status)
else:
# foreground
api_server_command = ('twistd --pidfile=%s -noy %s' % (pid_file, tac_file)).split()
# start API server
blockstored = subprocess.Popen( api_server_command, shell=False)
set_indexing( False )
if start_block != current_block:
# bring us up to speed
set_indexing( True )
blockstore_state_engine = get_state_engine()
virtualchain.sync_virtualchain( bt_opts, current_block, blockstore_state_engine )
set_indexing( False )
# fork the indexer
if foreground:
indexer = subprocess.Popen( indexer_command, shell=False )
else:
indexer = subprocess.Popen( indexer_command, shell=False, stdout=logfile, stderr=logfile )
indexer_pid = indexer.pid
# wait for the API server to die (we kill it with `blockstored stop`)
blockstored.wait()
# stop our indexer subprocess
indexer_pid = None
os.kill( indexer.pid, signal.SIGINT )
indexer.wait()
logfile.flush()
logfile.close()
# stop building new state if we're in the middle of it
db = get_state_engine()
virtualchain.stop_sync_virtualchain( db )
return blockstored.returncode
def setup( return_parser=False ):
"""
Do one-time initialization.
Call this to set up global state and set signal handlers.
If return_parser is True, return a partially-
setup argument parser to be populated with
subparsers (i.e. as part of main())
Otherwise return None.
"""
global blockstore_opts
global blockchain_client
global blockchain_broadcaster
global bitcoin_opts
global utxo_opts
global dht_opts
# set up our implementation
virtualchain.setup_virtualchain( blockstore_state_engine )
# acquire configuration, and store it globally
blockstore_opts, bitcoin_opts, utxo_opts, dht_opts = configure( interactive=True )
# merge in command-line bitcoind options
config_file = virtualchain.get_config_filename()
arg_bitcoin_opts = None
argparser = None
if return_parser:
arg_bitcoin_opts, argparser = virtualchain.parse_bitcoind_args( return_parser=return_parser )
else:
arg_bitcoin_opts = virtualchain.parse_bitcoind_args( return_parser=return_parser )
# command-line overrides config file
for (k, v) in arg_bitcoin_opts.items():
bitcoin_opts[k] = v
# store options
set_bitcoin_opts( bitcoin_opts )
set_utxo_opts( utxo_opts )
if return_parser:
return argparser
else:
return None
def reconfigure():
"""
Reconfigure blockstored.
"""
configure( force=True )
print "Blockstore successfully reconfigured."
sys.exit(0)
def clean( confirm=True ):
"""
Remove blockstore's db, lastblock, and snapshot files.
Prompt for confirmation
"""
delete = False
exit_status = 0
if confirm:
warning = "WARNING: THIS WILL DELETE YOUR BLOCKSTORE DATABASE!\n"
warning+= "Are you sure you want to proceed?\n"
warning+= "Type 'YES' if so: "
value = raw_input( warning )
if value != "YES":
sys.exit(exit_status)
else:
delete = True
else:
delete = True
if delete:
print "Deleting..."
db_filename = virtualchain.get_db_filename()
lastblock_filename = virtualchain.get_lastblock_filename()
snapshots_filename = virtualchain.get_snapshots_filename()
for path in [db_filename, lastblock_filename, snapshots_filename]:
try:
os.unlink( path )
except:
log.warning("Unable to delete '%s'" % path)
exit_status = 1
sys.exit(exit_status)
def run_blockstored():
"""
run blockstored
"""
argparser = setup( return_parser=True )
# get RPC server options
subparsers = argparser.add_subparsers(
dest='action', help='the action to be taken')
parser_server = subparsers.add_parser(
'start',
help='start the blockstored server')
parser_server.add_argument(
'--foreground', action='store_true',
help='start the blockstored server in foreground')
parser_server = subparsers.add_parser(
'stop',
help='stop the blockstored server')
parser_server = subparsers.add_parser(
'reconfigure',
help='reconfigure the blockstored server')
parser_server = subparsers.add_parser(
'clean',
help='remove all blockstore database information')
parser_server.add_argument(
'--force', action='store_true',
help='Do not confirm the request to delete.')
parser_server = subparsers.add_parser(
'indexer',
help='run blockstore indexer worker')
args, _ = argparser.parse_known_args()
log.debug( "bitcoin options: %s" % bitcoin_opts )
if args.action == 'start':
if os.path.exists( get_pidfile_path() ):
log.error("Blockstored appears to be running already. If not, please run '%s stop'" % (sys.argv[0]))
sys.exit(1)
if args.foreground:
log.info('Initializing blockstored server in foreground ...')
exit_status = run_server( foreground=True )
log.info("Service endpoint exited with status code %s" % exit_status )
else:
log.info('Starting blockstored server ...')
run_server()
elif args.action == 'stop':
stop_server()
elif args.action == 'reconfigure':
reconfigure()
elif args.action == 'clean':
clean( not args.force )
elif args.action == 'indexer':
run_indexer()
if __name__ == '__main__':
run_blockstored()
| john-light/blockstore | blockstore/blockstored.py | Python | gpl-3.0 | 32,472 |
# -*- coding: utf-8 -*-
"""This file contains the analysis plugin manager class."""
from __future__ import unicode_literals
from plaso.analysis import definitions
class AnalysisPluginManager(object):
"""Analysis plugin manager."""
_plugin_classes = {}
_PLUGIN_TYPE_STRINGS = {
definitions.PLUGIN_TYPE_ANNOTATION: (
'Annotation/Tagging plugin'),
definitions.PLUGIN_TYPE_ANOMALY: (
'Anomaly plugin'),
definitions.PLUGIN_TYPE_REPORT: (
'Summary/Report plugin'),
definitions.PLUGIN_TYPE_STATISTICS: (
'Statistics plugin')
}
@classmethod
def DeregisterPlugin(cls, plugin_class):
"""Deregisters an analysis plugin class.
The analysis plugin classes are identified by their lower case name.
Args:
plugin_class (type): class of the analysis plugin.
Raises:
KeyError: if an analysis plugin class is not set for the corresponding
name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name not in cls._plugin_classes:
raise KeyError('Plugin class not set for name: {0:s}.'.format(
plugin_class.NAME))
del cls._plugin_classes[plugin_name]
# TODO: refactor to match parsers manager.
@classmethod
def GetAllPluginInformation(cls):
"""Retrieves a list of the registered analysis plugins.
Returns:
list[tuple[str, str, str]]: the name, docstring and type string of each
analysis plugin in alphabetical order.
"""
results = []
for plugin_class in cls._plugin_classes.values():
plugin_object = plugin_class()
# TODO: Use a specific description variable, not the docstring.
doc_string, _, _ = plugin_class.__doc__.partition('\n')
      type_string = cls._PLUGIN_TYPE_STRINGS.get(
          plugin_object.plugin_type, 'Unknown type')
information_tuple = (plugin_object.plugin_name, doc_string, type_string)
results.append(information_tuple)
return sorted(results)
@classmethod
def GetPluginNames(cls):
"""Retrieves the analysis plugin names.
Returns:
list[str]: analysis plugin names.
"""
return sorted(cls._plugin_classes.keys())
@classmethod
def GetPluginObjects(cls, plugin_names):
"""Retrieves the plugin objects.
Args:
plugin_names (list[str]): names of plugins that should be retrieved.
Returns:
dict[str, AnalysisPlugin]: analysis plugins per name.
"""
plugin_objects = {}
for plugin_name, plugin_class in cls._plugin_classes.items():
if plugin_name not in plugin_names:
continue
plugin_objects[plugin_name] = plugin_class()
return plugin_objects
@classmethod
def GetPlugins(cls):
"""Retrieves the registered analysis plugin classes.
Yields:
tuple: containing:
str: name of the plugin
type: plugin class
"""
for plugin_name, plugin_class in cls._plugin_classes.items():
yield plugin_name, plugin_class
@classmethod
def RegisterPlugin(cls, plugin_class):
"""Registers an analysis plugin class.
    The analysis plugin classes are identified based on their lower case name.
Args:
plugin_class (type): class of the analysis plugin.
Raises:
KeyError: if an analysis plugin class is already set for the corresponding
name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name in cls._plugin_classes:
raise KeyError('Plugin class already set for name: {0:s}.'.format(
plugin_class.NAME))
cls._plugin_classes[plugin_name] = plugin_class
@classmethod
def RegisterPlugins(cls, plugin_classes):
"""Registers analysis plugin classes.
The analysis plugin classes are identified based on their lower case name.
Args:
plugin_classes (list[type]): classes of the analysis plugin.
Raises:
KeyError: if an analysis plugin class is already set for the corresponding
name.
"""
for plugin_class in plugin_classes:
cls.RegisterPlugin(plugin_class)
| rgayon/plaso | plaso/analysis/manager.py | Python | apache-2.0 | 4,070 |
#!/usr/bin/env python
# -*- coding: utf-8, utf-16 -*-
# ^ The above line is a hack for python2 with its poor unicode implementation to even support compiling this test.
# So long as the utf-16 strings are also utf-8 compatible (albeit nonsense), it will work with python2.
#
# In this test we set the defaultIREncoding to "ascii" to make sure we are actually working with the data correctly,
# and using the correct encoding (i.e. to_unicode would try to encode as ascii by default, so we are sure it is using the provided
# encoding, and not some default from sys or defaultIREncoding)
# Copyright (c) 2017 Timothy Savannah under LGPL version 2.1. See LICENSE for more information.
#
# TestIRUnicodeField - Test the IRUnicodeField
#
# Import and apply the properties (like Redis connection parameters) for this test.
import TestProperties
# vim: set ts=4 sw=4 st=4 expandtab
import sys
import subprocess
from IndexedRedis import IndexedRedisModel, irNull
from IndexedRedis.compat_str import tobytes, getDefaultIREncoding, setDefaultIREncoding, to_unicode
from IndexedRedis.fields import IRField, IRUnicodeField
# vim: ts=4 sw=4 expandtab
class TestIRUnicodeField(object):
'''
TestIRUnicodeField - Test some IRUnicodeField stuff
'''
KEEP_DATA = False
def setup_class(self):
self.origIREncoding = getDefaultIREncoding()
setDefaultIREncoding('ascii') # Make sure IRField stuff would normally fail with utf-8 specific codes
self.prettyPicturesUtf8 = b' \xe2\x9c\x8f \xe2\x9c\x90 \xe2\x9c\x91 \xe2\x9c\x92 \xe2\x9c\x93 \xe2\x9c\x94 \xe2\x9c\x95 \xe2\x9c\x96 \xe2\x9c\x97 \xe2\x9c\x98 \xe2\x9c\x99 \xe2\x9c\x9a \xe2\x9c\x9b \xe2\x9c\x9c \xe2\x9c\x9d \xe2\x9c\x9e \xe2\x9c\x9f \xe2\x9c\xa0 \xe2\x9c\xa1 \xe2\x9c\xa2 \xe2\x9c\xa3 \xe2\x9c\xa4 \xe2\x9c\xa5 \xe2\x9c\xa6 \xe2\x9c\xa7 \xe2\x9c\xa9 \xe2\x9c\xaa \xe2\x9c\xab '
# Note - just use the bytes value and decode, rather than rely on the syntax parser to do it properly (python2 has issues)
self.utf16DataBytes = b'\xff\xfe\x01\xd8\x0f\xdc\x01\xd8-\xdc\x01\xd8;\xdc\x01\xd8+\xdc'
self.utf16Data = self.utf16DataBytes.decode('utf-16')
self.utf16Data2Bytes = b'\xff\xfe\x01\xd8\x88\xdc\x01\xd8\x9d\xdc\x01\xd8\x91\xdc\x01\xd8\x9b\xdc\x01\xd8\x90\xdc\x01\xd8\x98\xdc\x01\xd8\x95\xdc\x01\xd8\x96\xdc'
self.utf16Data2 = self.utf16Data2Bytes.decode('utf-16')
def teardown_class(self):
setDefaultIREncoding(self.origIREncoding)
def setup_method(self, testMethod):
'''
setup_method - Called before every method. Should set "self.model" to the model needed for the test.
@param testMethod - Instance method of test about to be called.
'''
self.model = None
if testMethod in (self.test_general, ):
class Model_GeneralUnicode(IndexedRedisModel):
FIELDS = [
IRField('name'),
IRUnicodeField('value', defaultValue=irNull, encoding='utf-8'),
]
INDEXED_FIELDS = ['name']
KEY_NAME='TestIRUnicodeField__GeneralUnicode'
self.model = Model_GeneralUnicode
elif testMethod == self.test_defaultValue:
class Model_UnicodeDefaultValue(IndexedRedisModel):
FIELDS = [
IRField('name'),
IRUnicodeField('value', defaultValue=u'qqq', encoding='utf-8'),
]
INDEXED_FIELDS = ['name']
KEY_NAME = 'TestIRUnicodeField__UnicodeDefaultValue'
self.model = Model_UnicodeDefaultValue
elif testMethod == self.test_utf16:
class Model_Utf16(IndexedRedisModel):
FIELDS = [
IRField('name'),
IRUnicodeField('value', encoding='utf-16', defaultValue=irNull),
IRUnicodeField('value2', encoding='utf-16', defaultValue=''),
]
INDEXED_FIELDS = ['name', 'value']
KEY_NAME='TestIRUnicodeField__ModelUtf16'
self.model = Model_Utf16
# If KEEP_DATA is False (debug flag), then delete all objects before so prior test doesn't interfere
if self.KEEP_DATA is False and self.model:
self.model.deleter.destroyModel()
def teardown_method(self, testMethod):
'''
teardown_method - Called after every method.
If self.model is set, will delete all objects relating to that model. To retain objects for debugging, set TestIRField.KEEP_DATA to True.
'''
        setDefaultIREncoding(self.origIREncoding) # Revert back to the original encoding
if self.model and self.KEEP_DATA is False:
self.model.deleter.destroyModel()
def test_general(self):
Model = self.model
prettyPicturesUtf8 = self.prettyPicturesUtf8
prettyPicturesUtf8Unicode = to_unicode(prettyPicturesUtf8, encoding='utf-8')
obj = Model()
updatedFields = obj.getUpdatedFields()
        assert updatedFields == {} , 'Expected no updated fields when object is first created.\nExpected: %s\nGot: %s' %(repr({}), repr(updatedFields) )
assert obj.value == irNull , 'Expected default value of IRUnicodeField to be irNull when defaultValue=irNull'
obj.name = 'one'
obj.save()
assert obj.getUpdatedFields() == {} , 'Expected no updated fields after object is saved'
obj.value = prettyPicturesUtf8
assert obj.value == prettyPicturesUtf8Unicode , 'Expected IRUnicodeField value to be some unicode after setting'
try:
dictConverted = obj.asDict(forStorage=False, strKeys=True)
dictForStorage = obj.asDict(forStorage=True, strKeys=True)
except Exception as e:
raise AssertionError('Expected to be able to convert to dict for both storage and non-storage. Got exception: %s %s' %(e.__class__.__name__, str(e)))
assert dictConverted['value'] == prettyPicturesUtf8Unicode , 'Expected asDict(forStorage=False) to contain IRUnicodeField value as unicode string. Got: %s' %(repr(dictConverted['value']), )
assert dictForStorage['value'] == prettyPicturesUtf8 , 'Expected asDict(forStorage=True) to contain IRUnicodeField that was bytes.\nExpected: %s\nGot: %s' %(repr(prettyPicturesUtf8), repr(dictForStorage['value']) )
updatedFields = obj.getUpdatedFields()
assert 'value' in updatedFields , 'Expected "value" to show in updated fields after updating'
assert updatedFields['value'][0] == irNull , 'Expected old value to be irNull in updatedFields. Got: %s' %(repr(updatedFields['value'][0]), )
assert updatedFields['value'][1] == prettyPicturesUtf8Unicode , 'Expected converted value to be new value in updatedFields. Got: %s' %(repr(updatedFields['value'][1]), )
obj.save()
updatedFields = obj.getUpdatedFields()
assert updatedFields == {} , 'Expected updatedFields to be clear after saving.'
fetchObj = Model.objects.filter(name='one').first()
assert fetchObj , 'Expected to be able to fetch object on name="one" after saving.'
obj = fetchObj
assert obj.value == prettyPicturesUtf8Unicode , 'Expected value of fetched to be unicode string, %s. Got: %s' %(repr(prettyPicturesUtf8Unicode), repr(fetchObj.value), )
updatedFields = obj.getUpdatedFields()
assert updatedFields == {} , 'Expected updatedFields to be clear after fetching'
try:
dictConverted = obj.asDict(forStorage=False, strKeys=True)
dictForStorage = obj.asDict(forStorage=True, strKeys=True)
except Exception as e:
raise AssertionError('Expected to be able to convert to dict for both storage and non-storage. Got exception: %s %s' %(e.__class__.__name__, str(e)))
assert dictConverted['value'] == prettyPicturesUtf8Unicode, 'After fetching, Expected asDict(forStorage=False) to contain IRUnicodeField value as unicode string. Got: %s' %(dictConverted['value'], )
assert dictForStorage['value'] == prettyPicturesUtf8, 'After fetching, Expected asDict(forStorage=True) to contain IRUnicodeField as bytes.\nExpected: %s\nGot: %s' %(repr(prettyPicturesUtf8), repr(dictForStorage['value']) )
obj.value = b'q123'
updatedFields = obj.getUpdatedFields()
assert 'value' in updatedFields , 'Expected "value" to show in updated fields after updating on fetched object'
        assert updatedFields['value'][0] == prettyPicturesUtf8Unicode , 'Expected old value to be the pretty-pictures unicode string in updatedFields. Got: %s' %(repr(updatedFields['value'][0]), )
assert updatedFields['value'][1] == u'q123' , 'Expected converted value to be new value in updatedFields. Got: %s' %(repr(updatedFields['value'][1]), )
dictConverted = obj.asDict(forStorage=False, strKeys=True)
dictForStorage = obj.asDict(forStorage=True, strKeys=True)
assert dictConverted['value'] == u'q123', 'After fetching, then updating, Expected asDict(forStorage=False) to contain IRUnicodeField value as unicode string. Got: %s' %(dictConverted['value'], )
assert dictForStorage['value'] == b'q123' , 'After fetching, then updating, Expected asDict(forStorage=True) to contain IRUnicodeField as bytes.\nExpected: %s\nGot: %s' %(repr(u'q123'), repr(dictForStorage['value']) )
obj.save()
updatedFields = obj.getUpdatedFields()
assert updatedFields == {} , 'Expected updatedFields to be clear after saving'
def test_defaultValue(self):
Model = self.model
obj = Model()
        assert obj.value == u'qqq' , 'Expected defaultValue to be applied to a unicode field.\nExpected: %s\nGot: %s' %(repr(u'qqq'), repr(obj.value), )
obj.name = 'test'
obj.save()
assert obj.value == u'qqq' , 'Expected defaultValue to remain on a unicode field after saving'
objFetched = Model.objects.filter(name='test').first()
assert objFetched , 'Expected to be able to fetch object'
obj = objFetched
assert obj.value == u'qqq' , 'Expected defaultValue to remain on a unicode field after fetching'
obj.value = 'cheesy'
obj.save()
objFetched = Model.objects.filter(name='test').first()
assert objFetched , 'Expected to be able to fetch object'
obj = objFetched
assert obj.value == u'cheesy' , 'Expected to be able to change value from default.'
def test_utf16(self):
Model = self.model
obj = Model()
assert obj.getUpdatedFields() == {} , 'Expected getUpdatedFields to be blank on new object'
obj.name = 'one'
assert obj.value == irNull , 'Expected defaultValue=irNull to be honoured'
assert obj.value2 == u'', 'Expected defaultValue="" to be honoured'
updatedFields = obj.getUpdatedFields()
assert list(updatedFields.keys()) == ['name'] , 'Expected just one entry on updatedFields when just "name" has been changed. Got: %s' %(repr(updatedFields), )
ids = obj.save()
        assert ids and ids[0] , 'Failed to save object'
fetchedObj = Model.objects.filter(name='one').first()
assert fetchedObj , 'Failed to fetch object'
obj = fetchedObj
updatedFields = obj.getUpdatedFields()
assert updatedFields == {}, 'Expected updatedFields to be empty after fetch'
assert obj.value == irNull , 'Expected defaultValue=irNull to be retained after no change and save then fetch'
assert obj.value2 == u'', 'Expected defaultValue="" to be retained after no change and save then fetch'
obj.value = self.utf16Data
obj.save()
otherObj = Model()
otherObj.name = 'two'
otherObj.value = self.utf16Data2Bytes
assert otherObj.value == self.utf16Data2 , 'Expected utf-16 data provided as bytes to be converted to unicode string'
otherObj.save()
fetchedObj = Model.objects.filter(value=self.utf16Data).first()
assert fetchedObj , 'Failed to fetch object on utf-16 data'
obj = fetchedObj
assert obj.name == 'one' , 'Fetched wrong object on utf-16 data'
assert obj.value == self.utf16Data , 'Expected fetched object to contain the correct data'
assert obj.value.encode('utf-16') == self.utf16DataBytes , 'Expected data to be encodable back into bytes'
if __name__ == '__main__':
sys.exit(subprocess.Popen('GoodTests.py -n1 "%s" %s' %(sys.argv[0], ' '.join(['"%s"' %(arg.replace('"', '\\"'), ) for arg in sys.argv[1:]]) ), shell=True).wait())
# vim: set ts=4 sw=4 expandtab
| kata198/indexedredis | tests/UnitTests/test_IRUnicodeField.py | Python | lgpl-2.1 | 12,858 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_ap_status
short_description: Configure access point status (rogue | accepted | suppressed) in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller feature and ap_status category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_ap_status:
description:
- Configure access point status (rogue | accepted | suppressed).
default: null
type: dict
suboptions:
bssid:
description:
- Access Point's (AP's) BSSID.
type: str
id:
description:
- AP ID.
required: true
type: int
ssid:
description:
- Access Point's (AP's) SSID.
type: str
status:
description:
- "Access Point's (AP's) status: rogue, accepted, or suppressed."
type: str
choices:
- rogue
- accepted
- suppressed
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure access point status (rogue | accepted | suppressed).
fortios_wireless_controller_ap_status:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wireless_controller_ap_status:
bssid: "<your_own_value>"
id: "4"
ssid: "<your_own_value>"
status: "rogue"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_wireless_controller_ap_status_data(json):
option_list = ['bssid', 'id', 'ssid',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
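# Illustrative example: filter_wireless_controller_ap_status_data(
#     {'bssid': 'xx:xx', 'vdom': 'root'}) keeps only {'bssid': 'xx:xx'};
# keys outside option_list are dropped.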
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
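# Illustrative example: underscore_to_hyphen({'ap_status': {'some_key': 1}})
# returns {'ap-status': {'some-key': 1}}, matching FortiOS's hyphenated keys.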
def wireless_controller_ap_status(data, fos):
vdom = data['vdom']
state = data['state']
wireless_controller_ap_status_data = data['wireless_controller_ap_status']
filtered_data = underscore_to_hyphen(filter_wireless_controller_ap_status_data(wireless_controller_ap_status_data))
if state == "present":
return fos.set('wireless-controller',
'ap-status',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller',
'ap-status',
mkey=filtered_data['id'],
vdom=vdom)
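# A DELETE answered with HTTP 404 is treated as success below: the object was
# already absent, so the desired "absent" state holds.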
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wireless_controller(data, fos):
if data['wireless_controller_ap_status']:
resp = wireless_controller_ap_status(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_ap_status": {
"required": False, "type": "dict", "default": None,
"options": {
"bssid": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"ssid": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["rogue", "accepted", "suppressed"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| kustodian/ansible | lib/ansible/modules/network/fortios/fortios_wireless_controller_ap_status.py | Python | gpl-3.0 | 10,160 |
'''
Integration test for creating a KVM VM in MN HA mode with one MN host: the network of the host that the MN VM runs on is shut down and then recovered.
@author: Mirabel
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import test_stub
import time
import os
vm = None
mn_host = None
def test():
global vm
global mn_host
test_stub.skip_if_scenario_is_multiple_networks()
mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
if len(mn_host) != 1:
test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
test_util.test_logger("shutdown host's network [%s] that mn vm is running on" % (mn_host[0].ip_))
test_stub.shutdown_host_network(mn_host[0], test_lib.all_scenario_config)
test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
time.sleep(20)
new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
count = 60
while count > 0:
new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
if len(new_mn_host) == 1:
test_util.test_logger("management node VM run after its former host down for 30s")
break
elif len(new_mn_host) > 1:
test_util.test_fail("management node VM runs on more than one host after its former host down")
time.sleep(5)
count -= 1
if len(new_mn_host) == 0:
test_util.test_fail("management node VM does not run after its former host down for 30s")
elif len(new_mn_host) > 1:
test_util.test_fail("management node VM runs on more than one host after its former host down")
#node_ops.wait_for_management_server_start()
test_stub.wrapper_of_wait_for_management_server_start(600)
test_stub.ensure_hosts_connected(exclude_host=[mn_host[0]])
test_stub.ensure_bss_host_connected_from_stop(test_lib.scenario_file, test_lib.all_scenario_config, test_lib.deploy_config)
test_stub.ensure_pss_connected()
test_stub.ensure_bss_connected()
test_stub.return_pass_ahead_if_3sites("TEST PASS")
vm = test_stub.create_basic_vm()
vm.check()
vm.destroy()
test_util.test_pass('Create VM Test Success')
#Will be called what ever test result is
def env_recover():
test_stub.reopen_host_network(mn_host[0], test_lib.all_scenario_config)
test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
#test_stub.recover_host(mn_host[0], test_lib.all_scenario_config, test_lib.deploy_config)
#Will be called only if exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
| zstackorg/zstack-woodpecker | integrationtest/vm/mn_ha/test_one_mn_host_network_down_create_vm.py | Python | apache-2.0 | 3,288 |
#!/usr/bin/env python
# coding: utf8
from bottle import route, run, request
import json
import requests
plug_host = plug_port = None
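# A plugin announces itself by POSTing JSON like
#   {"plug": "hellopy", "host": "127.0.0.1", "port": 8001}
# to /register (field names taken from reg() below; the values are made up).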
@route('/register', method='POST')
def reg():
plug_data = request.json
print "Registered: %s at %s:%s" % (plug_data['plug'], plug_data['host'], plug_data['port'])
global plug_host, plug_port
plug_host = plug_data['host']
plug_port = plug_data['port']
print "Pinging"
response = _plug('ping', {'ping': plug_data['plug']})
if 'pong' in response and response['pong'] == plug_data['plug']:
print "Pong OK"
else:
print "Pong ERROR, got: %s" % response
print "Calling process"
response = _plug('process', {'data': "Florin"})
print "Got: %s" % response['result']
def _plug(action, data):
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
return requests.post("http://%s:%d/%s" % (plug_host, plug_port, action), data=json.dumps(data), headers=headers).json()
if __name__ == '__main__':
run(host='localhost', port=8000, debug=True)
| talpah/cortana | plugins/hellopy/cortana.py | Python | mit | 1,076 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a docstring"""
WEEKS = (((19 % 10) + 100) + (2 ** 8)) / 7
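# Step by step: 19 % 10 == 9; 9 + 100 == 109; 2 ** 8 == 256; 109 + 256 == 365;
# 365 / 7 == 52 under Python 2's integer (floor) division.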
print WEEKS
| eliz79/is210-week-03-warmup | task_02.py | Python | mpl-2.0 | 129 |
# coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for meta_dataset.learners.experimental.metric_learners."""
import gin.tf
from meta_dataset.learners import base_test
from meta_dataset.learners.experimental import optimization_learners
import tensorflow.compat.v1 as tf
tf.compat.v1.disable_eager_execution()
tf.compat.v1.experimental.output_all_intermediates(True)
def mock_sgd():
def init(x0):
return x0
def update(i, grad, state):
del i
x = state
return x - 0.01 * grad
def get_params(state):
x = state
return x
return init, update, get_params
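# The returned triple follows an (init, update, get_params) optimizer
# interface; illustrative usage (variable names made up):
#   init, update, get_params = mock_sgd()
#   state = init(params)                # state is just the parameters here
#   state = update(step, grads, state)  # one SGD step: x - 0.01 * grad
#   params = get_params(state)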
optimization_learner_kwargs = {
'backprop_through_moments': True,
'input_shape': [84, 84, 3],
'logit_dim': 5,
'is_training': True,
'update_fn': mock_sgd,
'additional_evaluation_update_steps': 5,
'clip_grad_norm': 10.0,
'num_update_steps': 5,
}
class FirstOrderMAMLTest(base_test.TestEpisodicLearner):
learner_cls = optimization_learners.MAML
learner_kwargs = dict(
**optimization_learner_kwargs, **{
'transductive_batch_norm': False,
'proto_maml_fc_layer_init': False,
'zero_fc_layer_init': False,
          'first_order': True,
'adapt_batch_norm': True,
})
class VanillaMAMLTest(base_test.TestEpisodicLearner):
learner_cls = optimization_learners.MAML
learner_kwargs = dict(
**optimization_learner_kwargs, **{
'transductive_batch_norm': True,
'proto_maml_fc_layer_init': False,
'zero_fc_layer_init': False,
'first_order': False,
'adapt_batch_norm': False,
})
class ProtoMAMLTest(base_test.TestEpisodicLearner):
def setUp(self):
super().setUp()
gin.bind_parameter('proto_maml_fc_layer_init_fn.prototype_multiplier', 1.0)
def tearDown(self):
gin.clear_config()
super().tearDown()
learner_cls = optimization_learners.MAML
learner_kwargs = dict(
**optimization_learner_kwargs, **{
'transductive_batch_norm': False,
'proto_maml_fc_layer_init': True,
'zero_fc_layer_init': False,
'first_order': False,
'adapt_batch_norm': True,
})
class FirstOrderANILTest(base_test.TestEpisodicLearner):
learner_cls = optimization_learners.ANIL
learner_kwargs = dict(
**optimization_learner_kwargs, **{
'transductive_batch_norm': False,
'proto_maml_fc_layer_init': False,
'zero_fc_layer_init': False,
          'first_order': True,
'adapt_batch_norm': True,
})
class VanillaANILTest(base_test.TestEpisodicLearner):
learner_cls = optimization_learners.ANIL
learner_kwargs = dict(
**optimization_learner_kwargs, **{
'transductive_batch_norm': True,
'proto_maml_fc_layer_init': False,
'zero_fc_layer_init': False,
'first_order': False,
'adapt_batch_norm': False,
})
class ProtoANILTest(base_test.TestEpisodicLearner):
def setUp(self):
super().setUp()
gin.bind_parameter('proto_maml_fc_layer_init_fn.prototype_multiplier', 1.0)
def tearDown(self):
gin.clear_config()
super().tearDown()
learner_cls = optimization_learners.ANIL
learner_kwargs = dict(
**optimization_learner_kwargs, **{
'transductive_batch_norm': False,
'proto_maml_fc_layer_init': True,
'zero_fc_layer_init': False,
'first_order': False,
'adapt_batch_norm': True,
})
if __name__ == '__main__':
tf.test.main()
| google-research/meta-dataset | meta_dataset/learners/experimental/optimization_learners_test.py | Python | apache-2.0 | 4,094 |
# ENVISIoN
#
# Copyright (c) 2018 Jesper Ericsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
import os, sys, inspect, inviwopy
path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(path_to_current_folder + "/../")
import envisionpy
import envisionpy.hdf5parser
from envisionpy.processor_network.PCFNetworkHandler import PCFNetworkHandler
VASP_DIR = path_to_current_folder + "/../unit_testing/resources/LiC_pair_corr_func"
HDF5_FILE = path_to_current_folder + "/../demo_pcf.hdf5"
# Parse for pair correlation function (PCF) visualisation.
envisionpy.hdf5parser.paircorrelation(HDF5_FILE, VASP_DIR)
inviwopy.app.network.clear()
networkHandler = PCFNetworkHandler(HDF5_FILE, inviwopy.app)
|
rartino/ENVISIoN
|
demo/PCF.py
|
Python
|
bsd-2-clause
| 2,129
|
from dask.local import get_sync
from dask.threaded import get as get_threaded
from dask.callbacks import Callback
from dask.utils_test import add
def test_start_callback():
flag = [False]
class MyCallback(Callback):
def _start(self, dsk):
flag[0] = True
with MyCallback():
get_sync({'x': 1}, 'x')
assert flag[0] is True
def test_start_state_callback():
flag = [False]
class MyCallback(Callback):
def _start_state(self, dsk, state):
flag[0] = True
assert dsk['x'] == 1
assert len(state['cache']) == 1
with MyCallback():
get_sync({'x': 1}, 'x')
assert flag[0] is True
def test_finish_always_called():
flag = [False]
class MyCallback(Callback):
def _finish(self, dsk, state, errored):
flag[0] = True
assert errored
dsk = {'x': (lambda: 1 / 0,)}
# `raise_on_exception=True`
try:
with MyCallback():
get_sync(dsk, 'x')
except Exception as e:
assert isinstance(e, ZeroDivisionError)
assert flag[0]
# `raise_on_exception=False`
flag[0] = False
try:
with MyCallback():
get_threaded(dsk, 'x')
except Exception as e:
assert isinstance(e, ZeroDivisionError)
assert flag[0]
# KeyboardInterrupt
def raise_keyboard():
raise KeyboardInterrupt()
dsk = {'x': (raise_keyboard,)}
flag[0] = False
try:
with MyCallback():
get_sync(dsk, 'x')
except BaseException as e:
assert isinstance(e, KeyboardInterrupt)
assert flag[0]
def test_nested_schedulers():
class MyCallback(Callback):
def _start(self, dsk):
self.dsk = dsk
def _pretask(self, key, dsk, state):
assert key in self.dsk
inner_callback = MyCallback()
inner_dsk = {'x': (add, 1, 2),
'y': (add, 'x', 3)}
def nested_call(x):
assert not Callback.active
with inner_callback:
return get_threaded(inner_dsk, 'y') + x
outer_callback = MyCallback()
outer_dsk = {'a': (nested_call, 1),
'b': (add, 'a', 2)}
with outer_callback:
get_threaded(outer_dsk, 'b')
assert not Callback.active
assert outer_callback.dsk == outer_dsk
assert inner_callback.dsk == inner_dsk
assert not Callback.active
def test_add_remove_mutates_not_replaces():
assert not Callback.active
with Callback():
assert Callback.active
assert not Callback.active
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/dask/tests/test_callbacks.py
|
Python
|
gpl-3.0
| 2,569
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the EnsemblRelease class, which extends the Genome class
to be specific to (a particular release of) Ensembl.
"""
from weakref import WeakValueDictionary
from .genome import Genome
from .ensembl_release_versions import check_release_number, MAX_ENSEMBL_RELEASE
from .species import check_species_object, human
from .ensembl_url_templates import (
ENSEMBL_FTP_SERVER,
make_gtf_url,
make_fasta_url
)
class EnsemblRelease(Genome):
"""
Bundles together the genomic annotation and sequence data associated with
a particular release of the Ensembl database.
"""
@classmethod
def normalize_init_values(cls, release, species, server):
"""
Normalizes the arguments which uniquely specify an EnsemblRelease
genome.
"""
release = check_release_number(release)
species = check_species_object(species)
return (release, species, server)
# Using a WeakValueDictionary instead of an ordinary dict to prevent a
# memory leak in cases where we test many different releases in sequence.
# When all the references to a particular EnsemblRelease die then that
# genome should also be removed from this cache.
_genome_cache = WeakValueDictionary()
@classmethod
def cached(
cls,
release=MAX_ENSEMBL_RELEASE,
species=human,
server=ENSEMBL_FTP_SERVER):
"""
Construct EnsemblRelease if it's never been made before, otherwise
return an old instance.
"""
init_args_tuple = cls.normalize_init_values(release, species, server)
if init_args_tuple in cls._genome_cache:
genome = cls._genome_cache[init_args_tuple]
else:
genome = cls._genome_cache[init_args_tuple] = cls(*init_args_tuple)
return genome
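    # A minimal sketch of the caching behavior above (hypothetical calls, not
    # part of this module):
    #
    #   a = EnsemblRelease.cached(release=100)
    #   b = EnsemblRelease.cached(release=100)
    #   assert a is b  # identical object while a strong reference survives
    #
    # Because _genome_cache holds only weak references, dropping `a` and `b`
    # allows the genome to be garbage-collected rather than leak.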
def __init__(
self,
release=MAX_ENSEMBL_RELEASE,
species=human,
server=ENSEMBL_FTP_SERVER):
self.release, self.species, self.server = self.normalize_init_values(
release=release, species=species, server=server)
self.gtf_url = make_gtf_url(
ensembl_release=self.release,
species=self.species,
server=self.server)
self.transcript_fasta_urls = [
make_fasta_url(
ensembl_release=self.release,
species=self.species.latin_name,
sequence_type="cdna",
server=server),
make_fasta_url(
ensembl_release=self.release,
species=self.species.latin_name,
sequence_type="ncrna",
server=server)
]
self.protein_fasta_urls = [
make_fasta_url(
ensembl_release=self.release,
species=self.species.latin_name,
sequence_type="pep",
server=self.server)]
self.reference_name = self.species.which_reference(self.release)
Genome.__init__(
self,
reference_name=self.reference_name,
annotation_name="ensembl",
annotation_version=self.release,
gtf_path_or_url=self.gtf_url,
transcript_fasta_paths_or_urls=self.transcript_fasta_urls,
protein_fasta_paths_or_urls=self.protein_fasta_urls)
def install_string(self):
return "pyensembl install --release %d --species %s" % (
self.release,
self.species.latin_name)
def __str__(self):
return "EnsemblRelease(release=%d, species='%s')" % (
self.release,
self.species.latin_name)
def __eq__(self, other):
return (
other.__class__ is EnsemblRelease and
self.release == other.release and
self.species == other.species)
def __hash__(self):
return hash((self.release, self.species))
def to_dict(self):
return {
"release": self.release,
"species": self.species,
"server": self.server
}
@classmethod
def from_dict(cls, state_dict):
"""
Deserialize EnsemblRelease without creating duplicate instances.
"""
return cls.cached(**state_dict)
def cached_release(release, species="human"):
"""
    Create an EnsemblRelease instance only if one hasn't already been made;
    otherwise return the old instance.
    Kept for backwards compatibility; this functionality has moved into the
    cached method of EnsemblRelease.
"""
return EnsemblRelease.cached(release=release, species=species)
|
hammerlab/pyensembl
|
pyensembl/ensembl_release.py
|
Python
|
apache-2.0
| 5,224
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import BitcoinTestFramework
class CreateCache(BitcoinTestFramework):
# Test network and test nodes are not required:
def set_test_params(self):
self.num_nodes = 0
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
|
particl/particl-core
|
test/functional/create_cache.py
|
Python
|
mit
| 793
|
from coalib.bearlib.abstractions.Linter import linter
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from dependency_management.requirements.PipRequirement import PipRequirement
@linter(executable='dennis-cmd',
output_format='regex',
output_regex=r'(?P<message>(?P<severity>[EW])[0-9]{3}: .*)'
r'\n(?P<line>[0-9]+):.*\n(?P<end_line>[0-9]+):.*',
severity_map={'W': RESULT_SEVERITY.NORMAL,
'E': RESULT_SEVERITY.MAJOR})
class DennisBear:
"""
Lints your translated PO and POT files!
Check multiple lint rules on all the strings in the PO file
generating a list of errors and a list of warnings.
See http://dennis.readthedocs.io/en/latest/linting.html for
list of all error codes.
http://dennis.readthedocs.io/
"""
LANGUAGES = {'po', 'pot'}
REQUIREMENTS = {PipRequirement('dennis', '0.8'),
# Workaround for https://github.com/willkg/dennis/issues/91
PipRequirement('click', '6.6')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax'}
@staticmethod
def create_arguments(filename, file, config_file, allow_untranslated=True):
"""
        :param allow_untranslated: Set to false to display warnings for
                                   unchanged (untranslated) strings.
"""
if allow_untranslated:
return ('lint', filename, '--excluderules', 'W302')
else:
return ('lint', filename)
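# For reference, the output_regex above matches dennis-cmd lint output shaped
# like the following (constructed from the regex itself, not captured from a
# real run):
#
#   W302: Translated string is identical to source string
#   4: msgid "hello"
#   5: msgstr "hello"
#
# The leading W/E letter is mapped to NORMAL/MAJOR severity via severity_map.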
|
horczech/coala-bears
|
bears/gettext/DennisBear.py
|
Python
|
agpl-3.0
| 1,583
|
from redis import DataError
from redis.exceptions import ResponseError
from .exceptions import VersionMismatchException
from .query_result import QueryResult
class GraphCommands:
"""RedisGraph Commands"""
def commit(self):
"""
Create entire graph.
For more information see `CREATE <https://oss.redis.com/redisgraph/master/commands/#create>`_. # noqa
"""
if len(self.nodes) == 0 and len(self.edges) == 0:
return None
query = "CREATE "
for _, node in self.nodes.items():
query += str(node) + ","
query += ",".join([str(edge) for edge in self.edges])
        # Discard trailing comma.
if query[-1] == ",":
query = query[:-1]
return self.query(query)
def query(self, q, params=None, timeout=None, read_only=False, profile=False):
"""
Executes a query against the graph.
For more information see `GRAPH.QUERY <https://oss.redis.com/redisgraph/master/commands/#graphquery>`_. # noqa
Args:
-------
q :
The query.
params : dict
Query parameters.
timeout : int
Maximum runtime for read queries in milliseconds.
read_only : bool
Executes a readonly query if set to True.
profile : bool
Return details on results produced by and time
spent in each operation.
"""
# maintain original 'q'
query = q
# handle query parameters
if params is not None:
query = self._build_params_header(params) + query
# construct query command
# ask for compact result-set format
# specify known graph version
if profile:
cmd = "GRAPH.PROFILE"
else:
cmd = "GRAPH.RO_QUERY" if read_only else "GRAPH.QUERY"
command = [cmd, self.name, query, "--compact"]
        # include timeout if specified
if timeout:
if not isinstance(timeout, int):
raise Exception("Timeout argument must be a positive integer")
command += ["timeout", timeout]
# issue query
try:
response = self.execute_command(*command)
return QueryResult(self, response, profile)
except ResponseError as e:
if "wrong number of arguments" in str(e):
print(
"Note: RedisGraph Python requires server version 2.2.8 or above"
) # noqa
if "unknown command" in str(e) and read_only:
# `GRAPH.RO_QUERY` is unavailable in older versions.
return self.query(q, params, timeout, read_only=False)
raise e
except VersionMismatchException as e:
# client view over the graph schema is out of sync
# set client version and refresh local schema
self.version = e.version
self._refresh_schema()
# re-issue query
return self.query(q, params, timeout, read_only)
def merge(self, pattern):
"""
Merge pattern.
For more information see `MERGE <https://oss.redis.com/redisgraph/master/commands/#merge>`_. # noqa
"""
query = "MERGE "
query += str(pattern)
return self.query(query)
def delete(self):
"""
Deletes graph.
For more information see `DELETE <https://oss.redis.com/redisgraph/master/commands/#delete>`_. # noqa
"""
self._clear_schema()
return self.execute_command("GRAPH.DELETE", self.name)
# declared here, to override the built in redis.db.flush()
def flush(self):
"""
Commit the graph and reset the edges and the nodes to zero length.
"""
self.commit()
self.nodes = {}
self.edges = []
def explain(self, query, params=None):
"""
Get the execution plan for given query,
Returns an array of operations.
For more information see `GRAPH.EXPLAIN <https://oss.redis.com/redisgraph/master/commands/#graphexplain>`_. # noqa
Args:
-------
query:
The query that will be executed.
params: dict
Query parameters.
"""
if params is not None:
query = self._build_params_header(params) + query
plan = self.execute_command("GRAPH.EXPLAIN", self.name, query)
return "\n".join(plan)
def bulk(self, **kwargs):
"""Internal only. Not supported."""
raise NotImplementedError(
"GRAPH.BULK is internal only. "
"Use https://github.com/redisgraph/redisgraph-bulk-loader."
)
def profile(self, query):
"""
Execute a query and produce an execution plan augmented with metrics
for each operation's execution. Return a string representation of a
query execution plan, with details on results produced by and time
spent in each operation.
For more information see `GRAPH.PROFILE <https://oss.redis.com/redisgraph/master/commands/#graphprofile>`_. # noqa
"""
return self.query(query, profile=True)
def slowlog(self):
"""
Get a list containing up to 10 of the slowest queries issued
against the given graph ID.
For more information see `GRAPH.SLOWLOG <https://oss.redis.com/redisgraph/master/commands/#graphslowlog>`_. # noqa
Each item in the list has the following structure:
1. A unix timestamp at which the log entry was processed.
2. The issued command.
3. The issued query.
4. The amount of time needed for its execution, in milliseconds.
"""
return self.execute_command("GRAPH.SLOWLOG", self.name)
def config(self, name, value=None, set=False):
"""
Retrieve or update a RedisGraph configuration.
For more information see `GRAPH.CONFIG <https://oss.redis.com/redisgraph/master/commands/#graphconfig>`_. # noqa
Args:
name : str
The name of the configuration
value :
            The value we want to set (can be used only when `set` is on)
set : bool
Turn on to set a configuration. Default behavior is get.
"""
params = ["SET" if set else "GET", name]
if value is not None:
if set:
params.append(value)
else:
raise DataError(
"``value`` can be provided only when ``set`` is True"
) # noqa
return self.execute_command("GRAPH.CONFIG", *params)
def list_keys(self):
"""
Lists all graph keys in the keyspace.
For more information see `GRAPH.LIST <https://oss.redis.com/redisgraph/master/commands/#graphlist>`_. # noqa
"""
return self.execute_command("GRAPH.LIST")
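# A minimal usage sketch (assumes a Graph client class mixing in GraphCommands
# and a reachable RedisGraph server; the `graph()` helper shown here is an
# assumption about the surrounding client library, not defined in this module):
#
#   import redis
#   g = redis.Redis().graph("social")
#   g.query("CREATE (:person {name: 'Alice'})")
#   result = g.query("MATCH (p:person) RETURN p.name", timeout=2000)
#   print(g.explain("MATCH (p:person) RETURN p.name"))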
|
alisaifee/redis-py
|
redis/commands/graph/commands.py
|
Python
|
mit
| 6,937
|
#!Measurement
# all of this is configuration info that can be used in the script.
# you refer to these values using mx.<group>.<attribute>
# e.g. mx.baseline.counts is 180
# mx.multicollect.detector is H1
'''
baseline:
after: true
before: false
counts: 180
detector: H1
mass: 34.2
settling_time: 15
default_fits: nominal
equilibration:
eqtime: 1.0
inlet: R
inlet_delay: 3
outlet: O
use_extraction_eqtime: true
multicollect:
counts: 400
detector: H1
isotope: Ar40
peakcenter:
after: true
before: false
detector: H1
detectors:
- H1
- AX
- CDD
isotope: Ar40
peakhop:
hops_name: ''
use_peak_hop: false
'''
# entry point for the script
def main():
# print a message to the user
info('unknown measurement script')
    # activate the following detectors. measurements will be plotted and saved for these detectors
activate_detectors('H2', 'H1', 'AX', 'L1', 'L2', 'CDD')
# position the magnet with Ar40 on H1
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
# choose where to get the equilibration duration from
# sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
    e.g. sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
# start the equilibration thread
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
    # set time zero after equilibrate returns i.e. after the ion pump valve closes
set_time_zero()
# record/plot the equilibration
sniff(eqt)
# set the default fits
set_fits()
set_baseline_fits()
    # multicollect on active detectors for 400 counts
multicollect(ncounts=mx.multicollect.counts)
if mx.baseline.after:
# do a baseline measurement
baselines(ncounts=mx.baseline.counts, mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
if mx.peakcenter.after:
# do a peak center scan and update the mftable with new peak centers
activate_detectors(*mx.peakcenter.detectors, **{'peak_center': True})
peak_center(detector=mx.peakcenter.detector, isotope=mx.peakcenter.isotope)
# print a message to the user
info('finished measure script')
|
USGSDenverPychron/pychron
|
docs/user_guide/operation/scripts/examples/basic.py
|
Python
|
apache-2.0
| 2,487
|
import os
import shutil
import sys
import glob
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
version = 'x.y.z'
if os.path.exists('VERSION'):
version = open('VERSION').read().strip()
setup(
name='saffrontree',
version=version,
description='SaffronTree: Reference free rapid phylogenetic tree construction from raw read data',
long_description=read('README.md'),
packages = find_packages(),
package_data={'saffrontree': ['example_data/fastas/*', 'example_data/fastqs/*']},
author='Andrew J. Page',
author_email='path-help@sanger.ac.uk',
url='https://github.com/sanger-pathogens/saffrontree',
scripts=glob.glob('scripts/*'),
test_suite='nose.collector',
tests_require=['nose >= 1.3'],
install_requires=[
'dendropy >= 4.1.0',
'biopython >= 1.68',
'pyfastaq >= 3.12.0',
],
license='GPLv3',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
],
)
|
andrewjpage/saffrontree
|
setup.py
|
Python
|
gpl-3.0
| 1,270
|
from setuptools import setup
setup(
name = 'md2remark',
py_modules = ['md2remark_runner'],
packages = ['md2remark', 'md2remark.resources', 'md2remark.resources.templates'],
package_data = {'md2remark.resources.templates': ['*.mustache']},
install_requires = ['pystache==0.5.4'],
version = '0.1.0',
description = 'Builds a slideshow from markdown using remark.js.',
long_description = open('README.rst', 'r').read(),
author = 'Patrick Ayoup',
author_email = 'patrick.ayoup@gmail.com',
license = 'MIT',
url = 'https://github.com/patrickayoup/md2remark/',
download_url = 'https://github.com/patrickayoup/md2remark/tarball/0.1.0',
keywords = ['markdown', 'slideshow'],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Utilities'
],
entry_points = {
'console_scripts': [
'md2remark = md2remark_runner:run'
]
},
)
|
patrickayoup/md2remark
|
setup.py
|
Python
|
mit
| 1,102
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrderTransaction.currency'
db.add_column('datacash_ordertransaction', 'currency',
self.gf('django.db.models.fields.CharField')(default=settings.DATACASH_CURRENCY, max_length=12),
keep_default=False)
def backwards(self, orm):
# Deleting field 'OrderTransaction.currency'
db.delete_column('datacash_ordertransaction', 'currency')
models = {
'datacash.fraudresponse': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'FraudResponse'},
'aggregator_identifier': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merchant_identifier': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'merchant_order_ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'message_digest': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'raw_response': ('django.db.models.fields.TextField', [], {}),
'recommendation': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {}),
't3m_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
},
'datacash.ordertransaction': {
'Meta': {'ordering': "('-date_created',)", 'object_name': 'OrderTransaction'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'auth_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'datacash_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merchant_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'order_number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'request_xml': ('django.db.models.fields.TextField', [], {}),
'response_xml': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['datacash']
|
django-oscar/django-oscar-datacash
|
datacash/migrations/0004_auto__add_field_ordertransaction_currency.py
|
Python
|
bsd-3-clause
| 3,342
|
"""
EM Routines
"""
|
Patrick-Cole/pygmi
|
pygmi/em/__init__.py
|
Python
|
gpl-3.0
| 21
|
import random
import sys
import __builtin__
from externals.moduleman.plugin import moduleman_plugin
from framework.core.myexception import FuzzException
from framework.fuzzer.base import wfuzz_iterator
from framework.plugins.api import search_bing
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class ipnet:
name = "ipnet"
description = "Returns list of IP addresses of a given network. ie. 192.168.1.0/24"
category = ["default"]
priority = 99
def __init__(self, network):
try:
import ipaddress
net = ipaddress.ip_network(u'%s' % network)
self.f = net.hosts()
self.__count = net.num_addresses - 2
if self.__count <= 0:
                raise FuzzException(FuzzException.FATAL, "There are no hosts in the specified network")
except ValueError:
raise FuzzException(FuzzException.FATAL, "The specified network has an incorrect format.")
except ImportError:
raise FuzzException(FuzzException.FATAL, "ipnet plugin requires ipaddress module. Please install it using pip.")
def next(self):
return str(self.f.next())
def count(self):
return self.__count
def __iter__ (self):
return self
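# For example (illustrative): ipnet("192.168.1.0/24") iterates the 254 usable
# host addresses 192.168.1.1 .. 192.168.1.254, since count() is computed as
# num_addresses minus the network and broadcast addresses.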
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class file:
name = "file"
description = "Returns each word from a file."
category = ["default"]
priority = 99
def __init__(self, filename):
try:
self.f = open(filename,"r")
except IOError:
raise FuzzException(FuzzException.FATAL, "Error opening file")
self.__count = len(self.f.readlines())
self.f.seek(0)
def next (self):
return self.f.next().strip()
def count(self):
return self.__count
def __iter__ (self):
return self
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class range:
name = "range"
description = "Returns each number of the given range. ie. 0-10"
category = ["default"]
priority = 99
def __init__(self, whatrange): ## range example --> "23-56"
try:
ran = whatrange.split("-")
self.minimum = int(ran[0])
self.maximum = int(ran[1])
self.__count = self.maximum - self.minimum + 1
self.width = len(ran[0])
self.current = self.minimum
except:
raise FuzzException(FuzzException.FATAL, "Bad range format (eg. \"23-56\")")
def next(self):
if self.current>self.maximum:
raise StopIteration
else:
if self.width:
payl = "%0"+ str(self.width) + "d"
payl = payl % (self.current)
else:
payl = str(self.current)
self.current += 1
return payl
def count(self):
return self.__count
def __iter__(self):
return self
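# For example (illustrative): range("05-10") keeps the zero-padded width of
# the lower bound and yields "05", "06", ..., "10", while range("5-10")
# yields the unpadded "5", "6", ..., "10".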
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class hexrange:
name = "hexrange"
description = "Returns each hex number of the given hex range. ie. 00-ff"
category = ["default"]
priority = 99
def __init__(self, prange): ## range example --> "0-ffa"
try:
ran = prange.split("-")
self.minimum = int(ran[0],16)
self.maximum = int(ran[1],16)
self.__count = self.maximum - self.minimum + 1
self.current = self.minimum
except:
raise Exception, "Bad range format (eg. \"0-ffa\")"
def __iter__(self):
return self
def count(self):
return self.__count
def next(self):
if self.current > self.maximum:
raise StopIteration
        lgth = len(hex(self.maximum).replace("0x", ""))
        pl = "%" + str(lgth) + "s"
        num = hex(self.current).replace("0x", "")
        pl = pl % (num)
        payl = pl.replace(" ", "0")
        self.current += 1
        return payl
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class hexrand:
name = "hexrand"
description = "Returns random hex numbers."
category = ["default"]
priority = 99
def __init__(self, prange): ## range example --> "0-ffa"
try:
ran = prange.split("-")
            self.minimum = int(ran[0], 16)
            self.maximum = int(ran[1], 16)
            self.__count = -1
except:
raise Exception, "Bad range format (eg. \"0-ffa\")"
def __iter__ (self):
return self
def count(self):
return self.__count
def next (self):
        self.current = random.SystemRandom().randint(self.minimum, self.maximum)
        lgth = len(hex(self.maximum).replace("0x", ""))
        pl = "%" + str(lgth) + "s"
        num = hex(self.current).replace("0x", "")
        pl = pl % (num)
        payl = pl.replace(" ", "0")
        return payl
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class buffer_overflow:
name = "buffer_overflow"
description = "Returns a string using the following pattern A * given number."
category = ["default"]
priority = 99
def __init__(self, n):
self.l = ['A' * int(n)]
self.current = 0
def __iter__(self):
return self
def count(self):
return 1
def next (self):
if self.current == 0:
elem = self.l[self.current]
self.current+=1
return elem
else:
raise StopIteration
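# For example (illustrative): buffer_overflow("10") yields the single payload
# "AAAAAAAAAA" (ten 'A' characters), a simple overflow probe string.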
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class list:
name = "list"
description = "Returns each element of the given word list separated by -. ie word1-word2"
category = ["default"]
priority = 99
def __init__(self, l):
if l.find("\\") >= 0:
l = l.replace("\\-", "$SEP$")
l = l.replace("\\\\", "$SCAP$")
self.l = l.split("-")
for i in __builtin__.range(len(self.l)):
self.l[i] = self.l[i].replace("$SEP$", "-")
self.l[i] = self.l[i].replace("$SCAP$", "\\")
else:
self.l = l.split("-")
self.__count = len(self.l)
self.current = 0
def __iter__ (self):
return self
def count(self):
return self.__count
def next (self):
if self.current >= self.__count:
raise StopIteration
else:
elem = self.l[self.current]
self.current += 1
return elem
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class stdin:
name = "stdin"
description = "Returns each item read from stdin."
category = ["default"]
priority = 99
def __init__(self, deprecated):
# stdin is unseekable
self.__count = -1
#self.__count=len(sys.stdin.readlines())
#sys.stdin.seek(0)
def count(self):
return self.__count
def __iter__ (self):
return self
def next (self):
#line=sys.stdin.next().strip().split(':')
line = sys.stdin.next().strip()
return line
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class names:
name = "names"
description = "Returns possible usernames by mixing the given words, separated by -, using known typical constructions. ie. jon-smith"
category = ["default"]
priority = 99
def __init__(self, startnames):
self.startnames = startnames
possibleusernames = []
name = ""
llist = self.startnames.split("-")
for x in llist:
if name == "":
name = name + x
else:
name = name + " " + x
if " " in name:
parts = name.split()
possibleusernames.append(parts[0])
possibleusernames.append(parts[0]+"."+parts[1])
possibleusernames.append(parts[0]+parts[1])
possibleusernames.append(parts[0]+"."+parts[1][0])
possibleusernames.append(parts[0][0]+"."+parts[1])
possibleusernames.append(parts[0]+parts[1][0])
possibleusernames.append(parts[0][0]+parts[1])
str1=""
str2=""
str3=""
str4=""
for i in __builtin__.range(0,len(parts)-1):
str1=str1+parts[i]+"."
str2=str2+parts[i]
str3=str3+parts[i][0]+"."
str4=str4+parts[i][0]
str5=str1+parts[-1]
str6=str2+parts[-1]
str7=str4+parts[-1]
str8=str3+parts[-1]
str9=str2+parts[-1][0]
str10=str4+parts[-1][0]
possibleusernames.append(str5)
possibleusernames.append(str6)
possibleusernames.append(str7)
possibleusernames.append(str8)
possibleusernames.append(str9)
possibleusernames.append(str10)
possibleusernames.append(parts[-1])
possibleusernames.append(parts[0]+"."+parts[-1])
possibleusernames.append(parts[0]+parts[-1])
possibleusernames.append(parts[0]+"."+parts[-1][0])
possibleusernames.append(parts[0][0]+"."+parts[-1])
possibleusernames.append(parts[0]+parts[-1][0])
possibleusernames.append(parts[0][0]+parts[-1])
else:
possibleusernames.append(name)
self.creatednames=possibleusernames
self.__count=len(possibleusernames)
def count(self):
return self.__count
def __iter__(self):
return self
def next(self):
if self.creatednames:
payl = self.creatednames.pop()
return payl
else:
raise StopIteration
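# For example (illustrative): names("jon-smith") generates candidates such as
# jon, smith, jon.smith, jonsmith, jon.s, j.smith, jons, jsmith and js,
# served by popping from the end of the generated list.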
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class permutation:
name = "permutation"
description = "Returns permutations of the given charset and length. ie. abc-2"
category = ["default"]
priority = 99
def __init__(self, prange): ## range example --> "abcdef-4"
self.charset = []
try:
ran = prange.split("-")
self.charset = ran[0]
self.width = int(ran[1])
except:
raise Exception, "Bad range format (eg. \"abfdeg-3\")"
pset = []
for x in self.charset:
pset.append(x)
words = self.xcombinations(pset, self.width)
self.lista = []
for x in words:
self.lista.append(''.join(x))
self.__count = len(self.lista)
def __iter__ (self):
return self
def count(self):
return self.__count
def next (self):
if self.lista != []:
payl=self.lista.pop()
return payl
else:
raise StopIteration
def xcombinations(self, items, n):
if n == 0:
yield []
else:
try:
for i in xrange(len(items)):
for cc in self.xcombinations(items[:i] + items[i:], n - 1):
yield [items[i]] + cc
except:
print "Interrupted Permutation calculations"
sys.exit()
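# For example (illustrative): permutation("ab-2") produces every length-2
# string over {a, b}, i.e. aa, ab, ba, bb, since xcombinations passes the
# full item list (items[:i] + items[i:]) into each recursive call and thus
# allows characters to repeat; payloads are then served via pop(), in reverse.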
@wfuzz_iterator
@moduleman_plugin("count", "next", "__iter__")
class bing:
'''
Some examples of bing hacking:
- http://www.elladodelmal.com/2010/02/un-poco-de-bing-hacking-i-de-iii.html
'''
name = "bing"
description = "Returns URL results of a given bing API search (needs api key). ie, intitle:\"JBoss JMX Management Console\"-10"
category = ["default"]
priority = 99
def __init__(self, dork):
self.l = search_bing(dork)
self.__count = len(self.l)
self.current = 0
def __iter__ (self):
return self
def count(self):
return self.__count
def next (self):
if self.current >= self.__count:
raise StopIteration
else:
elem = self.l[self.current]['Url']
self.current += 1
return str(elem.strip())
|
Bladefidz/wfuzz
|
plugins/payloads.py
|
Python
|
gpl-2.0
| 10,373
|
# Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
# pylint: disable=wrong-import-order,ungrouped-imports
import gi
from gi.repository import Gdk
from gi.repository import Gtk
from virtinst import log
# We can use either 2.91 or 2.90. This is just to silence runtime warnings
try:
gi.require_version("Vte", "2.91")
log.debug("Using VTE API 2.91")
except ValueError: # pragma: no cover
gi.require_version("Vte", "2.90")
log.debug("Using VTE API 2.90")
from gi.repository import Vte
import libvirt
from ..baseclass import vmmGObject
class _DataStream(vmmGObject):
"""
Wrapper class for interacting with libvirt console stream
"""
def __init__(self, vm):
vmmGObject.__init__(self)
self.vm = vm
self.conn = vm.conn
self._stream = None
self._streamToTerminal = b""
self._terminalToStream = ""
def _cleanup(self):
self.close()
self.vm = None
self.conn = None
#################
# Internal APIs #
#################
def _display_data(self, terminal):
if not self._streamToTerminal:
return # pragma: no cover
terminal.feed(self._streamToTerminal)
self._streamToTerminal = b""
def _event_on_stream(self, stream, events, opaque):
ignore = stream
terminal = opaque
if (events & libvirt.VIR_EVENT_HANDLE_ERROR or
events & libvirt.VIR_EVENT_HANDLE_HANGUP): # pragma: no cover
log.debug("Received stream ERROR/HANGUP, closing console")
self.close()
return
if events & libvirt.VIR_EVENT_HANDLE_READABLE:
try:
got = self._stream.recv(1024 * 100)
except Exception: # pragma: no cover
log.exception("Error receiving stream data")
self.close()
return
if got == -2: # pragma: no cover
# This is basically EAGAIN
return
if len(got) == 0:
log.debug("Received EOF from stream, closing")
self.close()
return
queued_text = bool(self._streamToTerminal)
self._streamToTerminal += got
if not queued_text:
self.idle_add(self._display_data, terminal)
if (events & libvirt.VIR_EVENT_HANDLE_WRITABLE and
self._terminalToStream):
try:
done = self._stream.send(self._terminalToStream.encode())
except Exception: # pragma: no cover
log.exception("Error sending stream data")
self.close()
return
if done == -2: # pragma: no cover
# This is basically EAGAIN
return
self._terminalToStream = self._terminalToStream[done:]
if not self._terminalToStream:
self._stream.eventUpdateCallback(libvirt.VIR_STREAM_EVENT_READABLE |
libvirt.VIR_STREAM_EVENT_ERROR |
libvirt.VIR_STREAM_EVENT_HANGUP)
##############
# Public API #
##############
def open(self, dev, terminal):
if self._stream:
return
name = dev and dev.alias.name or None
log.debug("Opening console stream for dev=%s alias=%s",
dev, name)
        # libxl doesn't set aliases; its open_console just defaults to
        # opening the first console device, so don't force presence of
        # an alias
stream = self.conn.get_backend().newStream(libvirt.VIR_STREAM_NONBLOCK)
self.vm.open_console(name, stream)
self._stream = stream
self._stream.eventAddCallback((libvirt.VIR_STREAM_EVENT_READABLE |
libvirt.VIR_STREAM_EVENT_ERROR |
libvirt.VIR_STREAM_EVENT_HANGUP),
self._event_on_stream,
terminal)
def close(self):
if self._stream:
try:
self._stream.eventRemoveCallback()
except Exception: # pragma: no cover
log.exception("Error removing stream callback")
try:
self._stream.finish()
except Exception: # pragma: no cover
log.exception("Error finishing stream")
self._stream = None
def send_data(self, src, text, length, terminal):
"""
Callback when data has been entered into VTE terminal
"""
ignore = src
ignore = length
ignore = terminal
if self._stream is None:
return # pragma: no cover
self._terminalToStream += text
if self._terminalToStream:
self._stream.eventUpdateCallback(libvirt.VIR_STREAM_EVENT_READABLE |
libvirt.VIR_STREAM_EVENT_WRITABLE |
libvirt.VIR_STREAM_EVENT_ERROR |
libvirt.VIR_STREAM_EVENT_HANGUP)
class vmmSerialConsole(vmmGObject):
@staticmethod
def can_connect(vm, dev):
"""
Check if we think we can actually open passed console/serial dev
"""
usable_types = ["pty"]
ctype = dev.type
err = ""
if not vm.is_active():
err = _("Serial console not available for inactive guest")
elif ctype not in usable_types:
err = (_("Console for device type '%s' is not supported") % ctype)
return err
@staticmethod
def get_serialcon_devices(vm):
serials = vm.xmlobj.devices.serial
consoles = vm.xmlobj.devices.console
if serials and vm.serial_is_console_dup(serials[0]):
consoles.pop(0)
return serials + consoles
def __init__(self, vm, target_port, name):
vmmGObject.__init__(self)
self.vm = vm
self.target_port = target_port
self.name = name
self.lastpath = None
self._datastream = _DataStream(self.vm)
self._serial_popup = None
self._serial_copy = None
self._serial_paste = None
self._init_popup()
self._vteterminal = None
self._init_terminal()
self._box = None
self._error_label = None
self._init_ui()
self.vm.connect("state-changed", self._vm_status_changed)
def _cleanup(self):
self._datastream.cleanup()
self._datastream = None
self.vm = None
self._vteterminal = None
self._box = None
###########
# UI init #
###########
def _init_terminal(self):
self._vteterminal = Vte.Terminal()
self._vteterminal.set_scrollback_lines(1000)
self._vteterminal.set_audible_bell(False)
self._vteterminal.get_accessible().set_name("Serial Terminal")
self._vteterminal.connect("button-press-event",
self._show_serial_rcpopup)
self._vteterminal.connect("commit",
self._datastream.send_data, self._vteterminal)
self._vteterminal.show()
def _init_popup(self):
self._serial_popup = Gtk.Menu()
self._serial_popup.get_accessible().set_name("serial-popup-menu")
self._serial_copy = Gtk.MenuItem.new_with_mnemonic(_("_Copy"))
self._serial_copy.connect("activate", self._serial_copy_text)
self._serial_popup.add(self._serial_copy)
self._serial_paste = Gtk.MenuItem.new_with_mnemonic(_("_Paste"))
self._serial_paste.connect("activate", self._serial_paste_text)
self._serial_popup.add(self._serial_paste)
def _init_ui(self):
self._box = Gtk.Notebook()
self._box.set_show_tabs(False)
self._box.set_show_border(False)
align = Gtk.Box()
align.set_border_width(2)
evbox = Gtk.EventBox()
evbox.modify_bg(Gtk.StateType.NORMAL, Gdk.Color(0, 0, 0))
terminalbox = Gtk.HBox()
scrollbar = Gtk.VScrollbar()
self._error_label = Gtk.Label()
self._error_label.set_width_chars(40)
self._error_label.set_line_wrap(True)
if self._vteterminal:
scrollbar.set_adjustment(self._vteterminal.get_vadjustment())
align.add(self._vteterminal)
evbox.add(align)
terminalbox.pack_start(evbox, True, True, 0)
terminalbox.pack_start(scrollbar, False, False, 0)
self._box.append_page(terminalbox, Gtk.Label(""))
self._box.append_page(self._error_label, Gtk.Label(""))
self._box.show_all()
scrollbar.hide()
scrollbar.get_adjustment().connect(
"changed", self._scrollbar_adjustment_changed, scrollbar)
###################
# Private methods #
###################
def _show_error(self, msg):
self._error_label.set_markup("<b>%s</b>" % msg)
self._box.set_current_page(1)
def _lookup_dev(self):
devs = vmmSerialConsole.get_serialcon_devices(self.vm)
found = None
for dev in devs:
port = dev.get_xml_idx()
path = dev.source.path
if port == self.target_port:
if path != self.lastpath:
log.debug("Serial console '%s' path changed to %s",
self.target_port, path)
self.lastpath = path
found = dev
break
if not found: # pragma: no cover
log.debug("No devices found for serial target port '%s'",
self.target_port)
self.lastpath = None
return found
##############
# Public API #
##############
def close(self):
if self._datastream:
self._datastream.close()
def get_box(self):
return self._box
def has_focus(self):
return bool(self._vteterminal and
self._vteterminal.get_property("has-focus"))
def set_focus_callbacks(self, in_cb, out_cb):
self._vteterminal.connect("focus-in-event", in_cb)
self._vteterminal.connect("focus-out-event", out_cb)
def open_console(self):
try:
dev = self._lookup_dev()
self._datastream.open(dev, self._vteterminal)
self._box.set_current_page(0)
return True
except Exception as e:
log.exception("Error opening serial console")
self._show_error(_("Error connecting to text console: %s") % e)
try:
self._datastream.close()
except Exception: # pragma: no cover
pass
return False
################
# UI listeners #
################
def _vm_status_changed(self, vm):
if vm.status() in [libvirt.VIR_DOMAIN_RUNNING]:
self.open_console()
else:
self._datastream.close()
def _scrollbar_adjustment_changed(self, adjustment, scrollbar):
scrollbar.set_visible(
adjustment.get_upper() > adjustment.get_page_size())
def _show_serial_rcpopup(self, src, event):
if event.button != 3:
return
self._serial_popup.show_all()
if src.get_has_selection():
self._serial_copy.set_sensitive(True)
else:
self._serial_copy.set_sensitive(False)
self._serial_popup.popup_at_pointer(event)
def _serial_copy_text(self, src_ignore):
self._vteterminal.copy_clipboard()
def _serial_paste_text(self, src_ignore):
self._vteterminal.paste_clipboard()
|
crobinso/virt-manager
|
virtManager/details/serialcon.py
|
Python
|
gpl-2.0
| 11,857
|
# -*- coding: utf-8 -*-
## This file is part of Gertrude.
##
## Gertrude is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## Gertrude is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gertrude; if not, see <http://www.gnu.org/licenses/>.
class Change:
def __init__(self, instance, member, value):
self.instance, self.member, self.value = instance, member, value
def Undo(self):
exec('self.instance.%s = self.value' % self.member)
class Delete:
def __init__(self, instance, index):
self.instance, self.index = instance, index
def Undo(self):
self.instance[self.index].delete()
del self.instance[self.index]
class Insert:
def __init__(self, instance, index, value):
self.instance, self.index, self.value = instance, index, value
def Undo(self):
if isinstance(self.instance, list):
self.instance.insert(self.index, self.value)
else:
self.instance[self.index] = self.value
class Call:
def __init__(self, function, args=None):
self.function = function
self.args = args
def Undo(self):
if self.args is None:
self.function()
else:
self.function(self.args)
class History(list):
def __init__(self):
list.__init__(self)
def Undo(self, count=1):
result = 0
for i in range(count):
if len(self) > 0:
actions = self[-1]
if actions is None:
return result
self.pop(-1)
for action in actions:
action.Undo()
result += 1
return result
def Append(self, actions):
if actions is not None and not isinstance(actions, list):
actions = [actions]
self.append(actions)
def Last(self):
if len(self) > 0:
return self[-1]
else:
return None
def Clear(self):
del self[:]
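# A minimal usage sketch (hypothetical objects, not part of this module):
#
#   history = History()
#   old_name = child.name
#   child.name = "new name"
#   history.Append(Change(child, "name", old_name))
#   history.Undo()  # restores child.name to old_name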
|
studio1247/gertrude
|
history.py
|
Python
|
gpl-3.0
| 2,500
|
from django.test import SimpleTestCase, TestCase
from corehq.util.es.elasticsearch import ConnectionError
from eulxml.xpath import parse as parse_xpath
from casexml.apps.case.mock import CaseFactory, CaseIndex, CaseStructure
from pillowtop.es_utils import initialize_index_and_mapping
from corehq.apps.case_search.filter_dsl import (
CaseFilterError,
build_filter_from_ast,
)
from corehq.apps.es import CaseSearchES
from corehq.apps.es.tests.utils import ElasticTestMixin, es_test
from corehq.elastic import get_es_new, send_to_elasticsearch
from corehq.form_processor.tests.utils import FormProcessorTestUtils
from corehq.pillows.case_search import transform_case_for_elasticsearch
from corehq.pillows.mappings.case_search_mapping import CASE_SEARCH_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
from corehq.util.test_utils import trap_extra_setup
@es_test
class TestFilterDsl(ElasticTestMixin, SimpleTestCase):
def test_simple_filter(self):
parsed = parse_xpath("name = 'farid'")
expected_filter = {
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": (
{
"term": {
"case_properties.key.exact": "name"
}
},
{
"term": {
"case_properties.value.exact": "farid"
}
}
)
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
built_filter = build_filter_from_ast("domain", parsed)
self.checkQuery(expected_filter, built_filter, is_raw_query=True)
def test_date_comparison(self):
parsed = parse_xpath("dob >= '2017-02-12'")
expected_filter = {
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "dob"
}
}
],
"must": {
"range": {
"case_properties.value.date": {
"gte": "2017-02-12"
}
}
}
}
}
}
}
self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
def test_numeric_comparison(self):
parsed = parse_xpath("number <= '100.32'")
expected_filter = {
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "number"
}
}
],
"must": {
"range": {
"case_properties.value.numeric": {
"lte": 100.32
}
}
}
}
}
}
}
self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
def test_numeric_comparison_negative(self):
parsed = parse_xpath("number <= -100.32")
expected_filter = {
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "number"
}
}
],
"must": {
"range": {
"case_properties.value.numeric": {
"lte": -100.32
}
}
}
}
}
}
}
self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
def test_numeric_equality_negative(self):
parsed = parse_xpath("number = -100.32")
expected_filter = {
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": (
{
"term": {
"case_properties.key.exact": "number"
}
},
{
"term": {
"case_properties.value.exact": -100.32
}
}
)
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
built_filter = build_filter_from_ast("domain", parsed)
self.checkQuery(expected_filter, built_filter, is_raw_query=True)
def test_case_property_existence(self):
parsed = parse_xpath("property != ''")
expected_filter = {
"bool": {
"must_not": {
"bool": {
"should": [
{
"bool": {
"must_not": {
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "property"
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
}
},
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "property"
}
},
{
"term": {
"case_properties.value.exact": ""
}
}
]
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
]
}
}
}
}
self.checkQuery(expected_filter, build_filter_from_ast("domain", parsed), is_raw_query=True)
def test_nested_filter(self):
parsed = parse_xpath("(name = 'farid' or name = 'leila') and dob <= '2017-02-11'")
expected_filter = {
"bool": {
"filter": [
{
"bool": {
"should": [
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "name"
}
},
{
"term": {
"case_properties.value.exact": "farid"
}
}
]
}
}
],
"must": {
"match_all": {}
}
}
}
}
},
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "name"
}
},
{
"term": {
"case_properties.value.exact": "leila"
}
}
]
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
]
}
},
{
"nested": {
"path": "case_properties",
"query": {
"bool": {
"filter": [
{
"term": {
"case_properties.key.exact": "dob"
}
}
],
"must": {
"range": {
"case_properties.value.date": {
"lte": "2017-02-11"
}
}
}
}
}
}
}
]
}
}
built_filter = build_filter_from_ast("domain", parsed)
self.checkQuery(expected_filter, built_filter, is_raw_query=True)
def test_self_reference(self):
with self.assertRaises(CaseFilterError):
build_filter_from_ast(None, parse_xpath("name = other_property"))
with self.assertRaises(CaseFilterError):
build_filter_from_ast(None, parse_xpath("name > other_property"))
with self.assertRaises(CaseFilterError):
build_filter_from_ast(None, parse_xpath("parent/name > other_property"))
@es_test
class TestFilterDslLookups(ElasticTestMixin, TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
super(TestFilterDslLookups, cls).setUpClass()
with trap_extra_setup(ConnectionError):
cls.es = get_es_new()
initialize_index_and_mapping(cls.es, CASE_SEARCH_INDEX_INFO)
cls.child_case_id = 'margaery'
cls.parent_case_id = 'mace'
cls.grandparent_case_id = 'olenna'
cls.domain = "Tyrell"
factory = CaseFactory(domain=cls.domain)
grandparent_case = CaseStructure(
case_id=cls.grandparent_case_id,
attrs={
'create': True,
'case_type': 'grandparent',
'update': {
"name": "Olenna",
"alias": "Queen of thorns",
"house": "Tyrell",
},
})
parent_case = CaseStructure(
case_id=cls.parent_case_id,
attrs={
'create': True,
'case_type': 'parent',
'update': {
"name": "Mace",
"house": "Tyrell",
},
},
indices=[CaseIndex(
grandparent_case,
identifier='mother',
relationship='child',
)])
child_case = CaseStructure(
case_id=cls.child_case_id,
attrs={
'create': True,
'case_type': 'child',
'update': {
"name": "Margaery",
"house": "Tyrell",
},
},
indices=[CaseIndex(
parent_case,
identifier='father',
relationship='extension',
)],
)
for case in factory.create_or_update_cases([child_case]):
send_to_elasticsearch('case_search', transform_case_for_elasticsearch(case.to_json()))
cls.es.indices.refresh(CASE_SEARCH_INDEX_INFO.index)
    @classmethod
    def tearDownClass(cls):
        FormProcessorTestUtils.delete_all_cases()
        ensure_index_deleted(CASE_SEARCH_INDEX_INFO.index)
        super(TestFilterDslLookups, cls).tearDownClass()
def test_parent_lookups(self):
parsed = parse_xpath("father/name = 'Mace'")
        # return all the cases whose parent (relationship named 'father') has case property 'name' = 'Mace'
expected_filter = {
"nested": {
"path": "indices",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": (
{
"terms": {
"indices.referenced_id": [self.parent_case_id]
}
},
{
"term": {
"indices.identifier": "father"
}
}
)
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
built_filter = build_filter_from_ast(self.domain, parsed)
self.checkQuery(expected_filter, built_filter, is_raw_query=True)
self.assertEqual([self.child_case_id], CaseSearchES().filter(built_filter).values_list('_id', flat=True))
def test_nested_parent_lookups(self):
parsed = parse_xpath("father/mother/house = 'Tyrell'")
expected_filter = {
"nested": {
"path": "indices",
"query": {
"bool": {
"filter": [
{
"bool": {
"filter": (
{
"terms": {
"indices.referenced_id": [self.parent_case_id]
}
},
{
"term": {
"indices.identifier": "father"
}
}
)
}
}
],
"must": {
"match_all": {}
}
}
}
}
}
built_filter = build_filter_from_ast(self.domain, parsed)
self.checkQuery(expected_filter, built_filter, is_raw_query=True)
self.assertEqual([self.child_case_id], CaseSearchES().filter(built_filter).values_list('_id', flat=True))
|
dimagi/commcare-hq
|
corehq/apps/case_search/tests/test_filter_dsl.py
|
Python
|
bsd-3-clause
| 21,077
|
from setuptools import setup, find_packages
setup(
name = "afed_donations",
version = "1.0",
url = 'http://github.com/steveandroulakis/afed_donations',
license = 'BSD',
description = "Tool for viewing donations to Australian political parties",
author = 'Steve Androulakis',
packages = find_packages(),
install_requires = ['setuptools',
'django==1.4.3',
'south==0.7.6'],
)
|
steveandroulakis/afed_donations
|
setup.py
|
Python
|
apache-2.0
| 454
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from nose.tools import eq_
from .utils import TestCase
from issue2branch.issue import Issue
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class TextTests(TestCase):
def setUp(self):
self.issue = Issue('the_issue_id', 'the_issue_title')
self.mock_colorize = self.patch('issue2branch.issue.colorize')
self.mock_colorize.side_effect = lambda *a: ("colorize({})"
.format(",".join(a)))
def test_default_text(self):
eq_(self.issue.text(),
'the_issue_id -colorize( Issue: ,Issue)the_issue_title')
def test_priority_text(self):
self.issue.priority = 'the_priority'
eq_(self.issue.text(),
'the_issue_id - [colorize(The_priority)] -colorize( Issue: ,Issue)the_issue_title')
def test_status_text(self):
self.issue.status = 'the_status'
eq_(self.issue.text(),
'the_issue_id - [colorize(The_status)] -colorize( Issue: ,Issue)the_issue_title')
def test_priority_status_text(self):
self.issue.status = 'the_status'
self.issue.priority = 'the_priority'
eq_(self.issue.text(),
'the_issue_id - [colorize(The_priority)/colorize(The_status)] -colorize( Issue: ,Issue)the_issue_title')
def test_assignee_text(self):
self.issue.assignee = 'the_assignee'
eq_(self.issue.text(),
'the_issue_id -colorize( Issue: ,Issue)the_issue_title - \033[32m(the_assignee)\033[0m')
def test_project_text(self):
self.issue.project = 'the_project'
eq_(self.issue.text(),
'the_issue_id -\033[35m {the_project}\033[0mcolorize( Issue: ,Issue)the_issue_title')
def test_issue_branch():
issue = Issue('the_id', 'the_title', tag='the_tag')
eq_(issue.branch(), 'the_tag-the_id-the_title')
|
pignacio/issue2branch
|
test/test_issue.py
|
Python
|
gpl-3.0
| 1,979
|
"""
Sampling along tracks
---------------------
The :func:`pygmt.grdtrack` function samples a raster grid's value along specified
points. We will need to input a 2D raster to ``grid`` which can be an
:class:`xarray.DataArray`. The argument passed to the ``points`` parameter can be a
:class:`pandas.DataFrame` table where the first two columns are x and y (or longitude
and latitude). Note also that there is a ``newcolname`` parameter that will be used to
name the new column of values sampled from the grid.
Alternatively, a NetCDF file path can be passed to ``grid``. An ASCII file path can
also be accepted for ``points``. To save an output ASCII file, a file name argument
needs to be passed to the ``outfile`` parameter.
"""
import pygmt
# Load sample grid and point datasets
grid = pygmt.datasets.load_earth_relief()
points = pygmt.datasets.load_ocean_ridge_points()
# Sample the bathymetry along the world's ocean ridges at specified track points
track = pygmt.grdtrack(points=points, grid=grid, newcolname="bathymetry")
fig = pygmt.Figure()
# Plot the earth relief grid on Cylindrical Stereographic projection, masking land areas
fig.basemap(region="g", projection="Cyl_stere/150/-20/15c", frame=True)
fig.grdimage(grid=grid, cmap="gray")
fig.coast(land="#666666")
# Plot the sampled bathymetry points using circles (c) of 0.15 cm size
# Points are colored using elevation values (normalized for visual purposes)
fig.plot(
x=track.longitude,
y=track.latitude,
style="c0.15c",
cmap="terra",
color=(track.bathymetry - track.bathymetry.mean()) / track.bathymetry.std(),
)
fig.show()
|
GenericMappingTools/gmt-python
|
examples/gallery/images/track_sampling.py
|
Python
|
bsd-3-clause
| 1,614
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import List, Optional
from sqlalchemy.exc import SQLAlchemyError
from superset.dao.base import BaseDAO
from superset.dao.exceptions import DAODeleteFailedError
from superset.extensions import db
from superset.models.sql_lab import SavedQuery
from superset.queries.saved_queries.filters import SavedQueryFilter
logger = logging.getLogger(__name__)
class SavedQueryDAO(BaseDAO):
model_cls = SavedQuery
base_filter = SavedQueryFilter
@staticmethod
def bulk_delete(models: Optional[List[SavedQuery]], commit: bool = True) -> None:
item_ids = [model.id for model in models] if models else []
try:
db.session.query(SavedQuery).filter(SavedQuery.id.in_(item_ids)).delete(
synchronize_session="fetch"
)
if commit:
db.session.commit()
except SQLAlchemyError as ex:
if commit:
db.session.rollback()
raise DAODeleteFailedError() from ex
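# Hedged usage sketch (not part of the original module): bulk_delete expects
# already-loaded SavedQuery models; the ids [1, 2, 3] below are hypothetical.
#
# queries = db.session.query(SavedQuery).filter(SavedQuery.id.in_([1, 2, 3])).all()
# SavedQueryDAO.bulk_delete(queries, commit=True)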
|
apache/incubator-superset
|
superset/queries/saved_queries/dao.py
|
Python
|
apache-2.0
| 1,792
|
# -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from fixture import DataSet
class AidPERSONIDDATAData(DataSet):
class AidPERSONIDDATA_1_uid:
tag = u'uid'
personid = 1L
data = u'2'
class AidPERSONIDDATA_2_uid:
tag = u'uid'
personid = 2L
data = u'1'
class AidPERSONIDDATA_3_uid:
tag = u'uid'
personid = 3L
data = u'4'
class AidPERSONIDDATA_4_uid:
tag = u'uid'
personid = 4L
data = u'5'
class AidPERSONIDDATA_5_uid:
tag = u'uid'
personid = 5L
data = u'6'
class AidPERSONIDDATA_6_uid:
tag = u'uid'
personid = 6L
data = u'7'
class AidPERSONIDDATA_7_uid:
tag = u'uid'
personid = 7L
data = u'8'
__all__ = ('AidPERSONIDDATAData', )
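# Hedged loading sketch (an assumption, not part of this fixture module): the
# ``fixture`` package these DataSet classes target is typically driven by a
# fixture object, along the lines of (env/engine wiring is hypothetical):
#
# from fixture import SQLAlchemyFixture
# dbfixture = SQLAlchemyFixture(env=models, engine=engine)
# data = dbfixture.data(AidPERSONIDDATAData)
# data.setup()     # inserts the rows; data.teardown() removes them again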
|
dset0x/inspire-next
|
inspire/base/fixtures/bibauthorid.py
|
Python
|
gpl-2.0
| 1,571
|
import os, sys, fileinput, re
if len(sys.argv) < 2 or not os.path.exists(sys.argv[1]):
    print "Usage: %s file_to_analyze" % sys.argv[0]
    sys.exit()
class portRec:
srcIP=0
srcPort=0
dstPort=0
timeRecv=0
order=0
timeSent=0
intPort=0
def __init__(self):
pass
def __str__(self):
return "[int]:%05d -> %s:%05d -> [srv]:%05d order:%05d sent: %s recv: %s" % (int(self.intPort), self.srcIP, int(self.srcPort), int(self.dstPort), int(self.order), self.timeSent, self.timeRecv)
def csv(self):
return "%05d;%s;%05d;%05d;%05d;%s;%s" % (int(self.intPort), self.srcIP, int(self.srcPort), int(self.dstPort), int(self.order), self.timeSent, self.timeRecv)
# compiled regular expressions
flineRe = re.compile(r"^[\d]+:[\d]+:[\d]+\.[\d]+ IP \(tos 0x0, ttl") # regex for first line record
addrsRe = re.compile(r"^([\d]+\.[\d]+\.[\d]+\.[\d]+)\.([\d]+) > ([\d]+\.[\d]+\.[\d]+\.[\d]+)\.([\d]+)") # regex for addresses
recRe = re.compile(r"\|\|t=([\d]+);s=([\d]+);d=([\d]+)\|\|") # regex for record
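# Illustrative (hypothetical) tcpdump record the regexes above are built to match:
#   12:34:56.789012 IP (tos 0x0, ttl 64, ...          <- flineRe (first line of a record)
#   10.0.0.1.12345 > 192.168.1.1.10000: ...           <- addrsRe (source/destination addresses)
#   ...||t=1234;s=10000;d=10001||...                  <- recRe (embedded payload record)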
def parseLastRecord(lastRec, order):
toRet = portRec()
lstIdx = len(lastRec)-1
toRet.order = order
line1split = lastRec[0].strip().split(" ", 2)
toRet.timeRecv = line1split[0]
line2strip = lastRec[1].strip()
m = addrsRe.match(line2strip)
if(m==None):
print "Warning, unrecognized record: ", lastRec
return None
toRet.srcIP = m.group(1)
toRet.srcPort = int(m.group(2))
toRet.dstPort = int(m.group(4))
m = recRe.search(lastRec[lstIdx])
if(m==None):
#print "Record not detected in the last line: ", lastRec[lstIdx]
return toRet
toRet.timeSent = int(m.group(1))
toRet.intPort = int(m.group(2))
return toRet
# filename to analyze
fname=sys.argv[1]
# DB
portdb = []
portCounts = []
for i in range(0,65536): portCounts.append(0)
dstPorts = set()
# stores last parsed record
curOrder = 0
lastRecord=[]
for line in fileinput.input(fname):
if flineRe.match(line): # new line - finish last record processing, start new one
#print "Last record: ", lastRecord
# PROCESS LAST RECORD HERE
if len(lastRecord)>0:
objRec = parseLastRecord(lastRecord, curOrder)
curOrder+=1
#print objRec
portdb.append(objRec)
portCounts[objRec.srcPort]+=1
dstPorts.add(objRec.dstPort)
if objRec.srcPort==10000 or objRec.srcPort==10001:
print "TargetPort: %0d5, data: %s" % (objRec.dstPort, str(objRec))
# new last record
lastRecord = [line]
else:
lastRecord.append(line)
print "DONE reading data"
print "Distinct destination ports: ", dstPorts
firstPort=-1
lastPort=-1
for i in range(0,65536):
if portCounts[i] > 0 and firstPort==-1:
firstPort=i
if portCounts[i] > 0:
lastPort=i
print "Port min:max interval: [%d, %d]" % (firstPort, lastPort)
# look for gaps in interval
for i in range(firstPort,lastPort+1):
if portCounts[i]==0:
print "PortGap: %05d" % i
#for i in portdb:
#print i.csv()
|
ph4r05/NATSimTools
|
portNums.py
|
Python
|
apache-2.0
| 3,271
|
"""
ldapobject.py - wraps class _ldap.LDAPObject
See http://www.python-ldap.org/ for details.
\$Id: ldapobject.py,v 1.109 2010/06/03 12:26:39 stroeder Exp $
Compatibility:
- Tested with Python 2.0+ but should work with Python 1.5.x
- LDAPObject class should be exactly the same like _ldap.LDAPObject
Usage:
Directly imported by ldap/__init__.py. The symbols of _ldap are
overridden.
Thread-lock:
Basically, calls into the LDAP lib are serialized by the module-wide
lock self._ldap_object_lock.
"""
from ldap import __version__
__all__ = [
'LDAPObject',
'SimpleLDAPObject',
'NonblockingLDAPObject',
'ReconnectLDAPObject',
'SmartLDAPObject'
]
if __debug__:
# Tracing is only supported in debugging mode
import traceback
import sys,time,_ldap,ldap,ldap.functions
from ldap.schema import SCHEMA_ATTRS
from ldap.controls import LDAPControl,DecodeControlTuples,EncodeControlTuples
from ldap import LDAPError
class SimpleLDAPObject:
"""
Drop-in wrapper class around _ldap.LDAPObject
"""
CLASSATTR_OPTION_MAPPING = {
"protocol_version": ldap.OPT_PROTOCOL_VERSION,
"deref": ldap.OPT_DEREF,
"referrals": ldap.OPT_REFERRALS,
"timelimit": ldap.OPT_TIMELIMIT,
"sizelimit": ldap.OPT_SIZELIMIT,
"network_timeout": ldap.OPT_NETWORK_TIMEOUT,
"error_number":ldap.OPT_ERROR_NUMBER,
"error_string":ldap.OPT_ERROR_STRING,
"matched_dn":ldap.OPT_MATCHED_DN,
}
def __init__(
self,uri,
trace_level=0,trace_file=None,trace_stack_limit=5
):
self._trace_level = trace_level
self._trace_file = trace_file or sys.stdout
self._trace_stack_limit = trace_stack_limit
self._uri = uri
self._ldap_object_lock = self._ldap_lock()
self._l = ldap.functions._ldap_function_call(ldap._ldap_module_lock,_ldap.initialize,uri)
self.timeout = -1
self.protocol_version = ldap.VERSION3
def _ldap_lock(self):
if ldap.LIBLDAP_R:
return ldap.LDAPLock(desc=self._uri)
else:
return ldap._ldap_module_lock
def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:# and func.__name__!='result':
self._trace_file.write('*** %s - %s (%s,%s)\n' % (
self._uri,
self.__class__.__name__+'.'+func.__name__,
repr(args),repr(kwargs)
))
if self._trace_level>=3:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
try:
try:
result = func(*args,**kwargs)
if __debug__ and self._trace_level>=2:
if func.__name__!="unbind_ext":
diagnostic_message_success = self._l.get_option(ldap.OPT_DIAGNOSTIC_MESSAGE)
else:
diagnostic_message_success = None
finally:
self._ldap_object_lock.release()
except LDAPError,e:
if __debug__ and self._trace_level>=2:
self._trace_file.write('=> LDAPError - %s: %s\n' % (e.__class__.__name__,str(e)))
raise
else:
if __debug__ and self._trace_level>=2:
if not diagnostic_message_success is None:
self._trace_file.write('=> diagnosticMessage: %s\n' % (repr(diagnostic_message_success)))
if result!=None and result!=(None,None):
self._trace_file.write('=> result: %s\n' % (repr(result)))
return result
def __setattr__(self,name,value):
if self.CLASSATTR_OPTION_MAPPING.has_key(name):
self.set_option(self.CLASSATTR_OPTION_MAPPING[name],value)
else:
self.__dict__[name] = value
def __getattr__(self,name):
if self.CLASSATTR_OPTION_MAPPING.has_key(name):
return self.get_option(self.CLASSATTR_OPTION_MAPPING[name])
elif self.__dict__.has_key(name):
return self.__dict__[name]
else:
raise AttributeError,'%s has no attribute %s' % (
self.__class__.__name__,repr(name)
)
def abandon_ext(self,msgid,serverctrls=None,clientctrls=None):
"""
abandon_ext(msgid[,serverctrls=None[,clientctrls=None]]) -> None
abandon(msgid) -> None
Abandons or cancels an LDAP operation in progress. The msgid should
be the message id of an outstanding LDAP operation as returned
by the asynchronous methods search(), modify() etc. The caller
can expect that the result of an abandoned operation will not be
returned from a future call to result().
"""
return self._ldap_call(self._l.abandon_ext,msgid,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def abandon(self,msgid):
return self.abandon_ext(msgid,None,None)
def cancel(self,cancelid,serverctrls=None,clientctrls=None):
"""
cancel(cancelid[,serverctrls=None[,clientctrls=None]]) -> int
Send cancels extended operation for an LDAP operation specified by cancelid.
The cancelid should be the message id of an outstanding LDAP operation as returned
by the asynchronous methods search(), modify() etc. The caller
can expect that the result of an abandoned operation will not be
returned from a future call to result().
    In contrast to abandon(), this extended operation gets a result from
the server and thus should be preferred if the server supports it.
"""
return self._ldap_call(self._l.cancel,cancelid,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def cancel_s(self,cancelid,serverctrls=None,clientctrls=None):
msgid = self.cancel(cancelid,serverctrls,clientctrls)
try:
res = self.result(msgid,all=1,timeout=self.timeout)
except (ldap.CANCELLED,ldap.SUCCESS):
res = None
return res
def add_ext(self,dn,modlist,serverctrls=None,clientctrls=None):
"""
add_ext(dn, modlist[,serverctrls=None[,clientctrls=None]]) -> int
This function adds a new entry with a distinguished name
specified by dn which means it must not already exist.
The parameter modlist is similar to the one passed to modify(),
except that no operation integer need be included in the tuples.
"""
return self._ldap_call(self._l.add_ext,dn,modlist,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def add_ext_s(self,dn,modlist,serverctrls=None,clientctrls=None):
msgid = self.add_ext(dn,modlist,serverctrls,clientctrls)
return self.result(msgid,all=1,timeout=self.timeout)
def add(self,dn,modlist):
"""
add(dn, modlist) -> int
This function adds a new entry with a distinguished name
specified by dn which means it must not already exist.
The parameter modlist is similar to the one passed to modify(),
except that no operation integer need be included in the tuples.
"""
return self.add_ext(dn,modlist,None,None)
def add_s(self,dn,modlist):
msgid = self.add(dn,modlist)
return self.result(msgid,all=1,timeout=self.timeout)
def simple_bind(self,who='',cred='',serverctrls=None,clientctrls=None):
"""
simple_bind([who='' [,cred='']]) -> int
"""
return self._ldap_call(self._l.simple_bind,who,cred,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def simple_bind_s(self,who='',cred='',serverctrls=None,clientctrls=None):
"""
simple_bind_s([who='' [,cred='']]) -> None
"""
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
return self.result(msgid,all=1,timeout=self.timeout)
def bind(self,who,cred,method=ldap.AUTH_SIMPLE):
"""
bind(who, cred, method) -> int
"""
assert method==ldap.AUTH_SIMPLE,'Only simple bind supported in LDAPObject.bind()'
return self.simple_bind(who,cred)
def bind_s(self,who,cred,method=ldap.AUTH_SIMPLE):
"""
bind_s(who, cred, method) -> None
"""
msgid = self.bind(who,cred,method)
return self.result(msgid,all=1,timeout=self.timeout)
def sasl_interactive_bind_s(self,who,auth,serverctrls=None,clientctrls=None,sasl_flags=ldap.SASL_QUIET):
"""
sasl_interactive_bind_s(who, auth) -> None
"""
return self._ldap_call(self._l.sasl_interactive_bind_s,who,auth,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls),sasl_flags)
def compare_ext(self,dn,attr,value,serverctrls=None,clientctrls=None):
"""
compare_ext(dn, attr, value [,serverctrls=None[,clientctrls=None]]) -> int
compare_ext_s(dn, attr, value [,serverctrls=None[,clientctrls=None]]) -> int
compare(dn, attr, value) -> int
compare_s(dn, attr, value) -> int
Perform an LDAP comparison between the attribute named attr of
entry dn, and the value value. The synchronous form returns 0
for false, or 1 for true. The asynchronous form returns the
    message id of the initiated request, and the result of the
asynchronous compare can be obtained using result().
Note that this latter technique yields the answer by raising
the exception objects COMPARE_TRUE or COMPARE_FALSE.
A design bug in the library prevents value from containing
nul characters.
"""
return self._ldap_call(self._l.compare_ext,dn,attr,value,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def compare_ext_s(self,dn,attr,value,serverctrls=None,clientctrls=None):
msgid = self.compare_ext(dn,attr,value,serverctrls,clientctrls)
try:
self.result(msgid,all=1,timeout=self.timeout)
except ldap.COMPARE_TRUE:
return 1
except ldap.COMPARE_FALSE:
return 0
return None
def compare(self,dn,attr,value):
return self.compare_ext(dn,attr,value,None,None)
def compare_s(self,dn,attr,value):
return self.compare_ext_s(dn,attr,value,None,None)
def delete_ext(self,dn,serverctrls=None,clientctrls=None):
"""
delete(dn) -> int
delete_s(dn) -> None
delete_ext(dn[,serverctrls=None[,clientctrls=None]]) -> int
delete_ext_s(dn[,serverctrls=None[,clientctrls=None]]) -> None
Performs an LDAP delete operation on dn. The asynchronous
form returns the message id of the initiated request, and the
result can be obtained from a subsequent call to result().
"""
return self._ldap_call(self._l.delete_ext,dn,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def delete_ext_s(self,dn,serverctrls=None,clientctrls=None):
msgid = self.delete_ext(dn,serverctrls,clientctrls)
return self.result(msgid,all=1,timeout=self.timeout)
def delete(self,dn):
return self.delete_ext(dn,None,None)
def delete_s(self,dn):
return self.delete_ext_s(dn,None,None)
def modify_ext(self,dn,modlist,serverctrls=None,clientctrls=None):
"""
modify_ext(dn, modlist[,serverctrls=None[,clientctrls=None]]) -> int
"""
return self._ldap_call(self._l.modify_ext,dn,modlist,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def modify_ext_s(self,dn,modlist,serverctrls=None,clientctrls=None):
msgid = self.modify_ext(dn,modlist,serverctrls,clientctrls)
return self.result(msgid,all=1,timeout=self.timeout)
def modify(self,dn,modlist):
"""
modify(dn, modlist) -> int
modify_s(dn, modlist) -> None
modify_ext(dn, modlist[,serverctrls=None[,clientctrls=None]]) -> int
modify_ext_s(dn, modlist[,serverctrls=None[,clientctrls=None]]) -> None
Performs an LDAP modify operation on an entry's attributes.
dn is the DN of the entry to modify, and modlist is the list
of modifications to make to the entry.
Each element of the list modlist should be a tuple of the form
(mod_op,mod_type,mod_vals), where mod_op is the operation (one of
MOD_ADD, MOD_DELETE, MOD_INCREMENT or MOD_REPLACE), mod_type is a
string indicating the attribute type name, and mod_vals is either a
string value or a list of string values to add, delete, increment by or
replace respectively. For the delete operation, mod_vals may be None
indicating that all attributes are to be deleted.
The asynchronous modify() returns the message id of the
initiated request.
"""
return self.modify_ext(dn,modlist,None,None)
def modify_s(self,dn,modlist):
msgid = self.modify(dn,modlist)
return self.result(msgid,all=1,timeout=self.timeout)
def modrdn(self,dn,newrdn,delold=1):
"""
modrdn(dn, newrdn [,delold=1]) -> int
modrdn_s(dn, newrdn [,delold=1]) -> None
Perform a modify RDN operation. These routines take dn, the
DN of the entry whose RDN is to be changed, and newrdn, the
new RDN to give to the entry. The optional parameter delold
is used to specify whether the old RDN should be kept as
an attribute of the entry or not. The asynchronous version
returns the initiated message id.
This operation is emulated by rename() and rename_s() methods
since the modrdn2* routines in the C library are deprecated.
"""
return self.rename(dn,newrdn,None,delold)
def modrdn_s(self,dn,newrdn,delold=1):
return self.rename_s(dn,newrdn,None,delold)
def passwd(self,user,oldpw,newpw,serverctrls=None,clientctrls=None):
return self._ldap_call(self._l.passwd,user,oldpw,newpw,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def passwd_s(self,user,oldpw,newpw,serverctrls=None,clientctrls=None):
msgid = self.passwd(user,oldpw,newpw,serverctrls,clientctrls)
return self.result(msgid,all=1,timeout=self.timeout)
def rename(self,dn,newrdn,newsuperior=None,delold=1,serverctrls=None,clientctrls=None):
"""
rename(dn, newrdn [, newsuperior=None [,delold=1][,serverctrls=None[,clientctrls=None]]]) -> int
rename_s(dn, newrdn [, newsuperior=None] [,delold=1][,serverctrls=None[,clientctrls=None]]) -> None
Perform a rename entry operation. These routines take dn, the
DN of the entry whose RDN is to be changed, newrdn, the
new RDN, and newsuperior, the new parent DN, to give to the entry.
If newsuperior is None then only the RDN is modified.
The optional parameter delold is used to specify whether the
old RDN should be kept as an attribute of the entry or not.
The asynchronous version returns the initiated message id.
This actually corresponds to the rename* routines in the
LDAP-EXT C API library.
"""
return self._ldap_call(self._l.rename,dn,newrdn,newsuperior,delold,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def rename_s(self,dn,newrdn,newsuperior=None,delold=1,serverctrls=None,clientctrls=None):
msgid = self.rename(dn,newrdn,newsuperior,delold,serverctrls,clientctrls)
return self.result(msgid,all=1,timeout=self.timeout)
def result(self,msgid=ldap.RES_ANY,all=1,timeout=None):
"""
result([msgid=RES_ANY [,all=1 [,timeout=None]]]) -> (result_type, result_data)
This method is used to wait for and return the result of an
operation previously initiated by one of the LDAP asynchronous
operation routines (eg search(), modify(), etc.) They all
returned an invocation identifier (a message id) upon successful
initiation of their operation. This id is guaranteed to be
unique across an LDAP session, and can be used to request the
result of a specific operation via the msgid parameter of the
result() method.
If the result of a specific operation is required, msgid should
be set to the invocation message id returned when the operation
was initiated; otherwise RES_ANY should be supplied.
The all parameter only has meaning for search() responses
and is used to select whether a single entry of the search
response should be returned, or to wait for all the results
of the search before returning.
A search response is made up of zero or more search entries
followed by a search result. If all is 0, search entries will
be returned one at a time as they come in, via separate calls
to result(). If all is 1, the search response will be returned
in its entirety, i.e. after all entries and the final search
result have been received.
For all set to 0, result tuples
trickle in (with the same message id), and with the result type
RES_SEARCH_ENTRY, until the final result which has a result
type of RES_SEARCH_RESULT and a (usually) empty data field.
When all is set to 1, only one result is returned, with a
result type of RES_SEARCH_RESULT, and all the result tuples
listed in the data field.
The method returns a tuple of the form (result_type,
result_data). The result_type is one of the constants RES_*.
See search() for a description of the search result's
result_data, otherwise the result_data is normally meaningless.
The result() method will block for timeout seconds, or
indefinitely if timeout is negative. A timeout of 0 will effect
a poll. The timeout can be expressed as a floating-point value.
If timeout is None the default in self.timeout is used.
If a timeout occurs, a TIMEOUT exception is raised, unless
polling (timeout = 0), in which case (None, None) is returned.
"""
res_type,res_data,res_msgid = self.result2(msgid,all,timeout)
return res_type,res_data
def result2(self,msgid=ldap.RES_ANY,all=1,timeout=None):
res_type, res_data, res_msgid, srv_ctrls = self.result3(msgid,all,timeout)
return res_type, res_data, res_msgid
def result3(self,msgid=ldap.RES_ANY,all=1,timeout=None):
if timeout is None:
timeout = self.timeout
ldap_result = self._ldap_call(self._l.result3,msgid,all,timeout)
if ldap_result is None:
rtype, rdata, rmsgid, decoded_serverctrls = (None,None,None,None)
else:
rtype, rdata, rmsgid, serverctrls = ldap_result
decoded_serverctrls = DecodeControlTuples(serverctrls)
return rtype, rdata, rmsgid, decoded_serverctrls
def search_ext(self,base,scope,filterstr='(objectClass=*)',attrlist=None,attrsonly=0,serverctrls=None,clientctrls=None,timeout=-1,sizelimit=0):
"""
search(base, scope [,filterstr='(objectClass=*)' [,attrlist=None [,attrsonly=0]]]) -> int
search_s(base, scope [,filterstr='(objectClass=*)' [,attrlist=None [,attrsonly=0]]])
search_st(base, scope [,filterstr='(objectClass=*)' [,attrlist=None [,attrsonly=0 [,timeout=-1]]]])
search_ext(base,scope,[,filterstr='(objectClass=*)' [,attrlist=None [,attrsonly=0 [,serverctrls=None [,clientctrls=None [,timeout=-1 [,sizelimit=0]]]]]]])
search_ext_s(base,scope,[,filterstr='(objectClass=*)' [,attrlist=None [,attrsonly=0 [,serverctrls=None [,clientctrls=None [,timeout=-1 [,sizelimit=0]]]]]]])
Perform an LDAP search operation, with base as the DN of
the entry at which to start the search, scope being one of
SCOPE_BASE (to search the object itself), SCOPE_ONELEVEL
(to search the object's immediate children), or SCOPE_SUBTREE
(to search the object and all its descendants).
filter is a string representation of the filter to
apply in the search (see RFC 2254).
Each result tuple is of the form (dn,entry), where dn is a
string containing the DN (distinguished name) of the entry, and
entry is a dictionary containing the attributes.
Attributes types are used as string dictionary keys and attribute
values are stored in a list as dictionary value.
The DN in dn is extracted using the underlying ldap_get_dn(),
    which may raise an exception if the DN is malformed.
If attrsonly is non-zero, the values of attrs will be
meaningless (they are not transmitted in the result).
The retrieved attributes can be limited with the attrlist
parameter. If attrlist is None, all the attributes of each
entry are returned.
serverctrls=None
clientctrls=None
The synchronous form with timeout, search_st() or search_ext_s(),
will block for at most timeout seconds (or indefinitely if
timeout is negative). A TIMEOUT exception is raised if no result is
received within the time.
The amount of search results retrieved can be limited with the
sizelimit parameter if non-zero.
"""
return self._ldap_call(
self._l.search_ext,
base,scope,filterstr,
attrlist,attrsonly,
EncodeControlTuples(serverctrls),
EncodeControlTuples(clientctrls),
timeout,sizelimit,
)
def search_ext_s(self,base,scope,filterstr='(objectClass=*)',attrlist=None,attrsonly=0,serverctrls=None,clientctrls=None,timeout=-1,sizelimit=0):
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
return self.result(msgid,all=1,timeout=timeout)[1]
def search(self,base,scope,filterstr='(objectClass=*)',attrlist=None,attrsonly=0):
return self.search_ext(base,scope,filterstr,attrlist,attrsonly,None,None)
def search_s(self,base,scope,filterstr='(objectClass=*)',attrlist=None,attrsonly=0):
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
def search_st(self,base,scope,filterstr='(objectClass=*)',attrlist=None,attrsonly=0,timeout=-1):
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout)
def set_cache_options(self,*args,**kwargs):
"""
set_cache_options(option) -> None
Changes the caching behaviour. Currently supported options are
CACHE_OPT_CACHENOERRS, which suppresses caching of requests
that resulted in an error, and
CACHE_OPT_CACHEALLERRS, which enables caching of all requests.
The default behaviour is not to cache requests that result in
errors, except those that result in a SIZELIMIT_EXCEEDED exception.
"""
return self._ldap_call(self._l.set_cache_options,*args,**kwargs)
def start_tls_s(self):
"""
start_tls_s() -> None
Negotiate TLS with server. The `version' attribute must have been
set to VERSION3 before calling start_tls_s.
If TLS could not be started an exception will be raised.
"""
return self._ldap_call(self._l.start_tls_s)
def unbind_ext(self,serverctrls=None,clientctrls=None):
"""
unbind() -> int
unbind_s() -> None
unbind_ext() -> int
unbind_ext_s() -> None
This call is used to unbind from the directory, terminate
the current association, and free resources. Once called, the
connection to the LDAP server is closed and the LDAP object
is invalid. Further invocation of methods on the object will
yield an exception.
The unbind and unbind_s methods are identical, and are
    synchronous in nature.
"""
return self._ldap_call(self._l.unbind_ext,EncodeControlTuples(serverctrls),EncodeControlTuples(clientctrls))
def unbind_ext_s(self,serverctrls=None,clientctrls=None):
msgid = self.unbind_ext(serverctrls,clientctrls)
if msgid!=None:
return self.result(msgid,all=1,timeout=self.timeout)
def unbind(self):
return self.unbind_ext(None,None)
def unbind_s(self):
return self.unbind_ext_s(None,None)
def whoami_s(self,serverctrls=None,clientctrls=None):
return self._ldap_call(self._l.whoami_s,serverctrls,clientctrls)
def get_option(self,option):
result = self._ldap_call(self._l.get_option,option)
if option==ldap.OPT_SERVER_CONTROLS or option==ldap.OPT_CLIENT_CONTROLS:
result = DecodeControlTuples(result)
return result
def set_option(self,option,invalue):
if option==ldap.OPT_SERVER_CONTROLS or option==ldap.OPT_CLIENT_CONTROLS:
invalue = EncodeControlTuples(invalue)
return self._ldap_call(self._l.set_option,option,invalue)
def search_subschemasubentry_s(self,dn=''):
"""
Returns the distinguished name of the sub schema sub entry
for a part of a DIT specified by dn.
None as result indicates that the DN of the sub schema sub entry could
not be determined.
"""
try:
r = self.search_s(
dn,ldap.SCOPE_BASE,'(objectClass=*)',['subschemaSubentry']
)
except (ldap.NO_SUCH_OBJECT,ldap.NO_SUCH_ATTRIBUTE,ldap.INSUFFICIENT_ACCESS):
r = []
except ldap.UNDEFINED_TYPE:
return None
try:
if r:
e = ldap.cidict.cidict(r[0][1])
search_subschemasubentry_dn = e.get('subschemaSubentry',[None])[0]
if search_subschemasubentry_dn is None:
if dn:
# Try to find sub schema sub entry in root DSE
return self.search_subschemasubentry_s(dn='')
else:
# If dn was already root DSE we can return here
return None
else:
return search_subschemasubentry_dn
except IndexError:
return None
def read_subschemasubentry_s(self,subschemasubentry_dn,attrs=None):
"""
Returns the sub schema sub entry's data
"""
attrs = attrs or SCHEMA_ATTRS
try:
r = self.search_s(
subschemasubentry_dn,ldap.SCOPE_BASE,
'(objectClass=subschema)',
attrs
)
except ldap.NO_SUCH_OBJECT:
return None
else:
if r:
return r[0][1]
else:
return None
class NonblockingLDAPObject(SimpleLDAPObject):
def __init__(self,uri,trace_level=0,trace_file=None,result_timeout=-1):
self._result_timeout = result_timeout
SimpleLDAPObject.__init__(self,uri,trace_level,trace_file)
def result(self,msgid=ldap.RES_ANY,all=1,timeout=-1):
"""
"""
ldap_result = self._ldap_call(self._l.result,msgid,0,self._result_timeout)
if not all:
return ldap_result
start_time = time.time()
all_results = []
while all:
while ldap_result[0] is None:
if (timeout>=0) and (time.time()-start_time>timeout):
self._ldap_call(self._l.abandon,msgid)
raise ldap.TIMEOUT(
"LDAP time limit (%d secs) exceeded." % (timeout)
)
time.sleep(0.00001)
ldap_result = self._ldap_call(self._l.result,msgid,0,self._result_timeout)
if ldap_result[1] is None:
break
all_results.extend(ldap_result[1])
ldap_result = None,None
return all_results
def search_st(self,base,scope,filterstr='(objectClass=*)',attrlist=None,attrsonly=0,timeout=-1):
msgid = self.search(base,scope,filterstr,attrlist,attrsonly)
return self.result(msgid,all=1,timeout=timeout)
class ReconnectLDAPObject(SimpleLDAPObject):
"""
In case of server failure (ldap.SERVER_DOWN) the implementations
of all synchronous operation methods (search_s() etc.) are doing
an automatic reconnect and rebind and will retry the very same
operation.
This is very handy for broken LDAP server implementations
(e.g. in Lotus Domino) which drop connections very often making
it impossible to have a long-lasting control flow in the
application.
"""
__transient_attrs__ = {
'_l':None,
'_ldap_object_lock':None,
'_trace_file':None,
}
def __init__(
self,uri,
trace_level=0,trace_file=None,trace_stack_limit=5,
retry_max=1,retry_delay=60.0
):
"""
Parameters like SimpleLDAPObject.__init__() with these
additional arguments:
retry_max
Maximum count of reconnect trials
retry_delay
Time span to wait between two reconnect trials
"""
self._uri = uri
self._options = {}
self._last_bind = None
SimpleLDAPObject.__init__(self,uri,trace_level,trace_file,trace_stack_limit)
self._retry_max = retry_max
self._retry_delay = retry_delay
self._start_tls = 0
self._reconnects_done = 0L
def __getstate__(self):
"""return data representation for pickled object"""
d = {}
for k,v in self.__dict__.items():
if not self.__transient_attrs__.has_key(k):
d[k] = v
return d
def __setstate__(self,d):
"""set up the object from pickled data"""
self.__dict__.update(d)
self._ldap_object_lock = self._ldap_lock()
self._trace_file = sys.stdout
self.reconnect(self._uri)
def _apply_last_bind(self):
if self._last_bind!=None:
func,args,kwargs = self._last_bind
func(*args,**kwargs)
def _restore_options(self):
"""Restore all recorded options"""
for k,v in self._options.items():
SimpleLDAPObject.set_option(self,k,v)
def reconnect(self,uri):
# Drop and clean up old connection completely
# Reconnect
reconnect_counter = self._retry_max
while reconnect_counter:
if __debug__ and self._trace_level>=1:
self._trace_file.write('*** Try %d. reconnect to %s...\n' % (
self._retry_max-reconnect_counter+1,uri
))
try:
# Do the connect
self._l = ldap.functions._ldap_function_call(ldap._ldap_module_lock,_ldap.initialize,uri)
self._restore_options()
# StartTLS extended operation in case this was called before
if self._start_tls:
self.start_tls_s()
# Repeat last simple or SASL bind
self._apply_last_bind()
except ldap.SERVER_DOWN,e:
SimpleLDAPObject.unbind_s(self)
del self._l
if __debug__ and self._trace_level>=1:
self._trace_file.write('*** %d. reconnect to %s failed\n' % (
self._retry_max-reconnect_counter+1,uri
))
reconnect_counter = reconnect_counter-1
if not reconnect_counter:
raise
if __debug__ and self._trace_level>=1:
self._trace_file.write('=> delay %s...\n' % (self._retry_delay))
time.sleep(self._retry_delay)
else:
if __debug__ and self._trace_level>=1:
self._trace_file.write('*** %d. reconnect to %s successful, last operation will be repeated\n' % (
self._retry_max-reconnect_counter+1,uri
))
self._reconnects_done = self._reconnects_done + 1L
break
def _apply_method_s(self,func,*args,**kwargs):
if not self.__dict__.has_key('_l'):
self.reconnect(self._uri)
try:
return func(self,*args,**kwargs)
except ldap.SERVER_DOWN:
SimpleLDAPObject.unbind_s(self)
del self._l
# Try to reconnect
self.reconnect(self._uri)
# Re-try last operation
return func(self,*args,**kwargs)
def set_option(self,option,invalue):
self._options[option] = invalue
SimpleLDAPObject.set_option(self,option,invalue)
def simple_bind_s(self,*args,**kwargs):
self._last_bind = (self.simple_bind_s,args,kwargs)
return SimpleLDAPObject.simple_bind_s(self,*args,**kwargs)
def start_tls_s(self):
res = SimpleLDAPObject.start_tls_s(self)
self._start_tls = 1
return res
def sasl_interactive_bind_s(self,*args,**kwargs):
"""
sasl_interactive_bind_s(who, auth) -> None
"""
self._last_bind = (self.sasl_interactive_bind_s,args,kwargs)
return SimpleLDAPObject.sasl_interactive_bind_s(self,*args,**kwargs)
def add_ext_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.add_ext_s,*args,**kwargs)
def cancel_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.cancel_s,*args,**kwargs)
def compare_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.compare_s,*args,**kwargs)
def delete_ext_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.delete_ext_s,*args,**kwargs)
def modify_ext_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.modify_ext_s,*args,**kwargs)
def rename_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.rename_s,*args,**kwargs)
def search_ext_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.search_ext_s,*args,**kwargs)
def whoami_s(self,*args,**kwargs):
return self._apply_method_s(SimpleLDAPObject.whoami_s,*args,**kwargs)
# The class called LDAPObject will be used as default for
# ldap.open() and ldap.initialize()
LDAPObject = SimpleLDAPObject
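# Hedged usage sketch (not part of the original module); the URI and bind
# credentials below are placeholders:
#
# import ldap
# l = ldap.initialize('ldap://localhost:389')
# l.simple_bind_s('cn=admin,dc=example,dc=com', 'secret')
# print l.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, '(objectClass=*)')
# l.unbind_s()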
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/ext-py/python-ldap-2.3.13/Lib/ldap/ldapobject.py
|
Python
|
gpl-2.0
| 32,219
|
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
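# Hedged note (not part of the generated template): production deployments
# typically override the values flagged above via the environment, e.g.:
#   SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
#   DEBUG = False
#   ALLOWED_HOSTS = ['example.com']  # hypothetical host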
|
helenst/django
|
django/conf/project_template/project_name/settings.py
|
Python
|
bsd-3-clause
| 2,281
|
from __future__ import division
import copy
import numpy
from pycbc.tmpltbank.coord_utils import get_cov_params
def get_physical_covaried_masses(xis, bestMasses, bestXis, req_match,
massRangeParams, metricParams, fUpper,
giveUpThresh = 5000):
"""
This function takes the position of a point in the xi parameter space and
iteratively finds a close point in the physical coordinate space (masses
and spins).
Parameters
-----------
xis : list or array
Desired position of the point in the xi space. If only N values are
provided and the xi space's dimension is larger then it is assumed that
*any* value in the remaining xi coordinates is acceptable.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the desired point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
req_match : float
Desired maximum mismatch between xis and the obtained point. If a point
is found with mismatch < req_match immediately stop and return that
point. A point with this mismatch will not always be found.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
giveUpThresh : int, optional (default = 5000)
The program will try this many iterations. If no close matching point
has been found after this it will give up.
Returns
--------
mass1 : float
The heavier mass of the obtained point.
mass2 : float
The smaller mass of the obtained point
spin1z : float
        The heavier body's spin of the obtained point.
spin2z : float
        The smaller body's spin of the obtained point.
count : int
How many iterations it took to find the point. For debugging.
mismatch : float
The mismatch between the obtained point and the input xis.
new_xis : list
The position of the point in the xi space
"""
# TUNABLE PARAMETERS GO HERE!
# This states how far apart to scatter test points in the first proposal
origScaleFactor = 1
# Set up
xi_size = len(xis)
scaleFactor = origScaleFactor
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
count = 0
unFixedCount = 0
currDist = 100000000000000000
while(1):
# If we are a long way away we use larger jumps
if count:
if currDist > 1 and scaleFactor == origScaleFactor:
scaleFactor = origScaleFactor*10
# Get a set of test points with mass -> xi mappings
totmass, eta, spin1z, spin2z, mass1, mass2, new_xis = \
get_mass_distribution([bestChirpmass, bestMasses[1], bestMasses[2],
bestMasses[3]],
scaleFactor, massRangeParams, metricParams,
fUpper)
cDist = (new_xis[0] - xis[0])**2
for j in xrange(1,xi_size):
cDist += (new_xis[j] - xis[j])**2
if (cDist.min() < req_match):
idx = cDist.argmin()
scaleFactor = origScaleFactor
new_xis_list = [new_xis[ldx][idx] for ldx in xrange(len(new_xis))]
return mass1[idx], mass2[idx], spin1z[idx], spin2z[idx], count, \
cDist.min(), new_xis_list
if (cDist.min() < currDist):
idx = cDist.argmin()
bestMasses[0] = totmass[idx]
bestMasses[1] = eta[idx]
bestMasses[2] = spin1z[idx]
bestMasses[3] = spin2z[idx]
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
currDist = cDist.min()
unFixedCount = 0
scaleFactor = origScaleFactor
count += 1
unFixedCount += 1
if unFixedCount > giveUpThresh:
# Stop at this point
diff = (bestMasses[0]*bestMasses[0] * (1-4*bestMasses[1]))**0.5
mass1 = (bestMasses[0] + diff)/2.
mass2 = (bestMasses[0] - diff)/2.
new_xis_list = [new_xis[ldx][0] for ldx in xrange(len(new_xis))]
return mass1, mass2, bestMasses[2], bestMasses[3], count, \
currDist, new_xis_list
if not unFixedCount % 100:
scaleFactor *= 2
if scaleFactor > 64:
scaleFactor = 1
# Shouldn't be here!
raise RuntimeError
def get_mass_distribution(bestMasses, scaleFactor, massRangeParams,
metricParams, fUpper,
numJumpPoints=100, chirpMassJumpFac=0.0001,
etaJumpFac=0.01, spin1zJumpFac=0.01,
spin2zJumpFac=0.01):
"""
Given a set of masses, this function will create a set of points nearby
in the mass space and map these to the xi space.
Parameters
-----------
bestMasses : list
        Contains [ChirpMass, eta, spin1z, spin2z]. Points will be placed
        around this position.
scaleFactor : float
This parameter describes the radius away from bestMasses that points
will be placed in.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
numJumpPoints : int, optional (default = 100)
The number of points that will be generated every iteration
chirpMassJumpFac : float, optional (default=0.0001)
The jump points will be chosen with fractional variation in chirpMass
up to this multiplied by scaleFactor.
etaJumpFac : float, optional (default=0.01)
The jump points will be chosen with fractional variation in eta
up to this multiplied by scaleFactor.
spin1zJumpFac : float, optional (default=0.01)
The jump points will be chosen with absolute variation in spin1z up to
this multiplied by scaleFactor.
spin2zJumpFac : float, optional (default=0.01)
The jump points will be chosen with absolute variation in spin2z up to
this multiplied by scaleFactor.
Returns
--------
Totmass : numpy.array
Total mass of the resulting points
Eta : numpy.array
Symmetric mass ratio of the resulting points
Spin1z : numpy.array
Spin of the heavier body of the resulting points
Spin2z : numpy.array
Spin of the smaller body of the resulting points
Diff : numpy.array
Mass1 - Mass2 of the resulting points
Mass1 : numpy.array
Mass1 (mass of heavier body) of the resulting points
Mass2 : numpy.array
Mass2 (mass of smaller body) of the resulting points
new_xis : list of numpy.array
Position of points in the xi coordinates
"""
# FIXME: It would be better if rejected values could be drawn from the
# full possible mass/spin distribution. However speed in this function is
# a major factor and must be considered.
bestChirpmass = bestMasses[0]
bestEta = bestMasses[1]
bestSpin1z = bestMasses[2]
bestSpin2z = bestMasses[3]
# Firstly choose a set of values for masses and spins
chirpmass = bestChirpmass * (1 - (numpy.random.random(numJumpPoints)-0.5) \
* chirpMassJumpFac * scaleFactor )
etaRange = massRangeParams.maxEta - massRangeParams.minEta
currJumpFac = etaJumpFac * scaleFactor
if currJumpFac > etaRange:
currJumpFac = etaRange
eta = bestEta * ( 1 - (numpy.random.random(numJumpPoints) - 0.5) \
* currJumpFac)
maxSpinMag = max(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag)
minSpinMag = min(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag)
    # Note that these two are changed by spinxzFac, *not* spinxzFac/spinxz
currJumpFac = spin1zJumpFac * scaleFactor
if currJumpFac > maxSpinMag:
currJumpFac = maxSpinMag
# Actually set the new spin trial points
if massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag):
curr_spin_1z_jump_fac = currJumpFac
curr_spin_2z_jump_fac = currJumpFac
# Check spins aren't going to be unphysical
if currJumpFac > massRangeParams.maxBHSpinMag:
curr_spin_1z_jump_fac = massRangeParams.maxBHSpinMag
if currJumpFac > massRangeParams.maxNSSpinMag:
curr_spin_2z_jump_fac = massRangeParams.maxNSSpinMag
spin1z = bestSpin1z + ( (numpy.random.random(numJumpPoints) - 0.5) \
* curr_spin_1z_jump_fac)
spin2z = bestSpin2z + ( (numpy.random.random(numJumpPoints) - 0.5) \
* curr_spin_2z_jump_fac)
else:
# If maxNSSpinMag is very low (0) and maxBHSpinMag is high we can
# find it hard to place any points. So mix these when
# masses are swapping between the NS and BH.
curr_spin_bh_jump_fac = currJumpFac
curr_spin_ns_jump_fac = currJumpFac
# Check spins aren't going to be unphysical
if currJumpFac > massRangeParams.maxBHSpinMag:
curr_spin_bh_jump_fac = massRangeParams.maxBHSpinMag
if currJumpFac > massRangeParams.maxNSSpinMag:
curr_spin_ns_jump_fac = massRangeParams.maxNSSpinMag
spin1z = numpy.zeros(numJumpPoints, dtype=float)
spin2z = numpy.zeros(numJumpPoints, dtype=float)
split_point = int(numJumpPoints/2)
# So set the first half to be at least within the BH range and the
# second half to be at least within the NS range
spin1z[:split_point] = bestSpin1z + \
( (numpy.random.random(split_point) - 0.5)\
* curr_spin_bh_jump_fac)
spin1z[split_point:] = bestSpin1z + \
( (numpy.random.random(numJumpPoints-split_point) - 0.5)\
* curr_spin_ns_jump_fac)
spin2z[:split_point] = bestSpin2z + \
( (numpy.random.random(split_point) - 0.5)\
* curr_spin_bh_jump_fac)
spin2z[split_point:] = bestSpin2z + \
( (numpy.random.random(numJumpPoints-split_point) - 0.5)\
* curr_spin_ns_jump_fac)
# Point[0] is always set to the original point
chirpmass[0] = bestChirpmass
eta[0] = bestEta
spin1z[0] = bestSpin1z
spin2z[0] = bestSpin2z
# Remove points where eta becomes unphysical
eta[eta > massRangeParams.maxEta] = massRangeParams.maxEta
if massRangeParams.minEta:
eta[eta < massRangeParams.minEta] = massRangeParams.minEta
else:
eta[eta < 0.0001] = 0.0001
# Total mass, masses and mass diff
totmass = chirpmass / (eta**(3./5.))
diff = (totmass*totmass * (1-4*eta))**0.5
mass1 = (totmass + diff)/2.
mass2 = (totmass - diff)/2.
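    # These follow from inverting M_chirp = M * eta**(3/5) and
    # eta = m1*m2 / M**2, which give m_{1,2} = (M +/- sqrt(M**2*(1-4*eta)))/2.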
# Check the validity of the spin values
# Do the first spin
if maxSpinMag == 0:
# Shortcut if non-spinning
pass
elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag):
# Simple case where I don't have to worry about correlation with mass
numploga = abs(spin1z) > massRangeParams.maxBHSpinMag
spin1z[numploga] = 0
else:
# Do have to consider masses
boundary_mass = massRangeParams.ns_bh_boundary_mass
numploga1 = numpy.logical_and(mass1 >= boundary_mass,
abs(spin1z) <= massRangeParams.maxBHSpinMag)
numploga2 = numpy.logical_and(mass1 < boundary_mass,
abs(spin1z) <= massRangeParams.maxNSSpinMag)
numploga = numpy.logical_or(numploga1, numploga2)
numploga = numpy.logical_not(numploga)
spin1z[numploga] = 0
# Same for the second spin
if maxSpinMag == 0:
# Shortcut if non-spinning
pass
elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag):
numplogb = abs(spin2z) > massRangeParams.maxNSSpinMag
spin2z[numplogb] = 0
else:
# Do have to consider masses
boundary_mass = massRangeParams.ns_bh_boundary_mass
numplogb1 = numpy.logical_and(mass2 >= boundary_mass,
abs(spin2z) <= massRangeParams.maxBHSpinMag)
numplogb2 = numpy.logical_and(mass2 < boundary_mass,
abs(spin2z) <= massRangeParams.maxNSSpinMag)
numplogb = numpy.logical_or(numplogb1, numplogb2)
numplogb = numpy.logical_not(numplogb)
spin2z[numplogb] = 0
if (maxSpinMag) and (numploga[0] or numplogb[0]):
raise ValueError("Cannot remove the guide point!")
# And remove points where the individual masses are outside of the physical
# range. Or the total masses are.
# These "removed" points will have metric distances that will be much, much
# larger than any thresholds used in the functions in brute_force_utils.py
# and will always be rejected. An unphysical value cannot be used as it
# would result in unphysical metric distances and cause failures.
totmass[mass1 < massRangeParams.minMass1] = 0.0001
totmass[mass1 > massRangeParams.maxMass1] = 0.0001
totmass[mass2 < massRangeParams.minMass2] = 0.0001
totmass[mass2 > massRangeParams.maxMass2] = 0.0001
# There is some numerical error which can push this a bit higher. We do
# *not* want to reject the initial guide point. This error comes from
# Masses -> totmass, eta -> masses conversion, we will have points pushing
    # onto the boundaries of the space.
totmass[totmass > massRangeParams.maxTotMass*1.0001] = 0.0001
totmass[totmass < massRangeParams.minTotMass*0.9999] = 0.0001
if massRangeParams.max_chirp_mass:
totmass[chirpmass > massRangeParams.max_chirp_mass*1.0001] = 0.0001
if massRangeParams.min_chirp_mass:
totmass[chirpmass < massRangeParams.min_chirp_mass*0.9999] = 0.0001
if totmass[0] < 0.00011:
raise ValueError("Cannot remove the guide point!")
mass1[totmass < 0.00011] = 0.0001
mass2[totmass < 0.00011] = 0.0001
# Then map to xis
new_xis = get_cov_params(mass1, mass2, spin1z, spin2z,
metricParams, fUpper)
return totmass, eta, spin1z, spin2z, mass1, mass2, new_xis
def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num,
req_match, massRangeParams, metricParams, fUpper,
scaleFactor=0.8, numIterations=3000):
"""
This function is used to assess the depth of the xi_space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find maxima and minima.
Parameters
-----------
xis : list or array
Position in the xi space at which to assess the depth. This can be only
a subset of the higher dimensions than that being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
        The dimension that you want to assess the depth of (0-indexed: 0 is
        the first dimension, 1 the second, and so on).
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_min : float
The minimal value of the specified dimension at the specified point in
parameter space.
xi_max : float
The maximal value of the specified dimension at the specified point in
parameter space.
"""
# Find minimum
ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=True, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
# Find maximum
ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=False, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
return ximin, ximax
def find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, req_match, \
massRangeParams, metricParams, fUpper, \
find_minimum=False, scaleFactor=0.8, \
numIterations=3000):
"""
This function is used to find the largest or smallest value of the xi
space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find extrema.
Parameters
-----------
xis : list or array
Position in the xi space at which to assess the depth. This can be only
a subset of the higher dimensions than that being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
        The dimension that you want to assess the depth of (0-indexed: 0 is
        the first dimension, 1 the second, and so on).
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
find_minimum : boolean, optional (default = False)
If True, find the minimum value of the xi direction. If False find the
maximum value.
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_extent : float
The extremal value of the specified dimension at the specified point in
parameter space.
"""
# Setup
xi_size = len(xis)
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
if find_minimum:
xiextrema = 10000000000
else:
xiextrema = -100000000000
for _ in xrange(numIterations):
# Evaluate extrema of the xi direction specified
totmass, eta, spin1z, spin2z, _, _, new_xis = \
get_mass_distribution([bestChirpmass,bestMasses[1],bestMasses[2],
bestMasses[3]],
scaleFactor, massRangeParams, metricParams,
fUpper)
cDist = (new_xis[0] - xis[0])**2
for j in xrange(1, xi_size):
cDist += (new_xis[j] - xis[j])**2
redCDist = cDist[cDist < req_match]
if len(redCDist):
if not find_minimum:
new_xis[direction_num][cDist > req_match] = -10000000
currXiExtrema = (new_xis[direction_num]).max()
idx = (new_xis[direction_num]).argmax()
else:
new_xis[direction_num][cDist > req_match] = 10000000
currXiExtrema = (new_xis[direction_num]).min()
idx = (new_xis[direction_num]).argmin()
if ( ((not find_minimum) and (currXiExtrema > xiextrema)) or \
(find_minimum and (currXiExtrema < xiextrema)) ):
xiextrema = currXiExtrema
bestMasses[0] = totmass[idx]
bestMasses[1] = eta[idx]
bestMasses[2] = spin1z[idx]
bestMasses[3] = spin2z[idx]
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
return xiextrema
|
hagabbar/pycbc_copy
|
pycbc/tmpltbank/brute_force_methods.py
|
Python
|
gpl-3.0
| 23,536
|
SPECS = {
"org.freedesktop.DBus.ObjectManager": """
<interface name="org.freedesktop.DBus.ObjectManager">
<method name="GetManagedObjects">
<arg name="objpath_interfaces_and_properties" type="a{oa{sa{sv}}}" direction="out" />
</method>
</interface>
""",
"org.storage.stratis3.Manager.r1": """
<interface name="org.storage.stratis3.Manager.r1">
<method name="CreatePool">
<arg name="name" type="s" direction="in" />
<arg name="redundancy" type="(bq)" direction="in" />
<arg name="devices" type="as" direction="in" />
<arg name="key_desc" type="(bs)" direction="in" />
<arg name="clevis_info" type="(b(ss))" direction="in" />
<arg name="result" type="(b(oao))" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="DestroyPool">
<arg name="pool" type="o" direction="in" />
<arg name="result" type="(bs)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="EngineStateReport">
<arg name="result" type="s" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="ListKeys">
<arg name="result" type="as" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="SetKey">
<arg name="key_desc" type="s" direction="in" />
<arg name="key_fd" type="h" direction="in" />
<arg name="result" type="(bb)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="UnlockPool">
<arg name="pool_uuid" type="s" direction="in" />
<arg name="unlock_method" type="s" direction="in" />
<arg name="result" type="(bas)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="UnsetKey">
<arg name="key_desc" type="s" direction="in" />
<arg name="result" type="b" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<property name="LockedPools" type="a{sa{sv}}" access="read" />
<property name="Version" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
</interface>
""",
"org.storage.stratis3.Report.r1": """
<interface name="org.storage.stratis3.Report.r1">
<method name="GetReport">
<arg name="name" type="s" direction="in" />
<arg name="result" type="s" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
</interface>
""",
"org.storage.stratis3.blockdev.r1": """
<interface name="org.storage.stratis3.blockdev.r1">
<method name="SetUserInfo">
<arg name="id" type="(bs)" direction="in" />
<arg name="changed" type="(bs)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<property name="Devnode" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="HardwareInfo" type="(bs)" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="InitializationTime" type="t" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="PhysicalPath" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="Pool" type="o" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="Tier" type="q" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="false" />
</property>
<property name="TotalPhysicalSize" type="s" access="read" />
<property name="UserInfo" type="(bs)" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="false" />
</property>
<property name="Uuid" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
</interface>
""",
"org.storage.stratis3.filesystem.r1": """
<interface name="org.storage.stratis3.filesystem.r1">
<method name="SetName">
<arg name="name" type="s" direction="in" />
<arg name="result" type="(bs)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<property name="Created" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="Devnode" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="invalidates" />
</property>
<property name="Name" type="s" access="read" />
<property name="Pool" type="o" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="Size" type="s" access="read" />
<property name="Used" type="(bs)" access="read" />
<property name="Uuid" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
</interface>
""",
"org.storage.stratis3.pool.r1": """
<interface name="org.storage.stratis3.pool.r1">
<method name="AddCacheDevs">
<arg name="devices" type="as" direction="in" />
<arg name="results" type="(bao)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="AddDataDevs">
<arg name="devices" type="as" direction="in" />
<arg name="results" type="(bao)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="BindClevis">
<arg name="pin" type="s" direction="in" />
<arg name="json" type="s" direction="in" />
<arg name="results" type="b" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="BindKeyring">
<arg name="key_desc" type="s" direction="in" />
<arg name="results" type="b" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="CreateFilesystems">
<arg name="specs" type="a(s(bs))" direction="in" />
<arg name="results" type="(ba(os))" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="DestroyFilesystems">
<arg name="filesystems" type="ao" direction="in" />
<arg name="results" type="(bas)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="InitCache">
<arg name="devices" type="as" direction="in" />
<arg name="results" type="(bao)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="RebindClevis">
<arg name="results" type="b" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="RebindKeyring">
<arg name="key_desc" type="s" direction="in" />
<arg name="results" type="b" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="SetName">
<arg name="name" type="s" direction="in" />
<arg name="result" type="(bs)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="SnapshotFilesystem">
<arg name="origin" type="o" direction="in" />
<arg name="snapshot_name" type="s" direction="in" />
<arg name="result" type="(bo)" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="UnbindClevis">
<arg name="results" type="b" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<method name="UnbindKeyring">
<arg name="results" type="b" direction="out" />
<arg name="return_code" type="q" direction="out" />
<arg name="return_string" type="s" direction="out" />
</method>
<property name="AllocatedSize" type="s" access="read" />
<property name="AvailableActions" type="s" access="read" />
<property name="ClevisInfo" type="(b(b(ss)))" access="read" />
<property name="Encrypted" type="b" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
<property name="HasCache" type="b" access="read" />
<property name="KeyDescription" type="(b(bs))" access="read" />
<property name="Name" type="s" access="read" />
<property name="TotalPhysicalSize" type="s" access="read" />
<property name="TotalPhysicalUsed" type="(bs)" access="read" />
<property name="Uuid" type="s" access="read">
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="const" />
</property>
</interface>
""",
}
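
# Illustrative sketch (not part of the original module): each SPECS value is
# D-Bus introspection XML, so the standard library's ElementTree can pull out
# an interface's method names.
if __name__ == '__main__':
    import xml.etree.ElementTree as ET
    interface = ET.fromstring(SPECS["org.storage.stratis3.Manager.r1"])
    for method in interface.findall("method"):
        print(method.get("name"))  # CreatePool, DestroyPool, ...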
|
stratis-storage/stratisd
|
tests/client-dbus/src/stratisd_client_dbus/_introspect.py
|
Python
|
mpl-2.0
| 10,523
|
from ..core.bunk_user import BunkUser
"""
Metadata class for easy embeds when a duel has completed
"""
class DuelResult:
def __init__(self, chal: BunkUser, opnt: BunkUser, winner: BunkUser, loser: BunkUser):
self.challenger: BunkUser = chal
self.opponent: BunkUser = opnt
self.winner: BunkUser = winner
self.loser: BunkUser = loser
self.challenger_roll: int = 0
self.opponent_roll: int = 0
self.challenger.is_dueling = False
self.challenger.challenged_by_id = None
self.opponent.is_dueling = False
self.opponent.challenged_by_id = None
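
# Illustrative usage (not part of the original file); assumes two BunkUser
# instances, here called alice and bob, obtained from the bot's user store:
#
#     result = DuelResult(chal=alice, opnt=bob, winner=alice, loser=bob)
#     result.challenger_roll = 87
#     result.opponent_roll = 42
#     # both users are now flagged as no longer dueling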
|
fugwenna/bunkbot
|
src/rpg/duel_result.py
|
Python
|
mit
| 623
|
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import cli_common.taskcluster
import uplift_backend.config
secrets = cli_common.taskcluster.get_secrets(
os.environ.get('TASKCLUSTER_SECRET'),
uplift_backend.config.PROJECT_NAME,
required=[],
existing={},
taskcluster_client_id=os.environ.get('TASKCLUSTER_CLIENT_ID'),
taskcluster_access_token=os.environ.get('TASKCLUSTER_ACCESS_TOKEN'),
)
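
# Illustrative note (not part of the original file): the call above pulls its
# credentials from the environment, so a local run might be configured like
# this (values are placeholders):
#
#     export TASKCLUSTER_SECRET="path/to/secret"
#     export TASKCLUSTER_CLIENT_ID="client-id"
#     export TASKCLUSTER_ACCESS_TOKEN="access-token"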
|
lundjordan/services
|
src/uplift/backend/uplift_backend/secrets.py
|
Python
|
mpl-2.0
| 599
|
from models import User
# select user.id from user where (user.id > '1' and (user.name = 'jack' or user.email = 'abc@abc.com'))
User.where(
(User.id > 1) & ((User.name == 'jack') | (User.email == 'abc@abc.com'))
).select(User.id)
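# A second expression in the same style (illustrative, not from the original
# snippet), grouping the OR first instead:
# select user.id from user where ((user.name = 'jack' or user.email = 'abc@abc.com') and user.id > '1')
User.where(
    ((User.name == 'jack') | (User.email == 'abc@abc.com')) & (User.id > 1)
).select(User.id)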
|
hit9/skylark
|
snippets/expressions_with_priority.py
|
Python
|
bsd-2-clause
| 235
|
#!/usr/bin/env python3
import json
import random
import redis
import time
from threading import Thread
# Replace with your configuration information
redis_host = "0.0.0.0"
redis_port = 6379
redis_password = ""
def subscriber(name, channels):
    """Processes simulated event messages from a Redis channel"""
    # Create a Redis connection
    connection = redis.Redis(host=redis_host, port=redis_port,
                             password=redis_password)
    # Subscribe to the requested channels
    pubsub = connection.pubsub()
    pubsub.subscribe(*channels)
    done = False
    while not done:
        # Poll for the next message; get_message() returns None when idle
        message = pubsub.get_message()
        if message is not None and message['type'] == 'message':
            print("Subscriber {} received a message: {} on channel {}".format(name, message['data'], message['channel']))
            event = json.loads(message['data'])
            if event['type'] == 'terminate':
                done = True
        time.sleep(0.001)
def run_subscribers():
"""Sets up a pubsub simulation environment with one publisher and 5 subscribers"""
# create listeners on
c1 = Thread(target=subscriber, args=('A', ['user_event:view', 'process:terminate']))
c2 = Thread(target=subscriber, args=('B', ['user_event:login', 'user_event:logout', 'process:terminate']))
c3 = Thread(target=subscriber, args=('C', ['user_event:login', 'process:terminate']))
c4 = Thread(target=subscriber, args=('D', ['user_event:logout', 'process:terminate']))
c5 = Thread(target=subscriber, args=('E', ['user_event:login', 'user_event:logout', 'user_event:view', 'process:terminate']))
c1.start()
c2.start()
c3.start()
c4.start()
c5.start()
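
# Illustrative sketch (not part of the original file): a minimal publisher
# that drives the subscribers above. Channel names and the terminate event
# mirror what subscriber() expects; the event payloads are made up.
def run_publisher(num_events=10):
    """Publishes a few random simulated events, then a terminate signal"""
    connection = redis.Redis(host=redis_host, port=redis_port,
                             password=redis_password)
    channels = ['user_event:view', 'user_event:login', 'user_event:logout']
    for _ in range(num_events):
        channel = random.choice(channels)
        event = {'type': channel.split(':')[1], 'user': random.randint(1, 100)}
        connection.publish(channel, json.dumps(event))
    connection.publish('process:terminate', json.dumps({'type': 'terminate'}))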
if __name__ == '__main__':
run_subscribers()
|
healthbridgeltd/zava-cop
|
redis/subscribe.py
|
Python
|
unlicense
| 1,640
|
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Test analyze, training, and prediction.
"""
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os
import pandas as pd
import shutil
import six
import sys
import tempfile
import unittest
from . import e2e_functions
from tensorflow.python.lib.io import file_io
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../..')))
import mltoolbox.regression.linear as reglinear # noqa: E402
import google.datalab.ml as dlml # noqa: E402
class TestLinearRegression(unittest.TestCase):
"""Test linear regression works e2e locally.
Note that there should be little need for testing the other scenarios (linear
classification, dnn regression, dnn classification) as they should only
differ at training time. The training coverage of task.py is already done in
test_sd_trainer.
"""
def __init__(self, *args, **kwargs):
super(TestLinearRegression, self).__init__(*args, **kwargs)
# Log everything
self._logger = logging.getLogger('TestStructuredDataLogger')
self._logger.setLevel(logging.DEBUG)
if not self._logger.handlers:
self._logger.addHandler(logging.StreamHandler(stream=sys.stdout))
def _make_test_files(self):
"""Builds test files and folders"""
# Make the output folders
self._test_dir = tempfile.mkdtemp()
self._preprocess_output = os.path.join(self._test_dir, 'preprocess')
self._train_output = os.path.join(self._test_dir, 'train')
self._batch_predict_output = os.path.join(self._test_dir, 'batch_predict')
# Don't make train_output folder as it should not exist at training time.
os.mkdir(self._preprocess_output)
os.mkdir(self._batch_predict_output)
# Make csv files
self._csv_train_filename = os.path.join(self._test_dir,
'train_csv_data.csv')
self._csv_eval_filename = os.path.join(self._test_dir,
'eval_csv_data.csv')
self._csv_predict_filename = os.path.join(self._test_dir,
'predict_csv_data.csv')
e2e_functions.make_csv_data(self._csv_train_filename, 100, 'regression',
True)
e2e_functions.make_csv_data(self._csv_eval_filename, 100, 'regression',
True)
self._predict_num_rows = 10
e2e_functions.make_csv_data(self._csv_predict_filename,
self._predict_num_rows, 'regression', False)
# Make schema file
self._schema_filename = os.path.join(self._test_dir, 'schema.json')
e2e_functions.make_preprocess_schema(self._schema_filename, 'regression')
# Make feature file
self._input_features_filename = os.path.join(self._test_dir,
'input_features_file.json')
transforms = {
"num1": {"transform": "scale"},
"num2": {"transform": "scale", "value": 4},
"str1": {"transform": "one_hot"},
"str2": {"transform": "embedding", "embedding_dim": 3},
"target": {"transform": "target"},
"key": {"transform": "key"},
}
file_io.write_string_to_file(
self._input_features_filename,
json.dumps(transforms, indent=2))
def _run_analyze(self):
reglinear.analyze(
output_dir=self._preprocess_output,
dataset=dlml.CsvDataSet(
file_pattern=self._csv_train_filename,
schema_file=self._schema_filename))
self.assertTrue(os.path.isfile(
os.path.join(self._preprocess_output, 'stats.json')))
self.assertTrue(os.path.isfile(
os.path.join(self._preprocess_output, 'vocab_str1.csv')))
def _run_train(self):
reglinear.train(
train_dataset=dlml.CsvDataSet(
file_pattern=self._csv_train_filename,
schema_file=self._schema_filename),
eval_dataset=dlml.CsvDataSet(
file_pattern=self._csv_eval_filename,
schema_file=self._schema_filename),
analysis_dir=self._preprocess_output,
output_dir=self._train_output,
features=self._input_features_filename,
max_steps=100,
train_batch_size=100)
self.assertTrue(os.path.isfile(
os.path.join(self._train_output, 'model', 'saved_model.pb')))
self.assertTrue(os.path.isfile(
os.path.join(self._train_output, 'evaluation_model', 'saved_model.pb')))
def _run_predict(self):
data = pd.read_csv(self._csv_predict_filename,
header=None)
df = reglinear.predict(data=data,
training_dir=self._train_output)
self.assertEqual(len(df.index), self._predict_num_rows)
self.assertEqual(list(df), ['key', 'predicted'])
def _run_batch_prediction(self, output_dir, use_target):
reglinear.batch_predict(
training_dir=self._train_output,
prediction_input_file=(self._csv_eval_filename if use_target
else self._csv_predict_filename),
output_dir=output_dir,
mode='evaluation' if use_target else 'prediction',
batch_size=4,
output_format='csv')
# check errors file is empty
errors = file_io.get_matching_files(os.path.join(output_dir, 'errors*'))
self.assertEqual(len(errors), 1)
self.assertEqual(os.path.getsize(errors[0]), 0)
# check predictions files are not empty
predictions = file_io.get_matching_files(os.path.join(output_dir,
'predictions*'))
self.assertGreater(os.path.getsize(predictions[0]), 0)
# check the schema is correct
schema_file = os.path.join(output_dir, 'csv_schema.json')
self.assertTrue(os.path.isfile(schema_file))
schema = json.loads(file_io.read_file_to_string(schema_file))
self.assertEqual(schema[0]['name'], 'key')
self.assertEqual(schema[1]['name'], 'predicted')
if use_target:
self.assertEqual(schema[2]['name'], 'target')
self.assertEqual(len(schema), 3)
else:
self.assertEqual(len(schema), 2)
def _cleanup(self):
shutil.rmtree(self._test_dir)
def test_e2e(self):
try:
self._make_test_files()
self._run_analyze()
self._run_train()
if six.PY2:
# Dataflow is only supported by python 2. Prediction assumes Dataflow
# is installed.
self._run_predict()
self._run_batch_prediction(
os.path.join(self._batch_predict_output, 'with_target'),
True)
self._run_batch_prediction(
os.path.join(self._batch_predict_output, 'without_target'),
False)
else:
print('only tested analyze in TestLinearRegression')
finally:
self._cleanup()
if __name__ == '__main__':
unittest.main()
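
# Illustrative note (not part of the original file): a single case can be run
# directly with the standard unittest CLI, e.g.
#
#     python test_datalab_e2e.py TestLinearRegression.test_e2e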
|
craigcitro/pydatalab
|
solutionbox/structured_data/test_mltoolbox/test_datalab_e2e.py
|
Python
|
apache-2.0
| 7,533
|
from nsga2 import MultiObjectiveGA
""" added by JPQ """
from constnsga2 import ConstMultiObjectiveGA
# ---
|
fxsjy/pybrain
|
pybrain/optimization/populationbased/multiobjective/__init__.py
|
Python
|
bsd-3-clause
| 106
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.container_v1.proto import cluster_service_pb2 as google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ClusterManagerStub(object):
"""Google Container Engine Cluster Manager v1
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListClusters = channel.unary_unary(
'/google.container.v1.ClusterManager/ListClusters',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersResponse.FromString,
)
self.GetCluster = channel.unary_unary(
'/google.container.v1.ClusterManager/GetCluster',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Cluster.FromString,
)
self.CreateCluster = channel.unary_unary(
'/google.container.v1.ClusterManager/CreateCluster',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateCluster = channel.unary_unary(
'/google.container.v1.ClusterManager/UpdateCluster',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateNodePool = channel.unary_unary(
'/google.container.v1.ClusterManager/UpdateNodePool',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolAutoscaling = channel.unary_unary(
'/google.container.v1.ClusterManager/SetNodePoolAutoscaling',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolAutoscalingRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLoggingService = channel.unary_unary(
'/google.container.v1.ClusterManager/SetLoggingService',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLoggingServiceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMonitoringService = channel.unary_unary(
'/google.container.v1.ClusterManager/SetMonitoringService',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMonitoringServiceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetAddonsConfig = channel.unary_unary(
'/google.container.v1.ClusterManager/SetAddonsConfig',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetAddonsConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLocations = channel.unary_unary(
'/google.container.v1.ClusterManager/SetLocations',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLocationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.UpdateMaster = channel.unary_unary(
'/google.container.v1.ClusterManager/UpdateMaster',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateMasterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMasterAuth = channel.unary_unary(
'/google.container.v1.ClusterManager/SetMasterAuth',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMasterAuthRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.DeleteCluster = channel.unary_unary(
'/google.container.v1.ClusterManager/DeleteCluster',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteClusterRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
'/google.container.v1.ClusterManager/ListOperations',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsResponse.FromString,
)
self.GetOperation = channel.unary_unary(
'/google.container.v1.ClusterManager/GetOperation',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetOperationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.CancelOperation = channel.unary_unary(
'/google.container.v1.ClusterManager/CancelOperation',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetServerConfig = channel.unary_unary(
'/google.container.v1.ClusterManager/GetServerConfig',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetServerConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ServerConfig.FromString,
)
self.ListNodePools = channel.unary_unary(
'/google.container.v1.ClusterManager/ListNodePools',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsResponse.FromString,
)
self.GetNodePool = channel.unary_unary(
'/google.container.v1.ClusterManager/GetNodePool',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.NodePool.FromString,
)
self.CreateNodePool = channel.unary_unary(
'/google.container.v1.ClusterManager/CreateNodePool',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.DeleteNodePool = channel.unary_unary(
'/google.container.v1.ClusterManager/DeleteNodePool',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteNodePoolRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.RollbackNodePoolUpgrade = channel.unary_unary(
'/google.container.v1.ClusterManager/RollbackNodePoolUpgrade',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.RollbackNodePoolUpgradeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolManagement = channel.unary_unary(
'/google.container.v1.ClusterManager/SetNodePoolManagement',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolManagementRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLabels = channel.unary_unary(
'/google.container.v1.ClusterManager/SetLabels',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLabelsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetLegacyAbac = channel.unary_unary(
'/google.container.v1.ClusterManager/SetLegacyAbac',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLegacyAbacRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.StartIPRotation = channel.unary_unary(
'/google.container.v1.ClusterManager/StartIPRotation',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.StartIPRotationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.CompleteIPRotation = channel.unary_unary(
'/google.container.v1.ClusterManager/CompleteIPRotation',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CompleteIPRotationRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNodePoolSize = channel.unary_unary(
'/google.container.v1.ClusterManager/SetNodePoolSize',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolSizeRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetNetworkPolicy = channel.unary_unary(
'/google.container.v1.ClusterManager/SetNetworkPolicy',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNetworkPolicyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
self.SetMaintenancePolicy = channel.unary_unary(
'/google.container.v1.ClusterManager/SetMaintenancePolicy',
request_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMaintenancePolicyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.FromString,
)
class ClusterManagerServicer(object):
"""Google Container Engine Cluster Manager v1
"""
def ListClusters(self, request, context):
"""Lists all clusters owned by a project in either the specified zone or all
zones.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetCluster(self, request, context):
"""Gets the details of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateCluster(self, request, context):
"""Creates a cluster, consisting of the specified number and type of Google
Compute Engine instances.
By default, the cluster is created in the project's
[default network](/compute/docs/networks-and-firewalls#networks).
One firewall is added for the cluster. After cluster creation,
the cluster creates routes for each node to allow the containers
on that node to communicate with all other instances in the
cluster.
Finally, an entry is added to the project's global metadata indicating
which CIDR range is being used by the cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateCluster(self, request, context):
"""Updates the settings of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateNodePool(self, request, context):
"""Updates the version and/or image type of a specific node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetNodePoolAutoscaling(self, request, context):
"""Sets the autoscaling settings of a specific node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLoggingService(self, request, context):
"""Sets the logging service of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetMonitoringService(self, request, context):
"""Sets the monitoring service of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetAddonsConfig(self, request, context):
"""Sets the addons of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLocations(self, request, context):
"""Sets the locations of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateMaster(self, request, context):
"""Updates the master of a specific cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetMasterAuth(self, request, context):
"""Used to set master auth materials. Currently supports :-
Changing the admin password of a specific cluster.
This can be either via password generation or explicitly set the password.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteCluster(self, request, context):
"""Deletes the cluster, including the Kubernetes endpoint and all worker
nodes.
Firewalls and routes that were configured during cluster creation
are also deleted.
Other Google Compute Engine resources that might be in use by the cluster
(e.g. load balancer resources) will not be deleted if they weren't present
at the initial create time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""Lists all operations in a project in a specific zone or all zones.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOperation(self, request, context):
"""Gets the specified operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelOperation(self, request, context):
"""Cancels the specified operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetServerConfig(self, request, context):
"""Returns configuration info about the Container Engine service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListNodePools(self, request, context):
"""Lists the node pools for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNodePool(self, request, context):
"""Retrieves the node pool requested.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateNodePool(self, request, context):
"""Creates a node pool for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteNodePool(self, request, context):
"""Deletes a node pool from a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RollbackNodePoolUpgrade(self, request, context):
"""Roll back the previously Aborted or Failed NodePool upgrade.
        This will be a no-op if the last upgrade successfully completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetNodePoolManagement(self, request, context):
"""Sets the NodeManagement options for a node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLabels(self, request, context):
"""Sets labels on a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLegacyAbac(self, request, context):
"""Enables or disables the ABAC authorization mechanism on a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StartIPRotation(self, request, context):
"""Start master IP rotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CompleteIPRotation(self, request, context):
"""Completes master IP rotation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetNodePoolSize(self, request, context):
"""Sets the size of a specific node pool.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetNetworkPolicy(self, request, context):
"""Enables/Disables Network Policy for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetMaintenancePolicy(self, request, context):
"""Sets the maintenance policy for a cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ClusterManagerServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListClusters': grpc.unary_unary_rpc_method_handler(
servicer.ListClusters,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersResponse.SerializeToString,
),
'GetCluster': grpc.unary_unary_rpc_method_handler(
servicer.GetCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Cluster.SerializeToString,
),
'CreateCluster': grpc.unary_unary_rpc_method_handler(
servicer.CreateCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'UpdateCluster': grpc.unary_unary_rpc_method_handler(
servicer.UpdateCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'UpdateNodePool': grpc.unary_unary_rpc_method_handler(
servicer.UpdateNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetNodePoolAutoscaling': grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolAutoscaling,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolAutoscalingRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetLoggingService': grpc.unary_unary_rpc_method_handler(
servicer.SetLoggingService,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLoggingServiceRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetMonitoringService': grpc.unary_unary_rpc_method_handler(
servicer.SetMonitoringService,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMonitoringServiceRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetAddonsConfig': grpc.unary_unary_rpc_method_handler(
servicer.SetAddonsConfig,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetAddonsConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetLocations': grpc.unary_unary_rpc_method_handler(
servicer.SetLocations,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLocationsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'UpdateMaster': grpc.unary_unary_rpc_method_handler(
servicer.UpdateMaster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.UpdateMasterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetMasterAuth': grpc.unary_unary_rpc_method_handler(
servicer.SetMasterAuth,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMasterAuthRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'DeleteCluster': grpc.unary_unary_rpc_method_handler(
servicer.DeleteCluster,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteClusterRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListOperationsResponse.SerializeToString,
),
'GetOperation': grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetOperationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'CancelOperation': grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CancelOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'GetServerConfig': grpc.unary_unary_rpc_method_handler(
servicer.GetServerConfig,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetServerConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ServerConfig.SerializeToString,
),
'ListNodePools': grpc.unary_unary_rpc_method_handler(
servicer.ListNodePools,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListNodePoolsResponse.SerializeToString,
),
'GetNodePool': grpc.unary_unary_rpc_method_handler(
servicer.GetNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.GetNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.NodePool.SerializeToString,
),
'CreateNodePool': grpc.unary_unary_rpc_method_handler(
servicer.CreateNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CreateNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'DeleteNodePool': grpc.unary_unary_rpc_method_handler(
servicer.DeleteNodePool,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.DeleteNodePoolRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'RollbackNodePoolUpgrade': grpc.unary_unary_rpc_method_handler(
servicer.RollbackNodePoolUpgrade,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.RollbackNodePoolUpgradeRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetNodePoolManagement': grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolManagement,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolManagementRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetLabels': grpc.unary_unary_rpc_method_handler(
servicer.SetLabels,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLabelsRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetLegacyAbac': grpc.unary_unary_rpc_method_handler(
servicer.SetLegacyAbac,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetLegacyAbacRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'StartIPRotation': grpc.unary_unary_rpc_method_handler(
servicer.StartIPRotation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.StartIPRotationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'CompleteIPRotation': grpc.unary_unary_rpc_method_handler(
servicer.CompleteIPRotation,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.CompleteIPRotationRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetNodePoolSize': grpc.unary_unary_rpc_method_handler(
servicer.SetNodePoolSize,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNodePoolSizeRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetNetworkPolicy': grpc.unary_unary_rpc_method_handler(
servicer.SetNetworkPolicy,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetNetworkPolicyRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
'SetMaintenancePolicy': grpc.unary_unary_rpc_method_handler(
servicer.SetMaintenancePolicy,
request_deserializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.SetMaintenancePolicyRequest.FromString,
response_serializer=google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.container.v1.ClusterManager', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
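
# Illustrative sketch (not part of the generated module): wiring the stub to
# a channel and issuing a ListClusters call. The endpoint is a placeholder,
# and the project_id / zone request fields are assumed from the v1 proto.
if __name__ == '__main__':
    channel = grpc.insecure_channel('localhost:50051')
    stub = ClusterManagerStub(channel)
    request = google_dot_cloud_dot_container__v1_dot_proto_dot_cluster__service__pb2.ListClustersRequest(
        project_id='my-project', zone='us-central1-a')
    response = stub.ListClusters(request)
    print(response)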
|
tseaver/gcloud-python
|
container/google/cloud/container_v1/proto/cluster_service_pb2_grpc.py
|
Python
|
apache-2.0
| 32,660
|
#!/usr/bin/env python
"""
Small and dumb HTTP server for use in tests.
"""
from optparse import OptionParser
import BaseHTTPServer, SimpleHTTPServer, signal, sys
from mercurial import cmdutil
class simplehttpservice(object):
def __init__(self, host, port):
self.address = (host, port)
def init(self):
self.httpd = BaseHTTPServer.HTTPServer(
self.address, SimpleHTTPServer.SimpleHTTPRequestHandler)
def run(self):
self.httpd.serve_forever()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-p', '--port', dest='port', type='int', default=8000,
help='TCP port to listen on', metavar='PORT')
parser.add_option('-H', '--host', dest='host', default='localhost',
help='hostname or IP to listen on', metavar='HOST')
parser.add_option('--pid', dest='pid',
help='file name where the PID of the server is stored')
parser.add_option('-f', '--foreground', dest='foreground',
action='store_true',
help='do not start the HTTP server in the background')
parser.add_option('--daemon-pipefds')
(options, args) = parser.parse_args()
signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
if options.foreground and options.pid:
parser.error("options --pid and --foreground are mutually exclusive")
opts = {'pid_file': options.pid,
'daemon': not options.foreground,
'daemon_pipefds': options.daemon_pipefds}
service = simplehttpservice(options.host, options.port)
cmdutil.service(opts, initfn=service.init, runfn=service.run,
runargs=[sys.executable, __file__] + sys.argv[1:])
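
# Illustrative usage (not part of the original script), serving the current
# directory on port 8000 in the foreground:
#
#     python dumbhttp.py --port 8000 --foreground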
|
hekra01/mercurial
|
tests/dumbhttp.py
|
Python
|
gpl-2.0
| 1,676
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Extract and analyze mobility measures (mesos) using Apache Spark.
import sys, os
import numpy as np
from xoxo.bsmap import BaseStationMap
from xoxo.permov import movement_reader
from xoxo.utils import radius_of_gyration
__author__ = 'Xiaming Chen'
__email__ = 'chen@xiaming.me'
def format_time(ts):
return ts.strftime("%m%d")
def daily_rg(movdata, bsmap, output):
""" R_g for one day
"""
bsmap = BaseStationMap(bsmap)
res = {}
for person in movement_reader(open(movdata, 'rb'), bsmap):
uid = person.id
tdate = person.dtstart.strftime("%m%d")
rg = person.radius_of_gyration()
if tdate not in res:
res[tdate] = []
res[tdate].append((uid, rg))
for tdate in res:
try:
os.mkdir(output)
        except OSError:
            # Output directory already exists
            pass
        ofile = open(os.path.join(output, tdate), 'wb')
        for uid, rg in sorted(res[tdate], key=lambda x: x[0]):
            ofile.write('%d,%.4f\n' % (uid, rg))
        ofile.close()
def accu_rg(movdata, bsmap, output):
""" Accumulative R_g over multiple days
"""
bsmap = BaseStationMap(bsmap)
dates = {'0820': 0, '0821': 1, '0822': 2, '0823': 3, '0824': 4, '0825': 5, '0826': 6}
res = {}
coords = {}
for person in movement_reader(open(movdata, 'rb'), bsmap):
uid = person.id
tdate = person.which_day()
if tdate not in dates:
continue
if uid not in coords:
coords[uid] = person.coordinates
else:
coords[uid].extend(person.coordinates)
if uid not in res:
res[uid] = np.empty(7)
res[uid].fill(-1)
res[uid][dates[tdate]] = radius_of_gyration(coords[uid])
res2 = []
for uid in res:
v = res[uid]
v2 = []
        # Fill missing days (-1) by carrying the previous day's value forward
        for n in v:
            if n == -1:
                try:
                    v2.append(v2[-1])
                except IndexError:
                    # No earlier value yet; default to 0
                    v2.append(0)
            else:
                v2.append(n)
res2.append((uid, v2))
res2 = sorted(res2, key=lambda x: x[0])
    ofile = open(output, 'wb')
    for uid, values in res2:
        ofile.write('%d,%s\n' % (uid, ','.join(['%.4f' % j for j in values])))
    ofile.close()
def accu_dt(movdata, bsmap, output, log=True):
""" Distribution of dwelling time for each person
"""
bsmap = BaseStationMap(bsmap)
res = {}
for person in movement_reader(open(movdata, 'rb'), bsmap):
uid = person.id
dt = person.accdwelling.values()
if uid not in res:
res[uid] = dt
else:
res[uid].extend(dt)
ofile = open(output, 'wb')
    if log is True:
        bins = np.logspace(-2, 2, 50)
        ofile.write('#bins np.logspace(-2,2,50)\n')
    else:
        bins = np.arange(0, 24.5, 0.5)
        ofile.write('#bins np.arange(0,24.5,0.5)\n')
for uid in res:
hist = np.histogram(np.array(res[uid])/3600, bins=bins)[0]
ofile.write('%d,%s\n' % (uid, ','.join([str(h) for h in hist])))
ofile.close()
def loc_dt(movdata, bsmap, output, log=True):
""" Distribution of dwelling time for each person, each location
"""
bsmap = BaseStationMap(bsmap)
res = {}
for person in movement_reader(open(movdata, 'rb'), bsmap):
uid = person.id
dt = person.accdwelling
if uid not in res:
res[uid] = {}
for k, v in dt.items():
if k not in res[uid]:
res[uid][k] = []
res[uid][k].append(v)
ofile = open(output, 'wb')
    if log is True:
        bins = np.logspace(-2, 2, 50)
        ofile.write('#bins np.logspace(-2,2,50)\n')
    else:
        bins = np.arange(0, 24.5, 0.5)
        ofile.write('#bins np.arange(0,24.5,0.5)\n')
for uid in res:
vs = [np.average(v) for k, v in res[uid].items()]
hist = np.histogram(np.array(vs)/3600, bins=bins)[0]
ofile.write('%d,%s\n' % (uid, ','.join([str(h) for h in hist])))
ofile.close()
def loc_dt_all(movdata, bsmap, output):
""" All raw dwelling times for each person, each location
"""
bsmap = BaseStationMap(bsmap)
res = {}
for person in movement_reader(open(movdata, 'rb'), bsmap):
uid = person.id
dt = person.accdwelling
if uid not in res:
res[uid] = {}
for k, v in dt.items():
if k not in res[uid]:
res[uid][k] = []
res[uid][k].append(v)
ofile = open(output, 'wb')
for uid in res:
vs = sorted([np.average(v)/3600 for k, v in res[uid].items()], reverse=True)
ofile.write('%d,%s\n' % (uid, ','.join(['%.3f' % v for v in vs])))
ofile.close()
def mobgraph_degree(movdata, bsmap, output):
""" Node degree of mobility graphs
"""
nloc = []
ndgr = []
bsmap = BaseStationMap(bsmap)
for person in movement_reader(open(movdata, 'rb'), bsmap):
if person.distinct_loc_num() < 2:
continue
graph = person.convert2graph()
ndgr.append(np.mean(graph.degree().values()))
nloc.append(person.distinct_loc_num())
    ofile = open(output, 'wb')
    ofile.write('nloc,ndgr\n')
    ofile.write('\n'.join(['%d,%.3f' % (x, y) for x, y in zip(nloc, ndgr)]))
    ofile.close()
def main():
if len(sys.argv) < 4:
print >> sys.stderr, "Usage: mesos <movdata> <bsmap> <output>"
exit(-1)
movdata = sys.argv[1]
bsmap = sys.argv[2]
output = sys.argv[3]
# daily_rg(movdata, bsmap, output)
# accu_rg(movdata, bsmap, output)
# accu_dt(movdata, bsmap, output)
# loc_dt(movdata, bsmap, output)
    # loc_dt_all(movdata, bsmap, output)
mobgraph_degree(movdata, bsmap, output)
if __name__ == '__main__':
main()
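
# Illustrative usage (not part of the original script); file names are
# placeholders for the movement data, base-station map and output location:
#
#     python 046.rg_dt.py movement.dat bsmap.dat output_dir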
|
caesar0301/paper-flowmap-code
|
src/046.rg_dt.py
|
Python
|
gpl-3.0
| 5,812
|
from collections import defaultdict
from config import main
import heapq
class UserPreference:
def __init__(self):
self.results = []
self.list1 = []
self.list2 = []
self.list3 = []
self.list4 = []
self.categories = []
self.sold_average = []
self.bought_average = []
def get_preferences(self, user):
        # Reset all variables
self.results = []
self.list1 = []
self.list2 = []
self.list3 = []
self.list4 = []
self.categories = []
self.sold_average = []
self.bought_average = []
self.frequency_based(user+'.csv')
return self.results
def frequency_based(self, user):
        fp = open(main.path + 'data/user/' + user, "r")
        lines = fp.readlines()
        fp.close()
        for i in range(len(lines)):
            lines[i] = lines[i].strip()
        # Skip the header row, split each record and collect the category column
        for i in range(1, len(lines)):
            self.list1 = lines[i].split(",")
            self.list2.append(self.list1)
            self.list3.append(self.list1[3])
        # Count how often each category occurs and keep the most frequent one
        d = defaultdict(int)
        for i in self.list3:
            d[i] += 1
        result = max(iter(d.items()), key=lambda x: x[1])
        self.results.append(result[0])
        self.deviation_based(result[0])
self.deviation_based(result[0])
    # DEVIATION APPROACH: difference between average sold and bought prices per category
def deviation_based(self,freq_cat):
for i in range(0,len(self.list2)):
self.categories.append(self.list2[i][3])
self.categories = list(set(self.categories))
i = 0
for item in self.list2:
self.list4.append(self.categories.index(item[3]))
self.sold_average = [0]*len(self.categories)
self.bought_average = [0]*len(self.categories)
s_average = []
b_average = []
s=[0]*len(self.categories)
b=[0]*len(self.categories)
for item in self.list2:
cat = item[3]
ind = self.categories.index(cat)
if item[4] == 'sold':
self.sold_average[ind]+= int(float(item[5]))
else:
self.bought_average[ind]+= int(float(item[5]))
for x in self.list4:
if self.list2[i][3] == self.categories[x]:
if self.list2[i][4] == 'sold':
s[x]+=1
if self.list2[i][4] == 'bought':
b[x]+=1
i+=1
for i in range(len(self.categories)):
if s[i]!=0:
s_average.append(self.sold_average[i]/s[i])
else:
s_average.append(0)
for i in range(len(self.categories)):
if b[i]!=0:
b_average.append(self.bought_average[i]/b[i])
else:
b_average.append(0)
deviation = []
for i in range(len(self.categories)):
deviation.append(s_average[i]-b_average[i])
max_category = max(deviation)
max2_category = heapq.nlargest(2, deviation)
if max_category == freq_cat:
self.results.append(self.categories[deviation.index(max_category)])
else:
self.results.append(self.categories[deviation.index(max2_category[1])])
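
# Illustrative usage (not part of the original module); assumes a CSV named
# <user>.csv under data/user/ whose rows carry the category at index 3, a
# sold/bought flag at index 4 and a price at index 5, as read above:
#
#     prefs = UserPreference()
#     categories = prefs.get_preferences('user123')
#     print(categories)  # [most frequent category, best price-margin category]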
|
adarshdec23/Market
|
core/preference/main.py
|
Python
|
apache-2.0
| 3,319
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# product_prices_on_variant for OpenERP #
# Copyright (C) 2011 Akretion Benoît GUILLOT <benoit.guillot@akretion.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from openerp.osv.orm import Model
from openerp.osv import fields
import decimal_precision as dp
class product_product(Model):
_inherit = "product.product"
_columns = {
'list_price': fields.float('Sale Price',
digits_compute=dp.get_precision('Sale Price'),
help="Base price for computing the customer price. "
"Sometimes called the catalog price."),
'standard_price': fields.float('Cost Price', required=True,
digits_compute=dp.get_precision('Purchase Price'),
help="Product's cost for accounting stock valuation. "
"It is the base price for the supplier price."),
}
_defaults = {
'list_price': lambda *a: 1,
'standard_price': lambda *a: 1,
}
|
cgstudiomap/cgstudiomap
|
main/parts/product-attribute/product_prices_on_variant/product.py
|
Python
|
agpl-3.0
| 2,481
|
import json
from c2corg_api.models.user import User
from c2corg_api.models.user_profile import UserProfile, ArchiveUserProfile, \
USERPROFILE_TYPE
from c2corg_api.scripts.es.sync import sync_es
from c2corg_api.search import elasticsearch_config
from c2corg_api.search.mappings.user_mapping import SearchUser
from c2corg_api.tests.search import reset_search_index
from c2corg_api.models.common.attributes import quality_types
from shapely.geometry import shape, Point
from c2corg_api.models.document import (
ArchiveDocumentLocale, DocumentLocale)
from c2corg_api.views.document import DocumentRest
from c2corg_api.tests.views import BaseDocumentTestRest
class TestUserProfileRest(BaseDocumentTestRest):
def setUp(self): # noqa
self.set_prefix_and_model(
'/profiles', USERPROFILE_TYPE, UserProfile, ArchiveUserProfile,
ArchiveDocumentLocale)
BaseDocumentTestRest.setUp(self)
self._add_test_data()
def test_get_collection_unauthenticated(self):
self.app.get(self._prefix, status=403)
def test_get_collection(self):
body = self.get_collection(user='contributor')
doc = body['documents'][0]
self.assertIn('areas', doc)
self.assertIn('name', doc)
self.assertNotIn('username', doc)
self.assertNotIn('geometry', doc)
def test_get_collection_paginated(self):
self.assertResultsEqual(
self.get_collection(
{'offset': 0, 'limit': 0}, user='contributor'),
[], 7)
self.assertResultsEqual(
self.get_collection(
{'offset': 0, 'limit': 1}, user='contributor'),
[self.profile4.document_id], 7)
self.assertResultsEqual(
self.get_collection(
{'offset': 0, 'limit': 2}, user='contributor'),
[self.profile4.document_id, self.profile2.document_id], 7)
self.assertResultsEqual(
self.get_collection(
{'offset': 1, 'limit': 3}, user='contributor'),
[self.profile2.document_id, self.global_userids['contributor3'],
self.global_userids['contributor2']], 7)
def test_get_collection_lang(self):
self.get_collection_lang(user='contributor')
def test_get_collection_search(self):
reset_search_index(self.session)
self.assertResultsEqual(
self.get_collection_search({'l': 'en'}, user='contributor'),
[self.profile4.document_id, self.global_userids['contributor3'],
self.global_userids['contributor2'], self.profile1.document_id,
self.global_userids['moderator'], self.global_userids['robot']],
6)
def test_get_unauthenticated_private_profile(self):
"""Tests that only the user name is returned when requesting a private
profile unauthenticated.
"""
response = self.app.get(self._prefix + '/' +
str(self.profile1.document_id),
status=200)
body = response.json
self.assertEqual(body.get('not_authorized'), True)
self.assertNotIn('username', body)
self.assertIn('name', body)
self.assertNotIn('locales', body)
self.assertNotIn('geometry', body)
def test_get_unauthenticated_public_profile(self):
"""Tests that the full profile is returned when requesting a public
profile when unauthenticated.
"""
contributor = self.profile1.user
contributor.is_profile_public = True
self.session.flush()
body = self.get(self.profile1, check_title=False)
self.assertNotIn('username', body)
self.assertIn('name', body)
self.assertIn('locales', body)
self.assertIn('geometry', body)
def test_get(self):
body = self.get(self.profile1, user='contributor', check_title=False)
self._assert_geometry(body)
self.assertIsNone(body['locales'][0].get('title'))
self.assertNotIn('maps', body)
self.assertNotIn('username', body)
self.assertIn('name', body)
self.assertIn('forum_username', body)
def test_get_unconfirmed_user(self):
headers = self.add_authorization_header(username='contributor')
self.app.get(self._prefix + '/' + str(self.profile3.document_id),
headers=headers, status=404)
def test_get_cooked(self):
self.get_cooked(self.profile1, user='contributor')
def test_get_cooked_with_defaulting(self):
self.get_cooked_with_defaulting(self.profile1, user='contributor')
def test_get_lang(self):
self.get_lang(self.profile1, user='contributor')
def test_get_new_lang(self):
self.get_new_lang(self.profile1, user='contributor')
def test_get_404(self):
self.get_404(user='contributor')
def test_get_caching(self):
self.get_caching(self.profile1, user='contributor')
def test_get_info(self):
body, locale = self.get_info(self.profile1, 'en')
self.assertEqual(locale.get('lang'), 'en')
self.assertEqual(locale.get('title'), 'Contributor')
def test_no_post(self):
# can not create new profiles
self.app.post_json(
self._prefix, {}, expect_errors=True, status=404)
def test_put_wrong_user(self):
"""Test that a normal user can only edit its own profile.
"""
body = {
'message': 'Update',
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'categories': ['mountain_guide'],
'locales': [
{'lang': 'en', 'description': 'Me!',
'version': self.locale_en.version}
],
'geometry': {
'version': self.profile1.geometry.version,
'geom': '{"type": "Point", "coordinates": [635957, 5723605]}' # noqa
}
}
}
headers = self.add_authorization_header(username='contributor2')
self.app_put_json(
self._prefix + '/' + str(self.profile1.document_id), body,
headers=headers, status=403)
def test_put_wrong_document_id(self):
body = {
'document': {
'document_id': '9999999',
'version': self.profile1.version,
'categories': ['mountain_guide'],
'locales': [
{'lang': 'en', 'description': 'Me!',
'version': self.locale_en.version}
]
}
}
self.put_wrong_document_id(body, user='moderator')
def test_put_wrong_document_version(self):
body = {
'document': {
'document_id': self.profile1.document_id,
'version': -9999,
'categories': ['mountain_guide'],
'locales': [
{'lang': 'en', 'description': 'Me!',
'version': self.locale_en.version}
]
}
}
self.put_wrong_version(
body, self.profile1.document_id, user='moderator')
def test_put_wrong_locale_version(self):
body = {
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'categories': ['mountain_guide'],
'locales': [
{'lang': 'en', 'description': 'Me!',
'version': -9999}
]
}
}
self.put_wrong_version(
body, self.profile1.document_id, user='moderator')
def test_put_wrong_ids(self):
body = {
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'categories': ['mountain_guide'],
'locales': [
{'lang': 'en', 'description': 'Me!',
'version': self.locale_en.version}
]
}
}
self.put_wrong_ids(
body, self.profile1.document_id, user='moderator')
def test_put_no_document(self):
self.put_put_no_document(
self.profile1.document_id, user='moderator')
def test_put_success_all(self):
body = {
'message': 'Update',
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'quality': quality_types[1],
'categories': ['mountain_guide'],
'locales': [
{'lang': 'en', 'description': 'Me!',
'version': self.locale_en.version}
],
'geometry': {
'version': self.profile1.geometry.version,
'geom': '{"type": "Point", "coordinates": [635957, 5723605]}' # noqa
}
}
}
(body, profile) = self.put_success_all(
body, self.profile1, user='moderator', check_es=False,
cache_version=3)
# version with lang 'en'
version_en = profile.versions[2]
# geometry has been changed
archive_geometry_en = version_en.document_geometry_archive
self.assertEqual(archive_geometry_en.version, 2)
self._check_es_index()
def test_put_success_figures_only(self):
body = {
'message': 'Changing figures',
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'quality': quality_types[1],
'categories': ['mountain_guide'],
'locales': [
{'lang': 'en', 'description': 'Me',
'version': self.locale_en.version}
]
}
}
(body, profile) = self.put_success_figures_only(
body, self.profile1, user='moderator', check_es=False)
self.assertEqual(profile.categories, ['mountain_guide'])
self._check_es_index()
def test_put_success_lang_only(self):
body = {
'message': 'Changing lang',
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'quality': quality_types[1],
'categories': ['amateur'],
'locales': [
{'lang': 'en', 'description': 'Me!',
'version': self.locale_en.version}
]
}
}
(body, profile) = self.put_success_lang_only(
body, self.profile1, user='moderator', check_es=False)
self.assertEqual(
profile.get_locale('en').description, 'Me!')
self._check_es_index()
def test_put_reset_title(self):
"""Tests that the title can not be set.
"""
body = {
'message': 'Changing lang',
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'quality': quality_types[1],
'categories': ['amateur'],
'locales': [
{'lang': 'en', 'title': 'Should not be set',
'description': 'Me!',
'version': self.locale_en.version}
]
}
}
(body, profile) = self.put_success_lang_only(
body, self.profile1, user='moderator', check_es=False)
self.assertEqual(
profile.get_locale('en').description, 'Me!')
self.session.refresh(self.locale_en)
self.assertEqual(self.locale_en.title, '')
        # check that the user names are added to the search index
self._check_es_index()
def test_put_success_new_lang(self):
"""Test updating a document by adding a new locale.
"""
body = {
'message': 'Adding lang',
'document': {
'document_id': self.profile1.document_id,
'version': self.profile1.version,
'quality': quality_types[1],
'categories': ['amateur'],
'locales': [
{'lang': 'es', 'description': 'Yo'}
]
}
}
(body, profile) = self.put_success_new_lang(
body, self.profile1, user='moderator', check_es=False)
self.assertEqual(profile.get_locale('es').description, 'Yo')
search_doc = self._check_es_index()
self.assertEqual(
search_doc['title_es'], 'Contributor contributor')
def _check_es_index(self):
sync_es(self.session)
search_doc = SearchUser.get(
id=self.profile1.document_id,
index=elasticsearch_config['index'])
self.assertEqual(search_doc['doc_type'], self.profile1.type)
self.assertEqual(
search_doc['title_en'], 'Contributor contributor')
self.assertEqual(
search_doc['title_fr'], 'Contributor contributor')
return search_doc
def _assert_geometry(self, body):
self.assertIsNotNone(body.get('geometry'))
geometry = body.get('geometry')
self.assertIsNotNone(geometry.get('version'))
self.assertIsNotNone(geometry.get('geom'))
geom = geometry.get('geom')
point = shape(json.loads(geom))
self.assertIsInstance(point, Point)
def _add_test_data(self):
user_id = self.global_userids['contributor']
self.profile1 = self.session.query(UserProfile).get(user_id)
self.locale_en = self.profile1.get_locale('en')
self.locale_fr = self.profile1.get_locale('fr')
DocumentRest.create_new_version(self.profile1, user_id)
self.profile2 = UserProfile(categories=['amateur'])
self.session.add(self.profile2)
self.profile3 = UserProfile(categories=['amateur'])
self.session.add(self.profile3)
self.profile4 = UserProfile(categories=['amateur'])
self.profile4.locales.append(DocumentLocale(
lang='en', description='You', title=''))
self.profile4.locales.append(DocumentLocale(
lang='fr', description='Toi', title=''))
self.session.add(self.profile4)
self.session.flush()
# create users for the profiles
self.user2 = User(
name='user2', username='user2', email='user2@c2c.org',
forum_username='user2', password='pass',
email_validated=True, profile=self.profile2)
self.user3 = User(
name='user3', username='user3', email='user3@c2c.org',
forum_username='user3', password='pass',
email_validated=False, profile=self.profile3)
self.user4 = User(
name='user4', username='user4', email='user4@c2c.org',
forum_username='user4', password='pass',
email_validated=True, profile=self.profile4)
self.session.add_all([self.user2, self.user3, self.user4])
self.session.flush()
|
c2corg/v6_api
|
c2corg_api/tests/views/test_user_profile.py
|
Python
|
agpl-3.0
| 15,302
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
import json
# For old users python-crypto was not mandatory, don't break their setup
try:
from Crypto.Cipher import AES
except ImportError:
AES = None
from shinken.log import logger
from shinken.http_client import HTTPClient, HTTPException
class Stats(object):
def __init__(self):
self.name = ''
self.type = ''
self.app = None
self.stats = {}
self.api_key = ''
self.secret = ''
self.cyph = None
self.con = HTTPClient(uri='http://metrology')
def launch_reaper_thread(self):
self.reaper_thread = threading.Thread(None, target=self.reaper, name='stats-reaper')
self.reaper_thread.daemon = True
self.reaper_thread.start()
def register(self, app, name, _type, api_key='', secret=''):
self.app = app
self.name = name
self.type = _type
self.api_key = api_key
self.secret = secret
        # Pad the secret to a 16-byte multiple for AES; it is usually already the right length
self.secret += '\0' * (-len(self.secret) % 16)
if AES is not None and self.secret != '':
self.cyph = AES.new(self.secret, AES.MODE_ECB)
    # Increment a stat key, tracking (min, max, count, sum); unknown keys start from empty defaults
def incr(self, k, v):
_min, _max, nb, _sum = self.stats.get(k, (None, None, 0, 0))
nb += 1
_sum += v
if _min is None or v < _min:
_min = v
if _max is None or v > _max:
_max = v
self.stats[k] = (_min, _max, nb, _sum)
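    # Example (assumed usage): statsmgr.incr('broks', 12) followed by
    # statsmgr.incr('broks', 3) leaves stats['broks'] == (3, 12, 2, 15),
    # i.e. (min, max, count, sum).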
def reaper(self):
while True:
now = int(time.time())
logger.debug('REAPER loop')
stats = self.stats
self.stats = {}
if len(stats) != 0:
s = ', '.join(['%s:%s' % (k,v) for (k,v) in stats.iteritems()])
logger.debug("REAPER: %s ", s)
            # If the daemon is not fully initialized yet, skip this pass:
            # without a real name the reported data could not be attributed
if not self.name:
time.sleep(10)
continue
logger.debug('REAPER we got a name')
metrics = []
for (k,e) in stats.iteritems():
nk = '%s.%s.%s' % (self.type, self.name, k)
logger.debug('REAP %s:%s', nk, e)
_min, _max, nb, _sum = e
_avg = float(_sum) / nb
                # nb cannot be 0 here, and _min/_max cannot be None either
s = '%s.avg %f %d' % (nk, _avg, now)
metrics.append(s)
s = '%s.min %f %d' % (nk, _min, now)
metrics.append(s)
s = '%s.max %f %d' % (nk, _max, now)
metrics.append(s)
s = '%s.count %f %d' % (nk, nb, now)
metrics.append(s)
logger.debug('REAPER metrics to send %s (%d)', metrics, len(str(metrics)) )
# get the inner data for the daemon
struct = self.app.get_stats_struct()
struct['metrics'].extend(metrics)
logger.debug('REAPER whole struct %s', struct)
j = json.dumps(struct)
if self.cyph is not None:
logger.debug('PUT to /api/v1/put/ with %s %s', self.api_key, self.secret)
                # pad the message to a multiple of 16 bytes before encrypting
j += '\0' * (-len(j) % 16)
encrypted_text = self.cyph.encrypt(j)
try:
self.con.put('/api/v1/put/', encrypted_text)
except HTTPException, exp:
logger.debug('REAPER cannot put to the metric server %s', exp)
time.sleep(10)
statsmgr = Stats()
|
h4wkmoon/shinken
|
shinken/stats.py
|
Python
|
agpl-3.0
| 4,624
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-01 01:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_v2', '0006_remove_event_is_valid'),
]
operations = [
migrations.AlterField(
model_name='trial',
name='percentage_all',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - all', max_digits=3, null=True, verbose_name='P'),
),
migrations.AlterField(
model_name='trial',
name='percentage_blue',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - blue', max_digits=3, null=True, verbose_name='PB'),
),
migrations.AlterField(
model_name='trial',
name='percentage_red',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - red', max_digits=3, null=True, verbose_name='PR'),
),
migrations.AlterField(
model_name='trial',
name='percentage_white',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Percentage Coefficient - white', max_digits=3, null=True, verbose_name='PW'),
),
migrations.AlterField(
model_name='trial',
name='regularity',
field=models.PositiveSmallIntegerField(help_text='Click every X seconds', verbose_name='Regularity'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_all',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - all', max_digits=3, null=True, verbose_name='TM'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_blue',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - blue', max_digits=3, null=True, verbose_name='TMB'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_red',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - red', max_digits=3, null=True, verbose_name='TMR'),
),
migrations.AlterField(
model_name='trial',
name='time_mean_white',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Mean - white', max_digits=3, null=True, verbose_name='TMW'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_all',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - all', max_digits=3, null=True, verbose_name='TSD'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_blue',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - blue', max_digits=3, null=True, verbose_name='TSDB'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_red',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - red', max_digits=3, null=True, verbose_name='TSDR'),
),
migrations.AlterField(
model_name='trial',
name='time_stdev_white',
field=models.DecimalField(blank=True, decimal_places=2, help_text='Time Coefficient Standard Deviation - white', max_digits=3, null=True, verbose_name='TSDW'),
),
migrations.AlterField(
model_name='trial',
name='timeout',
field=models.DecimalField(decimal_places=2, help_text='Seconds per color', max_digits=3, verbose_name='Timeout'),
),
]
|
AstroMatt/esa-time-perception
|
backend/api_v2/migrations/0007_auto_20170101_0101.py
|
Python
|
mit
| 3,986
|
"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
from atomic_reactor.constants import PLUGIN_KOJI_TAG_BUILD_KEY
from atomic_reactor.koji_util import create_koji_session, tag_koji_build
from atomic_reactor.plugin import ExitPlugin
from atomic_reactor.plugins.exit_koji_import import KojiImportPlugin
from atomic_reactor.plugins.exit_koji_promote import KojiPromotePlugin
class KojiTagBuildPlugin(ExitPlugin):
"""
Tag build in koji
Authentication is with Kerberos unless the koji_ssl_certs
configuration parameter is given, in which case it should be a
path at which 'cert', 'ca', and 'serverca' are the certificates
for SSL authentication.
If Kerberos is used for authentication, the default principal will
be used (from the kernel keyring) unless both koji_keytab and
koji_principal are specified. The koji_keytab parameter is a
keytab name like 'type:name', and so can be used to specify a key
in a Kubernetes secret by specifying 'FILE:/path/to/key'.
"""
key = PLUGIN_KOJI_TAG_BUILD_KEY
is_allowed_to_fail = False
def __init__(self, tasker, workflow, kojihub, target,
koji_ssl_certs=None, koji_proxy_user=None,
koji_principal=None, koji_keytab=None,
poll_interval=5):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param kojihub: string, koji hub (xmlrpc)
:param target: str, koji target
:param koji_ssl_certs: str, path to 'cert', 'ca', 'serverca'
:param koji_proxy_user: str, user to log in as (requires hub config)
:param koji_principal: str, Kerberos principal (must specify keytab)
:param koji_keytab: str, keytab name (must specify principal)
:param poll_interval: int, seconds between Koji task status requests
"""
super(KojiTagBuildPlugin, self).__init__(tasker, workflow)
if bool(koji_principal) != bool(koji_keytab):
raise RuntimeError('specify both koji_principal and koji_keytab '
'or neither')
self.kojihub = kojihub
self.koji_auth = {
"proxyuser": koji_proxy_user,
"ssl_certs_dir": koji_ssl_certs,
# krbV python library throws an error if these are unicode
"krb_principal": str(koji_principal),
"krb_keytab": str(koji_keytab)
}
self.target = target
self.poll_interval = poll_interval
def run(self):
"""
Run the plugin.
"""
if self.workflow.build_process_failed:
self.log.info('Build failed, skipping koji tagging')
return
build_id = self.workflow.exit_results.get(KojiImportPlugin.key)
if not build_id:
build_id = self.workflow.exit_results.get(KojiPromotePlugin.key)
if not build_id:
self.log.info('No koji build from %s or %s', KojiImportPlugin.key,
KojiPromotePlugin.key)
return
session = create_koji_session(self.kojihub, self.koji_auth)
build_tag = tag_koji_build(session, build_id, self.target,
poll_interval=self.poll_interval)
return build_tag
|
jarodwilson/atomic-reactor
|
atomic_reactor/plugins/exit_koji_tag_build.py
|
Python
|
bsd-3-clause
| 3,493
|
'''
Created on 08.02.2016.
@author: Lazar
'''
from textx.exceptions import TextXSemanticError
def data_show_processor(data_show_object):
    data_show_kwd = ['table', 'list', 'thumbnail']
    # Use string equality (not identity) and only raise after checking
    # every allowed keyword
    if data_show_object.type.name in data_show_kwd:
        return True
    raise TextXSemanticError(
        "'{0}' is not available. Correct keywords are 'table', 'list' or 'thumbnail'".format(
            data_show_object.type.name))
class DataShow(object):
'''
classdocs
'''
def __init__(self, type, parent, data):
self.data = []
for s in data:
selector = s.so if s.so else s.sol
self.data.append(selector)
self.type = type
self.parent = parent
def accept(self, visitor):
return visitor.visit_other_selector(self.type.name, data=self.data)
|
theshammy/GenAn
|
src/concepts/data_show.py
|
Python
|
mit
| 839
|
import cacher
import checker
import logger
import mailer
import updater
|
christianrenier/dynamic-dns-updater
|
utils/__init__.py
|
Python
|
mit
| 71
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from oai.models import *
from djcelery.models import TaskMeta
class OaiErrorInline(admin.TabularInline):
model = OaiError
extra = 0
class OaiSourceAdmin(admin.ModelAdmin):
inlines = [OaiErrorInline]
class TaskMetaAdmin(admin.ModelAdmin):
    readonly_fields = ('result',)
class OaiRecordAdmin(admin.ModelAdmin):
    # Minimal admin for OaiRecord so the registration below resolves
    # (assumed; no OaiRecordAdmin is defined or imported in this module)
    pass
admin.site.register(TaskMeta, TaskMetaAdmin)
admin.site.register(OaiSource, OaiSourceAdmin)
admin.site.register(OaiRecord, OaiRecordAdmin)
admin.site.register(OaiSet)
admin.site.register(OaiFormat)
admin.site.register(ResumptionToken)
|
dcosentino/edx-platform
|
lms/djangoapps/oai/admin.py
|
Python
|
agpl-3.0
| 630
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from graph.views import WebsiteListView
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'news_graph.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'graph.views.graph', name='graph'),
url(r'^website-form/', 'graph.views.website_form', name='website-form'),
url(r'^website', WebsiteListView.as_view(), name='website-list'),
)
|
Soaring-Outliers/news_graph
|
news_graph/urls.py
|
Python
|
mit
| 522
|
from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
TableToPoints1 = TableToPoints()
TableToPoints1.XColumn = 'x0'
TableToPoints1.YColumn = 'x1'
TableToPoints1.a2DPoints = 1
# SpreadSheetView1 = GetRenderView()
# DataRepresentation2 = Show()
# DataRepresentation2.FieldAssociation = 'Point Data'
# ltes = GetActiveSource()
# DataRepresentation1 = GetDisplayProperties(ltes)
# DataRepresentation1.Visibility = 0
# AnimationScene1 = GetAnimationScene()
# RenderView2 = CreateRenderView()
# RenderView2.CompressorConfig = 'vtkSquirtCompressor 0 3'
# RenderView2.UseLight = 1
# RenderView2.CameraPosition = [5.0, 5.0, 27.320508075688775]
# RenderView2.LightSwitch = 0
# RenderView2.OrientationAxesVisibility = 0
# RenderView2.CameraClippingRange = [27.047302994931886, 27.730315696824107]
# RenderView2.ViewTime = 0.0
# RenderView2.RemoteRenderThreshold = 3.0
# RenderView2.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
# RenderView2.CameraFocalPoint = [5.0, 5.0, 0.0]
# RenderView2.CameraParallelScale = 7.0710678118654755
# RenderView2.CenterOfRotation = [5.0, 5.0, 0.0]
# a1_error2_PVLookupTable = GetLookupTableForArray( " error2", 1, RGBPoints=[0.282843, 0.23, 0.299, 0.754, 0.282843, 0.706, 0.016, 0.15], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
# a1_error2_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] )
# DataRepresentation3 = Show()
# DataRepresentation3.EdgeColor = [0.0, 0.0, 0.0]
# DataRepresentation3.PointSize = 5.0
# DataRepresentation3.SelectionPointFieldDataArrayName = ' '
# DataRepresentation3.ColorArrayName = ('POINT_DATA', ' error2')
# DataRepresentation3.LookupTable = a1_error2_PVLookupTable
# DataRepresentation3.Representation = 'Surface'
# AnimationScene1.ViewModules = [ SpreadSheetView1, RenderView2 ]
# a1_error2_PVLookupTable.ScalarOpacityFunction = a1_error2_PiecewiseFunction
# Render()
|
davidshepherd7/oomph-lib-micromagnetics
|
etc/paraview_helpers/csv2points.py
|
Python
|
gpl-2.0
| 2,020
|
import csv
import xlrd
from django.core.management.base import BaseCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.smsbillables.utils import log_smsbillables_info
from corehq.messaging.smsbackends.twilio.models import SQLTwilioBackend
from corehq.apps.sms.models import OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
def bootstrap_twilio_gateway(apps, twilio_rates_filename):
currency_class = apps.get_model('accounting', 'Currency') if apps else Currency
sms_gateway_fee_class = apps.get_model('smsbillables', 'SmsGatewayFee') if apps else SmsGatewayFee
sms_gateway_fee_criteria_class = apps.get_model('smsbillables', 'SmsGatewayFeeCriteria') if apps else SmsGatewayFeeCriteria
# iso -> provider -> rate
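    # e.g. get_twilio_data() might return (rates purely illustrative):
    #   {'us': {'att': 0.0075, 'other': 0.0075}, 'in': {'airtel': 0.01}}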
def get_twilio_data():
twilio_file = open(twilio_rates_filename)
twilio_csv = csv.reader(twilio_file.read().splitlines())
twilio_data = {}
skip = 0
for row in twilio_csv:
if skip < 4:
skip += 1
continue
else:
try:
iso = row[0].lower()
provider = row[2].split('-')[1].lower().replace(' ', '')
rate = float(row[3])
if not(iso in twilio_data):
twilio_data[iso] = {}
twilio_data[iso][provider] = rate
except IndexError:
log_smsbillables_info("Twilio index error %s:" % row)
twilio_file.close()
return twilio_data
# iso -> provider -> (country code, number of subscribers)
def get_mach_data():
mach_workbook = xlrd.open_workbook('corehq/apps/smsbillables/management/'
'commands/pricing_data/Syniverse_coverage_list_DIAMONDplus.xls')
mach_table = mach_workbook.sheet_by_index(0)
mach_data = {}
try:
row = 7
while True:
country_code = int(mach_table.cell_value(row, 0))
iso = mach_table.cell_value(row, 1)
network = mach_table.cell_value(row, 5).lower().replace(' ', '')
subscribers = 0
try:
subscribers = int(mach_table.cell_value(row, 10).replace('.', ''))
except ValueError:
log_smsbillables_info("Incomplete subscriber data for country code %d" % country_code)
if not(iso in mach_data):
mach_data[iso] = {}
mach_data[iso][network] = (country_code, subscribers)
row += 1
except IndexError:
pass
return mach_data
twilio_data = get_twilio_data()
mach_data = get_mach_data()
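    # Per country (iso), each Twilio rate is weighted by the matching MACH
    # subscriber count, so the stored fee approximates
    #   sum(rate[provider] * subscribers[provider]) / total_subscribers,
    # with any 'other' rate applied to the remaining subscribers.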
for iso in twilio_data:
if iso in mach_data:
weighted_price = 0
total_subscriptions = 0
country_code = None
calculate_other = False
for twilio_provider in twilio_data[iso]:
if twilio_provider == 'other':
calculate_other = True
else:
for mach_provider in mach_data[iso]:
try:
if twilio_provider in mach_provider:
country_code, subscriptions = mach_data[iso][mach_provider]
weighted_price += twilio_data[iso][twilio_provider] * subscriptions
total_subscriptions += subscriptions
mach_data[iso][mach_provider] = country_code, 0
break
except UnicodeDecodeError:
pass
if calculate_other:
other_rate_twilio = twilio_data[iso]['other']
for _, subscriptions in mach_data[iso].values():
weighted_price += other_rate_twilio * subscriptions
total_subscriptions += subscriptions
if country_code is not None:
weighted_price = weighted_price / total_subscriptions
SmsGatewayFee.create_new(
SQLTwilioBackend.get_api_id(),
OUTGOING,
weighted_price,
country_code=country_code,
currency=currency_class.objects.get(code="USD"),
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
else:
log_smsbillables_info("%s not in mach_data" % iso)
# https://www.twilio.com/help/faq/sms/will-i-be-charged-if-twilio-encounters-an-error-when-sending-an-sms
SmsGatewayFee.create_new(
SQLTwilioBackend.get_api_id(),
OUTGOING,
0.00,
country_code=None,
currency=currency_class.objects.get(code="USD"),
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
log_smsbillables_info("Updated Twilio gateway fees.")
class Command(BaseCommand):
    help = "bootstrap Twilio gateway fees"
    args = "twilio_rates_filename"
    label = ""
    def handle(self, *args, **options):
        # bootstrap_twilio_gateway needs the rates CSV path as its second
        # argument; take it from the command line
        bootstrap_twilio_gateway(None, args[0])
|
qedsoftware/commcare-hq
|
corehq/apps/smsbillables/management/commands/bootstrap_twilio_gateway.py
|
Python
|
bsd-3-clause
| 5,306
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
    """Returns mimic dict mapping each word to list of words which follow it."""
    # +++your code here+++
    words = open(filename).read().split()
    mimic_dict = {}
    prev = ''
    for word in words:
        if prev not in mimic_dict:
            mimic_dict[prev] = [word]
        else:
            mimic_dict[prev].append(word)
        prev = word
    return mimic_dict
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
# +++your code here+++
for i in range(200):
print word,
nexts=mimic_dict.get(word)
if not nexts:
nexts=mimic_dict['']
word=random.choice(nexts)
# Provided main(), calls mimic_dict() and mimic()
def main():
    if len(sys.argv) != 2:
        print 'usage: ./mimic.py file-to-read'
        sys.exit(1)
    word_dict = mimic_dict(sys.argv[1])
    print word_dict
    print_mimic(word_dict, '')
if __name__ == '__main__':
main()
|
sanaldavis/Google-Python-Exercies
|
basic/mimic.py
|
Python
|
apache-2.0
| 2,511
|
from django.http import HttpResponse
from anyjson import serialize
def multiply(request):
x = int(request.GET["x"])
y = int(request.GET["y"])
retval = x * y
response = {"status": "success", "retval": retval}
return HttpResponse(serialize(response), mimetype="application/json")
|
mzdaniel/oh-mainline
|
vendor/packages/celery/examples/httpexample/views.py
|
Python
|
agpl-3.0
| 302
|
from xml.etree.ElementTree import Element, SubElement, Comment
from xml.etree import ElementTree
from xml.dom import minidom
import datetime
import time
import re
import os
from git import *
from generatePoaXml import *
from xml_generation import *
from parsePoaXml import *
import settings
"""
"""
class pubMedPoaXML(object):
"""
Generate PubMed XML for the PoA article, which is pubstatus = "aheadofprint"
"""
def __init__(self, poa_articles, pub_date=None):
"""
set the root node
get the article type from the object passed in to the class
        set default values for items that are boilerplate for this XML
"""
self.root = Element('ArticleSet')
# set the boiler plate values
self.contrib_types = ["author"]
self.group_contrib_types = ["author non-byline"]
self.date_types = ["received", "accepted"]
self.elife_journal_title = "eLife"
self.elife_epub_issn = "2050-084X"
self.elife_publisher_name = "eLife Sciences Publications Limited"
self.elife_language = "EN"
# Default volume value
self.elife_journal_volume = "4"
self.elife_journal_issue = ""
        # Publication date: default to the current UTC time when not supplied
        if pub_date is None:
            self.pub_date = time.gmtime()
        else:
            self.pub_date = pub_date
# Generate batch id
self.elife_doi_batch_id = ("elife-" + time.strftime("%Y-%m-%d-%H%M%S", self.pub_date)
+ "-PubMed")
# set comment
generated = time.strftime("%Y-%m-%d %H:%M:%S")
last_commit = get_last_commit_to_master()
comment = Comment('generated by eLife at ' + generated + ' from version ' + last_commit)
self.root.append(comment)
self.build(self.root, poa_articles)
def build(self, root, poa_articles):
for poa_article in poa_articles:
# Initialise these as None for each loop
self.contributors = None
self.groups = None
self.article = SubElement(root, "Article")
self.set_journal(self.article, poa_article)
self.set_replaces(self.article, poa_article)
self.set_article_title(self.article, poa_article)
self.set_e_location_id(self.article, poa_article)
self.set_language(self.article, poa_article)
for contrib_type in self.contrib_types:
self.set_author_list(self.article, poa_article, contrib_type)
for contrib_type in self.group_contrib_types:
self.set_group_list(self.article, poa_article, contrib_type)
self.set_publication_type(self.article, poa_article)
self.set_article_id_list(self.article, poa_article)
self.set_history(self.article, poa_article)
self.set_abstract(self.article, poa_article)
self.set_object_list(self.article, poa_article)
def get_pub_type(self, poa_article):
"""
Given an article object, determine whether the pub_type is for
PoA article or VoR article
"""
pub_type = None
if poa_article.is_poa is False:
# VoR
pub_type = "epublish"
elif poa_article.is_poa is True:
# PoA
pub_type = "aheadofprint"
return pub_type
def set_journal(self, parent, poa_article):
self.journal = SubElement(parent, "Journal")
self.publisher_name = SubElement(self.journal, "PublisherName")
self.publisher_name.text = self.elife_publisher_name
self.journal_title = SubElement(self.journal, 'JournalTitle')
self.journal_title.text = self.elife_journal_title
self.issn = SubElement(self.journal, 'Issn')
self.issn.text = self.elife_epub_issn
#self.journal_pubdate = SubElement(self.journal, "PubDate")
pub_type = self.get_pub_type(poa_article)
if pub_type == "epublish":
a_date = poa_article.get_date("pub").date
else:
# POA type, use the pub date if it is set, for when processing
# version 2, version 3, etc.
try:
a_date = poa_article.get_date("pub").date
except:
# Default use the run time date
a_date = self.pub_date
self.volume = SubElement(self.journal, "Volume")
# Use volume from the article unless not present then use the default
if poa_article.volume:
self.volume.text = poa_article.volume
else:
self.volume.text = elife_journal_volume(a_date)
self.issue = SubElement(self.journal, "Issue")
self.issue.text = self.elife_journal_issue
# Add the pub date now
self.set_pub_date(self.journal, a_date, pub_type)
def set_replaces(self, parent, poa_article):
"""
Set the Replaces tag, if applicable
"""
if ((poa_article.is_poa is False and poa_article.was_ever_poa is True)
or (poa_article.version and poa_article.version > 1)):
self.replaces = SubElement(parent, 'Replaces')
self.replaces.set("IdType", "doi")
self.replaces.text = poa_article.doi
def set_article_title(self, parent, poa_article):
"""
Set the titles and title tags allowing sub tags within title
"""
tag_name = 'ArticleTitle'
# Pubmed allows <i> tags, not <italic> tags
tag_converted_title = poa_article.title
tag_converted_title = replace_tags(tag_converted_title, 'italic', 'i')
tag_converted_title = replace_tags(tag_converted_title, 'bold', 'b')
tag_converted_title = replace_tags(tag_converted_title, 'underline', 'u')
# Specific issue to remove b tag wrapping the entire title, if present
if tag_converted_title.startswith('<b>') and tag_converted_title.endswith('</b>'):
tag_converted_title = tag_converted_title.lstrip('<b>')
tag_converted_title = tag_converted_title.rstrip('</b>')
tag_converted_title = escape_unmatched_angle_brackets(tag_converted_title)
tagged_string = '<' + tag_name + '>' + tag_converted_title + '</' + tag_name + '>'
reparsed = minidom.parseString(xml_escape_ampersand(tagged_string).encode('utf-8'))
root_xml_element = append_minidom_xml_to_elementtree_xml(
parent, reparsed
)
def set_e_location_id(self, parent, poa_article):
self.e_location_id = SubElement(parent, "ELocationID")
self.e_location_id.set("EIdType", "doi")
self.e_location_id.text = poa_article.doi
if poa_article.elocation_id:
self.e_location_id = SubElement(parent, "ELocationID")
self.e_location_id.set("EIdType", "pii")
self.e_location_id.text = poa_article.elocation_id
def set_language(self, parent, poa_article):
self.language = SubElement(parent, "Language")
self.language.text = self.elife_language
def set_author_list(self, parent, poa_article, contrib_type=None):
# If contrib_type is None, all contributors will be added regardless of their type
if self.contributors is None:
# Create the XML element on first use
self.contributors = SubElement(parent, "AuthorList")
for contributor in poa_article.contributors:
if contrib_type:
# Filter by contrib_type if supplied
if contributor.contrib_type != contrib_type:
continue
# Skip contributors with no surname and no collab
if (contributor.surname == "" or contributor.surname is None) \
and (contributor.collab == "" or contributor.collab is None):
continue
self.person_name = SubElement(self.contributors, "Author")
if contributor.given_name:
self.given_name = SubElement(self.person_name, "FirstName")
self.given_name.text = contributor.given_name
elif contributor.surname:
# Empty given_name but has a surname
self.given_name = SubElement(self.person_name, "FirstName")
self.given_name.set("EmptyYN", "Y")
if contributor.surname:
self.surname = SubElement(self.person_name, "LastName")
self.surname.text = contributor.surname
if contributor.collab:
self.collective_name = SubElement(self.person_name, "CollectiveName")
self.collective_name.text = contributor.collab
# Add each affiliation for multiple affiliation support
non_blank_aff_count = len(filter(lambda aff: aff.text != "", contributor.affiliations))
for aff in contributor.affiliations:
if aff.text != "":
if non_blank_aff_count == 1:
self.affiliation = SubElement(self.person_name, "Affiliation")
self.affiliation.text = aff.text
elif non_blank_aff_count > 1:
# Wrap each in AffiliationInfo tag
self.affiliation_info = SubElement(self.person_name, "AffiliationInfo")
self.affiliation = SubElement(self.affiliation_info, "Affiliation")
self.affiliation.text = aff.text
if contributor.orcid:
self.orcid = SubElement(self.person_name, "Identifier")
self.orcid.set("Source", "ORCID")
self.orcid.text = contributor.orcid
def set_group_list(self, parent, poa_article, contrib_type=None):
# If contrib_type is None, all contributors will be added regardless of their type
if self.groups is None:
# Create the XML element on first use
self.groups = SubElement(parent, "GroupList")
for contributor in poa_article.contributors:
if contrib_type:
# Filter by contrib_type if supplied
if contributor.contrib_type != contrib_type:
continue
# Skip contributors with no surname and no collab
if (contributor.surname == "" or contributor.surname is None) \
and (contributor.collab == "" or contributor.collab is None):
continue
# Set the GroupName value
if contributor.group_author_key:
# The contributor has a contrib-id contrib-id-type="group-author-key"
# Match this value to article contributors of type collab having the same id
for collab_contrib in poa_article.contributors:
if (collab_contrib.collab is not None
and collab_contrib.group_author_key == contributor.group_author_key):
# Set the individual GroupName to the collab name
self.group_name_text = collab_contrib.collab
# Find existing group with the same name or create it if not exists
self.group = None
for group in self.groups.findall('./Group'):
for group_name in group.findall('./GroupName'):
if group_name.text == self.group_name_text:
# Matched an existing group tag, use it
self.group = group
break
if self.group is None:
# Create a new group
self.group = SubElement(self.groups, "Group")
# Set the GroupName of the group
self.group_name = SubElement(self.group, "GroupName")
self.group_name.text = self.group_name_text
# Add the individual to the group
individual = SubElement(self.group, "IndividualName")
if contributor.given_name:
self.given_name = SubElement(individual, "FirstName")
self.given_name.text = contributor.given_name
elif contributor.surname:
# Empty given_name but has a surname
self.given_name = SubElement(individual, "FirstName")
self.given_name.set("EmptyYN", "Y")
if contributor.surname:
self.surname = SubElement(individual, "LastName")
self.surname.text = contributor.surname
# Remove a completely empty GroupList element, if empty
if len(self.groups) <= 0:
parent.remove(self.groups)
def set_publication_type(self, parent, poa_article):
if poa_article.articleType:
self.publication_type = SubElement(parent, "PublicationType")
if poa_article.articleType == "editorial":
self.publication_type.text = "EDITORIAL"
elif poa_article.articleType == "correction":
self.publication_type.text = "PUBLISHED ERRATUM"
elif (poa_article.articleType == "research-article"
or poa_article.articleType == "discussion"
or poa_article.articleType == "article-commentary"):
self.publication_type.text = "JOURNAL ARTICLE"
def set_article_id_list(self, parent, poa_article):
self.article_id_list = SubElement(parent, "ArticleIdList")
self.article_id = SubElement(self.article_id_list, "ArticleId")
self.article_id.set("IdType", "doi")
self.article_id.text = poa_article.doi
def set_pub_date(self, parent, pub_date, pub_type):
if pub_date:
self.publication_date = SubElement(parent, "PubDate")
self.publication_date.set("PubStatus", pub_type)
year = SubElement(self.publication_date, "Year")
year.text = str(pub_date.tm_year)
month = SubElement(self.publication_date, "Month")
# Get full text name of month
month.text = time.strftime('%B', pub_date)
day = SubElement(self.publication_date, "Day")
day.text = str(pub_date.tm_mday).zfill(2)
def set_date(self, parent, a_date, date_type):
if a_date:
self.date = SubElement(parent, "PubDate")
self.date.set("PubStatus", date_type)
year = SubElement(self.date, "Year")
year.text = str(a_date.tm_year)
month = SubElement(self.date, "Month")
month.text = str(a_date.tm_mon).zfill(2)
day = SubElement(self.date, "Day")
day.text = str(a_date.tm_mday).zfill(2)
def set_history(self, parent, poa_article):
self.history = SubElement(parent, "History")
for date_type in self.date_types:
date = poa_article.get_date(date_type)
if date:
self.set_date(self.history, date.date, date_type)
# If the article is VoR and is was ever PoA, then set the aheadofprint history date
if poa_article.is_poa is False and poa_article.was_ever_poa is True:
date_value_type = "epub"
date_type = "aheadofprint"
date = poa_article.get_date(date_value_type)
if date:
self.set_date(self.history, date.date, date_type)
def set_abstract(self, parent, poa_article):
tag_name = 'Abstract'
# Pubmed allows <i> tags, not <italic> tags
if poa_article.abstract:
tag_converted_abstract = poa_article.abstract
tag_converted_abstract = replace_tags(tag_converted_abstract, 'italic', 'i')
tag_converted_abstract = replace_tags(tag_converted_abstract, 'bold', 'b')
tag_converted_abstract = replace_tags(tag_converted_abstract, 'underline', 'u')
tag_converted_abstract = tag_converted_abstract.replace('<p>', '').replace('</p>', '')
tag_converted_abstract = xml_escape_ampersand(tag_converted_abstract)
not_allowed_tags = ['<sc>', '</sc>']
for tagname in not_allowed_tags:
tag_converted_abstract = tag_converted_abstract.replace(tagname, '')
tag_converted_abstract = escape_unmatched_angle_brackets(tag_converted_abstract)
tagged_string = '<' + tag_name + '>' + tag_converted_abstract + '</' + tag_name + '>'
reparsed = minidom.parseString(tagged_string.encode('utf-8'))
root_xml_element = append_minidom_xml_to_elementtree_xml(
parent, reparsed
)
else:
# Empty abstract
self.abstract = SubElement(parent, tag_name)
def set_object_list(self, parent, poa_article):
# Keywords and others go in Object tags
self.object_list = SubElement(parent, "ObjectList")
# Add related article data for correction articles
if poa_article.articleType == "correction":
for related_article in poa_article.related_articles:
if related_article.related_article_type == "corrected-article":
object = self.set_object(self.object_list, "Erratum",
"type", str(related_article.ext_link_type))
doi_param = SubElement(object, "Param")
doi_param.set("Name", "id")
doi_param.text = str(related_article.xlink_href)
# Add research organisms
for research_organism in poa_article.research_organisms:
if research_organism.lower() != 'other':
# Convert the research organism
research_organism_converted = self.convert_research_organism(research_organism)
self.set_object(self.object_list, "keyword", "value", research_organism_converted)
# Add article categories
for article_category in poa_article.article_categories:
if article_category.lower().strip() == 'computational and systems biology':
# Edge case category needs special treatment
categories = ['Computational biology', 'Systems biology']
else:
# Break on "and" and capitalise the first letter
categories = article_category.split('and')
for category in categories:
category = category.strip().lower()
self.set_object(self.object_list, "keyword", "value", category)
# Add keywords
for keyword in poa_article.author_keywords:
self.set_object(self.object_list, "keyword", "value", keyword)
# Finally, do not leave an empty ObjectList tag, if present
if len(self.object_list) <= 0:
parent.remove(self.object_list)
def convert_research_organism(self, research_organism):
        # Lower-case the name, but keep an upper-case first letter when it is
        # followed by a dot and a space (e.g. "E. coli")
research_organism_converted = research_organism.lower()
try:
if re.match('^[a-z]\. ', research_organism_converted):
# Upper the first character and add to the remainder
research_organism_converted = (
research_organism_converted[0].upper() +
research_organism_converted[1:])
except IndexError:
pass
except UnicodeEncodeError:
pass
return research_organism_converted
def set_object(self, parent, object_type, param_name, param):
# e.g. <Object Type="keyword"><Param Name="value">human</Param></Object>
self.object = SubElement(parent, "Object")
self.object.set("Type", object_type)
self.param = SubElement(self.object, "Param")
self.param.set("Name", param_name)
self.param.text = param
return self.object
def printXML(self):
print self.root
def prettyXML(self):
publicId = '-//NLM//DTD PubMed 2.6//EN'
systemId = 'https://www.ncbi.nlm.nih.gov/entrez/query/static/PubMed.dtd'
encoding = 'utf-8'
namespaceURI = None
qualifiedName = "ArticleSet"
doctype = ElifeDocumentType(qualifiedName)
doctype._identified_mixin_init(publicId, systemId)
rough_string = ElementTree.tostring(self.root, encoding)
reparsed = minidom.parseString(rough_string)
if doctype:
reparsed.insertBefore(doctype, reparsed.documentElement)
#return reparsed.toprettyxml(indent="\t", encoding = encoding)
return reparsed.toxml(encoding=encoding)
def build_pubmed_xml_for_articles(poa_articles):
"""
    Given a list of article objects, generate PubMed XML from them
"""
# test the XML generator
eXML = pubMedPoaXML(poa_articles)
prettyXML = eXML.prettyXML()
# Write to file
f = open(settings.TMP_DIR + os.sep + eXML.elife_doi_batch_id + '.xml', "wb")
f.write(prettyXML)
f.close()
return prettyXML
#print prettyXML
if __name__ == '__main__':
article_xmls = [#"generated_xml_output/elife_poa_e02935.xml"
#,"generated_xml_output/Feature.xml"
"generated_xml_output/elife02935.xml"
, "generated_xml_output/elife04024.xml"
, "generated_xml_output/elife04034.xml"
, "generated_xml_output/elife04037.xml"
, "generated_xml_output/elife04105.xml"
, "generated_xml_output/elife04180.xml"
, "generated_xml_output/elife04586.xml"
, "generated_xml_output/elife_poa_e00662.xml"
, "generated_xml_output/elife_poa_e02923.xml"
]
poa_articles = build_articles_from_article_xmls(article_xmls)
# Pretend an article object was PoA'ed for testing
for article in poa_articles:
if (article.doi == '10.7554/eLife.03528'
or article.doi == '10.7554/eLife.03126'
or article.doi == '10.7554/eLife.03401'
or article.doi == '10.7554/eLife.02935'):
article.was_ever_poa = True
if article.doi == '10.7554/eLife.00662':
# Pretend it is v2 POA, which will have a pub date
date = datetime.datetime(2015, 2, 3)
pub_date = date.timetuple()
pub_type = "pub"
date_instance = eLifeDate(pub_type, pub_date)
article.add_date(date_instance)
if article.doi == '10.7554/eLife.02923':
article.version = 2
xml_content = build_pubmed_xml_for_articles(poa_articles)
|
gnott/elife-poa-xml-generation
|
generatePubMedXml.py
|
Python
|
mit
| 22,473
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from adapt.intent import IntentBuilder
from os.path import join, dirname
from mycroft.configuration import ConfigurationManager
from mycroft.identity import IdentityManager
from mycroft.skills.core import MycroftSkill
class CerberusConfigSkill(MycroftSkill):
def __init__(self):
super(CerberusConfigSkill, self).__init__("CerberusConfigSkill")
def initialize(self):
self.load_data_files(join(dirname(__file__)))
intent = IntentBuilder("UpdateConfigurationIntent") \
.require("UpdateConfigurationPhrase") \
.build()
self.register_intent(intent, self.handle_update_intent)
def handle_update_intent(self, message):
identity = IdentityManager().get()
if not identity.owner:
self.speak_dialog("not.paired")
else:
ConfigurationManager.load_remote()
self.speak_dialog("config.updated")
def stop(self):
pass
def create_skill():
return CerberusConfigSkill()
|
ethanaward/mycroft-core
|
mycroft/skills/cerberus_config/__init__.py
|
Python
|
gpl-3.0
| 1,712
|
from turtle import *
##############
###############
penup()
goto(325,325)
pendown()
goto(325,-325)
goto(-325,-325)
goto(-325,325)
goto(325,325)
for i in range(5):
right(90)
forward(65)
right(90)
forward(650)
left(90)
forward(65)
left(90)
forward(650)
right(180)
for i in range(5):
forward(65)
right(90)
forward(650)
left(90)
forward(65)
left(90)
forward(650)
right(90)
penup()
goto(0,0)
right(90)
posx = 0
posy = 0
# Move the turtle directly from the key handlers: a polling loop would
# block the Tk event loop, so keyboard events are handled by callbacks
# registered once, followed by mainloop()
def go_f():
    global posy
    if posy == 5:
        print "can't go that way"
    else:
        forward(65)
        posy = posy + 1
def go_b():
    global posy
    if posy == -5:
        print "can't go that way"
    else:
        back(65)
        posy = posy - 1
def go_l():
    global posx
    if posx == -5:
        print "can't go that way"
    else:
        left(90)
        forward(65)
        right(90)
        posx = posx - 1
def go_r():
    global posx
    if posx == 5:
        print "can't go that way"
    else:
        right(90)
        forward(65)
        left(90)
        posx = posx + 1
onkey(go_f, "Up")
onkey(go_b, "Down")
onkey(go_l, "Left")
onkey(go_r, "Right")
listen()
mainloop()
|
lizerd123/github
|
grid.py
|
Python
|
mit
| 1,130
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP860.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp860',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0x0084 -> LATIN SMALL LETTER A WITH TILDE
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xc1' # 0x0086 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xca' # 0x0089 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xcd' # 0x008b -> LATIN CAPITAL LETTER I WITH ACUTE
'\xd4' # 0x008c -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc3' # 0x008e -> LATIN CAPITAL LETTER A WITH TILDE
'\xc2' # 0x008f -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xc0' # 0x0091 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc8' # 0x0092 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0x0094 -> LATIN SMALL LETTER O WITH TILDE
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xda' # 0x0096 -> LATIN CAPITAL LETTER U WITH ACUTE
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xcc' # 0x0098 -> LATIN CAPITAL LETTER I WITH GRAVE
'\xd5' # 0x0099 -> LATIN CAPITAL LETTER O WITH TILDE
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
'\u20a7' # 0x009e -> PESETA SIGN
'\xd3' # 0x009f -> LATIN CAPITAL LETTER O WITH ACUTE
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\xd2' # 0x00a9 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x0091, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x0086, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x008f, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x008e, # LATIN CAPITAL LETTER A WITH TILDE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0092, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0089, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cc: 0x0098, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x008b, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00a9, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x009f, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x008c, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x0099, # LATIN CAPITAL LETTER O WITH TILDE
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x0096, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x0084, # LATIN SMALL LETTER A WITH TILDE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x0094, # LATIN SMALL LETTER O WITH TILDE
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
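### Usage sketch (assumes direct use of the tables above; per the decoding
### map, 0x86 is CP860's LATIN CAPITAL LETTER A WITH ACUTE, unlike CP437,
### which maps 0x86 elsewhere)
if __name__ == "__main__":
    text, consumed = codecs.charmap_decode(b'\x86', 'strict', decoding_table)
    assert (text, consumed) == ('\xc1', 1)
    data, produced = codecs.charmap_encode(text, 'strict', encoding_map)
    assert (data, produced) == (b'\x86', 1)
    print(getregentry().name)  # -> cp860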
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/encodings/cp860.py
|
Python
|
gpl-3.0
| 34,681
|
from . import hr_salesman_commission
|
MarcosCommunity/odoo
|
comunity_modules/hr_salesman_commission/__init__.py
|
Python
|
agpl-3.0
| 37
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailsearch.urls import frontend as wagtailsearch_frontend_urls
import os.path
urlpatterns = patterns("",
url(r"^django/", include(admin.site.urls)),
url(r"^wagtail/", include(wagtailadmin_urls)),
url(r"^search/", include(wagtailsearch_frontend_urls)),
url(r"^documents/", include(wagtaildocs_urls)),
url(r"", include(wagtail_urls)),
)
if settings.DEBUG:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns() # tell gunicorn where static files are in dev mode
urlpatterns += static(settings.MEDIA_URL + "images/", document_root=os.path.join(settings.MEDIA_ROOT, "images"))
|
timorieber/wagtail-openshift-quickstart
|
wsgi/wagtail-openshift-quickstart/wagtail-openshift-quickstart/urls.py
|
Python
|
isc
| 1,011
|
"""
Script to process pytest warnings output by pytest-json-report plugin and output it as a html
"""
import argparse
import io
import itertools
import json
import os
import re
from collections import Counter
from write_to_html import (
HtmlOutlineWriter,
) # noqa pylint: disable=import-error,useless-suppression
columns = [
"message",
"category",
"filename",
"lineno",
"high_location",
"label",
"num",
"deprecated",
]
columns_index_dict = {key: index for index, key in enumerate(columns)}
def separate_warnings_by_location(warnings_data):
    """
    Warnings originate from multiple locations. This function takes in a list of warning objects
    and separates them based on their filename location
    """
    # first create a regex for each known file location
warnings_locations = {
r".*/python\d\.\d/site-packages/.*\.py": "python", # noqa pylint: disable=W1401
r".*/edx-platform/lms/.*\.py": "lms", # noqa pylint: disable=W1401
r".*/edx-platform/openedx/.*\.py": "openedx", # noqa pylint: disable=W1401
r".*/edx-platform/cms/.*\.py": "cms", # noqa pylint: disable=W1401
r".*/edx-platform/common/.*\.py": "common", # noqa pylint: disable=W1401
}
    # separate into locations flow:
    # - iterate through each warnings_object and see if its filename matches any regex in warnings_locations
    # - if so, set the high_location index on the warnings_object to the location name
for warnings_object in warnings_data:
warning_origin_located = False
for key in warnings_locations:
if (
re.search(key, warnings_object[columns_index_dict["filename"]])
is not None
):
warnings_object[
columns_index_dict["high_location"]
] = warnings_locations[key]
warning_origin_located = True
break
if not warning_origin_located:
warnings_object[columns_index_dict["high_location"]] = "other"
return warnings_data
def convert_warning_dict_to_list(warning_dict):
    """
    Converts our data dict into our defined list, based on the columns defined at the top of this file
    """
output = []
for column in columns:
if column in warning_dict:
output.append(warning_dict[column])
else:
output.append(None)
output[columns_index_dict["num"]] = 1
return output
def read_warning_data(dir_path):
    """
    During test runs in jenkins, multiple warning json files are output. This function finds all such
    files and aggregates the warnings into one large list
    """
dir_path = os.path.expanduser(dir_path)
# find all files that exist in given directory
files_in_dir = [
f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))
]
warnings_files = []
# TODO(jinder): currently this is hard-coded in, maybe create a constants file with info
# THINK(jinder): but creating file for one constant seems overkill
warnings_file_name_regex = (
r"pytest_warnings_?\d*\.json" # noqa pylint: disable=W1401
)
    # iterate through files_in_dir and see if they match our known file name pattern
for temp_file in files_in_dir:
if re.search(warnings_file_name_regex, temp_file) is not None:
warnings_files.append(temp_file)
# go through each warning file and aggregate warnings into warnings_data
warnings_data = []
for temp_file in warnings_files:
with io.open(os.path.expanduser(dir_path + "/" + temp_file), "r") as read_file:
json_input = json.load(read_file)
if "warnings" in json_input:
data = [
convert_warning_dict_to_list(warning_dict)
for warning_dict in json_input["warnings"]
]
warnings_data.extend(data)
else:
print(temp_file)
return warnings_data
def compress_similar_warnings(warnings_data):
    """
    Find all warnings that are exactly the same, count them, and return the deduplicated
    warnings with the count added to each warning
    """
tupled_data = [tuple(data) for data in warnings_data]
test_counter = Counter(tupled_data)
output = [list(value) for value in test_counter.keys()]
for data_object in output:
data_object[columns_index_dict["num"]] = test_counter[tuple(data_object)]
return output
def process_warnings_json(dir_path):
    """
    Master function to process all warning files and output a compressed list of warning
    rows, one row per unique warning, laid out according to the columns defined at the
    top of this file.
    flow:
    - Aggregate data from all warning files
    - Flag warnings as deprecated vs non-deprecated (message contains the word "deprecated")
    - Categorize warnings by their file location
    - Compress identical warnings, counting occurrences
    Possible Error/enhancement: there might be better ways to separate deprecated vs
    non-deprecated warnings
    """
warnings_data = read_warning_data(dir_path)
for warnings_object in warnings_data:
warnings_object[columns_index_dict["deprecated"]] = bool(
"deprecated" in warnings_object[columns_index_dict["message"]]
)
    warnings_data = separate_warnings_by_location(warnings_data)
compressed_warnings_data = compress_similar_warnings(warnings_data)
return compressed_warnings_data
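# Sketch of the output shape (hypothetical values): each compressed row lines
# up with `columns`, e.g.
#   ["`foo` is deprecated", "DeprecationWarning", "/edx-platform/lms/envs/test.py", 42, "lms", None, 3, True]
# where num (index 6) counts identical occurrences and deprecated (index 7)
# reflects the substring check above.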
def group_and_sort_by_sumof(data, group, sort_by):
"""
Group and sort data.
Return
List of tuples. Each tuple has:
- Group key
- Iterable of warnings that belongs to that group
- Count of warnings that belong to that group
"""
    sorted_data = sorted(data, key=lambda x: x[columns_index_dict[group]])
groups_by = itertools.groupby(sorted_data, lambda x: x[columns_index_dict[group]])
temp_list_to_sort = []
for key, generator in groups_by:
value = list(generator)
temp_list_to_sort.append((key, value, sum([item[columns_index_dict[sort_by]] for item in value])))
# sort by count
return sorted(temp_list_to_sort, key=lambda x: -x[2])
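# Sketch (hypothetical values): group_and_sort_by_sumof(rows, "category", "num")
# returns tuples like ("DeprecationWarning", [<rows in that category>], 12),
# ordered by descending total count.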
def write_html_report(warnings_data, html_path):
"""
converts from list of lists data to our html
"""
html_path = os.path.expanduser(html_path)
if "/" in html_path:
location_of_last_dir = html_path.rfind("/")
dir_path = html_path[:location_of_last_dir]
os.makedirs(dir_path, exist_ok=True)
with io.open(html_path, "w") as fout:
html_writer = HtmlOutlineWriter(fout)
category_sorted_by_count = group_and_sort_by_sumof(
warnings_data, "category", "num"
)
for category, group_in_category, category_count in category_sorted_by_count:
# xss-lint: disable=python-wrap-html
html = u'<span class="count">{category}, count: {count}</span> '.format(
category=category, count=category_count
)
html_writer.start_section(html, klass=u"category")
locations_sorted_by_count = group_and_sort_by_sumof(
group_in_category, "high_location", "num"
)
for (
location,
group_in_location,
location_count,
) in locations_sorted_by_count:
# xss-lint: disable=python-wrap-html
html = u'<span class="count">{location}, count: {count}</span> '.format(
location=location, count=location_count
)
html_writer.start_section(html, klass=u"location")
message_group_sorted_by_count = group_and_sort_by_sumof(
group_in_location, "message", "num"
)
for (
message,
message_group,
message_count,
) in message_group_sorted_by_count:
# xss-lint: disable=python-wrap-html
html = u'<span class="count">{warning_text}, count: {count}</span> '.format(
warning_text=message, count=message_count
)
html_writer.start_section(html, klass=u"warning_text")
# warnings_object[location][warning_text] is a list
for warning in message_group:
# xss-lint: disable=python-wrap-html
html = u'<span class="count">{warning_file_path}</span> '.format(
warning_file_path=warning[columns_index_dict["filename"]]
)
html_writer.start_section(html, klass=u"warning")
# xss-lint: disable=python-wrap-html
html = u'<p class="lineno">lineno: {lineno}</p> '.format(
lineno=warning[columns_index_dict["lineno"]]
)
html_writer.write(html)
# xss-lint: disable=python-wrap-html
html = u'<p class="num">num_occur: {num}</p> '.format(
num=warning[columns_index_dict["num"]]
)
html_writer.write(html)
html_writer.end_section()
html_writer.end_section()
html_writer.end_section()
html_writer.end_section()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Process and categorize pytest warnings and output html report."
)
parser.add_argument("--dir-path", default="test_root/log")
parser.add_argument("--html-path", default="test_html.html")
args = parser.parse_args()
data_output = process_warnings_json(args.dir_path)
write_html_report(data_output, args.html_path)
|
msegado/edx-platform
|
openedx/core/process_warnings.py
|
Python
|
agpl-3.0
| 9,900
|
import requests.exceptions
class ConsulClientRequestException(Exception):
"Exception while making a request to Consul"
class ConsulClient(object):
def __init__(self, http, consul_url, output):
self._http = http
self._base_url = consul_url
self._output = output
def get_active_endpoints_for_service(self, service):
url = self._url_for_service(service)
try:
resp = self._http.get(url, timeout=60)
except requests.exceptions.Timeout as e:
self._output('Timeout connecting to Consul')
raise ConsulClientRequestException(e)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
self._output('HTTP Error {0}'.format(str(e)))
raise ConsulClientRequestException(e)
addresses = []
port = None
        for instance in resp.json():  # renamed to avoid shadowing the `service` argument
            addresses.append(instance['Address'])
            port = instance['ServicePort']
        # every healthy instance address, paired with the last-seen service port
        return [(addresses, port)]
def ensure_kube_service_registered(self, service_manifest):
"""Registers/updates a Kube service registration into Consul
- service_manifest: manifest of a Kubernetes service
"""
doc = self._doc_from_manifest(service_manifest, self._consul_registration_doc)
self._output(doc)
url = self._url_for_catalog_registration()
self._put(url, doc)
def ensure_kube_service_deregistered(self, service_manifest):
"""Deregisters a Kube service registration from Consul
- service_manifest: manifest of a Kubernetes service
"""
doc = self._doc_from_manifest(service_manifest, self._consul_deregistration_doc)
self._output(doc)
url = self._url_for_catalog_deregistration()
self._put(url, doc)
def _port_number_from_name(self, ports, name):
for port in ports:
if port['name'] == name:
return port['port']
raise Exception('No port with name {0}'.format(name))
def _put(self, url, data):
try:
resp = self._http.put(url, timeout=60, json=data)
except requests.exceptions.Timeout as e:
self._output('Timeout connecting to Consul')
raise ConsulClientRequestException(e)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
self._output('HTTP Error {0}'.format(str(e)))
raise ConsulClientRequestException(e)
def _doc_from_manifest(self, service_manifest, func):
"""Creates a doc from a Kube manifest.
`service_manifest` has properties extracted then applied to `func`. The
result of `func` is returned.
"""
service_metadata = service_manifest['metadata']
service_spec = service_manifest['spec']
endpoint = service_metadata['annotations']['domainName']
consul_name = service_metadata['annotations']['consul8s/service.name']
service_id = service_metadata['annotations'].get('consul8s/service.id', consul_name)
tags_str = service_metadata['annotations'].get('consul8s/service.tags')
if tags_str is None:
tags = []
else:
tags = tags_str.split(',')
port_name = service_metadata['annotations']['consul8s/service.port_name']
port_number = self._port_number_from_name(service_spec['ports'], port_name)
doc = func(consul_name, service_id, endpoint, port_number, tags)
return doc
def _consul_registration_doc(self, service, service_id, address, port, tags):
doc = {
'Node': 'kubernetes-{}'.format(service_id),
'Address': address,
'Service': {
'ID': service_id,
'Service': service,
'Address': address,
'Port': port,
'Tags': tags,
},
}
return doc
def _consul_deregistration_doc(self, service, service_id, address, port, tags):
doc = {
'Node': 'kubernetes-{}'.format(service_id),
'ServiceID': service_id,
}
return doc
def _url_for_service(self, service):
return '{0}/v1/catalog/service/{1}?passing'.format(self._base_url, service)
def _url_for_catalog_registration(self):
return '{0}/v1/catalog/register'.format(self._base_url)
def _url_for_catalog_deregistration(self):
return '{0}/v1/catalog/deregister'.format(self._base_url)
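# Usage sketch (hypothetical wiring): any object exposing requests-style
# `get`/`put` methods works for `http`, and `output` is any callable.
#   import requests
#   client = ConsulClient(requests, 'http://127.0.0.1:8500', print)
#   endpoints = client.get_active_endpoints_for_service('web')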
|
reactiveops/consul8s
|
consul8s/consul_client.py
|
Python
|
apache-2.0
| 4,540
|
# Local project imports
from core import app
from core.models import User
import helpers as util
# Standard python imports
import json
# Third party imports
from flask import render_template, jsonify, request
# Application homepage
@app.route('/index')
@app.route("/")
def index():
return render_template("index.html")
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'GET':
return render_template("signup.html")
elif request.method == 'POST':
return render_template("register.html")
@app.route('/copyright')
def copyright():
return "Copyright Message Here"
@app.route('/about')
def about():
return render_template("about.html")
@app.route('/gallery')
def gallery():
return render_template("gallery.html")
@app.route("/blog")
def blog():
return render_template("blog.html")
@app.route("/partners")
def partners():
return render_template("partners.html")
@app.route("/support")
def support():
return render_template("support.html")
@app.route("/contact")
def contact():
return render_template("contact.html")
# Application error handlers
@app.errorhandler(400)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(405)
def not_found(error=None):
message = {
'code': error.code,
'status': 'failure',
'url': request.url, # set the requested url
'reason': error.name
}
resp = jsonify(message) # generate a response object
resp.status_code = error.code # set the error status code
return resp
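# Sketch of the error payload (hypothetical request): GET /nope would yield
#   {"code": 404, "status": "failure", "url": "http://localhost/nope", "reason": "Not Found"}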
|
pesos/ngo-portal
|
src/core/views.py
|
Python
|
mit
| 1,570
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
import sys
from optparse import OptionParser, make_option as Option
from pprint import pformat
from textwrap import wrap
from anyjson import deserialize
from celery import __version__
from celery.app import app_or_default, current_app
from celery.utils import term
from celery.bin.base import Command as CeleryCommand
commands = {}
class Error(Exception):
pass
def command(fun, name=None):
commands[name or fun.__name__] = fun
return fun
class Command(object):
help = ""
args = ""
version = __version__
option_list = CeleryCommand.preload_options + (
Option("--quiet", "-q", action="store_true", dest="quiet",
default=False),
Option("--no-color", "-C", dest="no_color", action="store_true",
help="Don't colorize output."),
)
def __init__(self, app=None, no_color=False):
self.app = app_or_default(app)
self.colored = term.colored(enabled=not no_color)
def __call__(self, *args, **kwargs):
try:
self.run(*args, **kwargs)
except Error, exc:
self.error(self.colored.red("Error: %s" % exc))
def error(self, s):
return self.out(s, fh=sys.stderr)
    def out(self, s, fh=sys.stdout):
        s = str(s)
        if not s.endswith("\n"):
            s += "\n"
        fh.write(s)
def create_parser(self, prog_name, command):
return OptionParser(prog=prog_name,
usage=self.usage(command),
version=self.version,
option_list=self.option_list)
def run_from_argv(self, prog_name, argv):
self.prog_name = prog_name
self.command = argv[0]
self.arglist = argv[1:]
self.parser = self.create_parser(self.prog_name, self.command)
options, args = self.parser.parse_args(self.arglist)
self.colored = term.colored(enabled=not options.no_color)
self(*args, **options.__dict__)
def run(self, *args, **kwargs):
raise NotImplementedError()
def usage(self, command):
return "%%prog %s [options] %s" % (command, self.args)
def prettify_list(self, n):
c = self.colored
if not n:
return "- empty -"
return "\n".join(str(c.reset(c.white("*"), " %s" % (item, )))
for item in n)
def prettify_dict_ok_error(self, n):
c = self.colored
if "ok" in n:
return (c.green("OK"),
indent(self.prettify(n["ok"])[1]))
elif "error" in n:
return (c.red("ERROR"),
indent(self.prettify(n["error"])[1]))
def prettify(self, n):
OK = str(self.colored.green("OK"))
if isinstance(n, list):
return OK, self.prettify_list(n)
if isinstance(n, dict):
if "ok" in n or "error" in n:
return self.prettify_dict_ok_error(n)
if isinstance(n, basestring):
return OK, unicode(n)
return OK, pformat(n)
class list_(Command):
args = "<bindings>"
def list_bindings(self, channel):
fmt = lambda q, e, r: self.out("%s %s %s" % (q.ljust(28),
e.ljust(28), r))
fmt("Queue", "Exchange", "Routing Key")
fmt("-" * 16, "-" * 16, "-" * 16)
for binding in channel.list_bindings():
fmt(*binding)
def run(self, what, *_, **kw):
topics = {"bindings": self.list_bindings}
if what not in topics:
raise ValueError("%r not in %r" % (what, topics.keys()))
with self.app.broker_connection() as conn:
self.app.amqp.get_task_consumer(conn).declare()
with conn.channel() as channel:
return topics[what](channel)
list_ = command(list_, "list")
class apply(Command):
args = "<task_name>"
option_list = Command.option_list + (
Option("--args", "-a", dest="args"),
Option("--kwargs", "-k", dest="kwargs"),
Option("--eta", dest="eta"),
Option("--countdown", dest="countdown", type="int"),
Option("--expires", dest="expires"),
Option("--serializer", dest="serializer", default="json"),
Option("--queue", dest="queue"),
Option("--exchange", dest="exchange"),
Option("--routing-key", dest="routing_key"),
)
def run(self, name, *_, **kw):
# Positional args.
args = kw.get("args") or ()
if isinstance(args, basestring):
args = deserialize(args)
# Keyword args.
kwargs = kw.get("kwargs") or {}
if isinstance(kwargs, basestring):
kwargs = deserialize(kwargs)
# Expires can be int.
expires = kw.get("expires") or None
try:
expires = int(expires)
except (TypeError, ValueError):
pass
res = self.app.send_task(name, args=args, kwargs=kwargs,
countdown=kw.get("countdown"),
serializer=kw.get("serializer"),
queue=kw.get("queue"),
exchange=kw.get("exchange"),
routing_key=kw.get("routing_key"),
eta=kw.get("eta"),
expires=expires)
self.out(res.task_id)
apply = command(apply)
def pluralize(n, text, suffix='s'):
if n > 1:
return text + suffix
return text
class purge(Command):
def run(self, *args, **kwargs):
app = current_app()
queues = len(app.amqp.queues.keys())
messages_removed = app.control.discard_all()
if messages_removed:
self.out("Purged %s %s from %s known task %s." % (
messages_removed, pluralize(messages_removed, "message"),
queues, pluralize(queues, "queue")))
else:
self.out("No messages purged from %s known %s" % (
queues, pluralize(queues, "queue")))
purge = command(purge)
class result(Command):
args = "<task_id>"
option_list = Command.option_list + (
Option("--task", "-t", dest="task"),
)
def run(self, task_id, *args, **kwargs):
from celery import registry
result_cls = self.app.AsyncResult
task = kwargs.get("task")
if task:
result_cls = registry.tasks[task].AsyncResult
result = result_cls(task_id)
self.out(self.prettify(result.get())[1])
result = command(result)
class inspect(Command):
choices = {"active": 1.0,
"active_queues": 1.0,
"scheduled": 1.0,
"reserved": 1.0,
"stats": 1.0,
"revoked": 1.0,
"registered_tasks": 1.0, # alias to registered
"registered": 1.0,
"enable_events": 1.0,
"disable_events": 1.0,
"ping": 0.2,
"add_consumer": 1.0,
"cancel_consumer": 1.0}
option_list = Command.option_list + (
Option("--timeout", "-t", type="float", dest="timeout",
default=None,
help="Timeout in seconds (float) waiting for reply"),
Option("--destination", "-d", dest="destination",
help="Comma separated list of destination node names."))
show_body = True
def usage(self, command):
return "%%prog %s [options] %s [%s]" % (
command, self.args, "|".join(self.choices.keys()))
def run(self, *args, **kwargs):
self.quiet = kwargs.get("quiet", False)
self.show_body = kwargs.get("show_body", True)
if not args:
raise Error("Missing inspect command. See --help")
command = args[0]
if command == "help":
raise Error("Did you mean 'inspect --help'?")
if command not in self.choices:
raise Error("Unknown inspect command: %s" % command)
destination = kwargs.get("destination")
timeout = kwargs.get("timeout") or self.choices[command]
if destination and isinstance(destination, basestring):
destination = map(str.strip, destination.split(","))
def on_reply(body):
c = self.colored
node = body.keys()[0]
reply = body[node]
status, preply = self.prettify(reply)
self.say("->", c.cyan(node, ": ") + status, indent(preply))
self.say("<-", command)
i = self.app.control.inspect(destination=destination,
timeout=timeout,
callback=on_reply)
replies = getattr(i, command)(*args[1:])
if not replies:
raise Error("No nodes replied within time constraint.")
return replies
def say(self, direction, title, body=""):
c = self.colored
if direction == "<-" and self.quiet:
return
dirstr = not self.quiet and c.bold(c.white(direction), " ") or ""
self.out(c.reset(dirstr, title))
if body and self.show_body:
self.out(body)
inspect = command(inspect)
def indent(s, n=4):
i = [" " * n + l for l in s.split("\n")]
return "\n".join("\n".join(wrap(j)) for j in i)
class status(Command):
option_list = inspect.option_list
def run(self, *args, **kwargs):
replies = inspect(app=self.app,
no_color=kwargs.get("no_color", False)) \
.run("ping", **dict(kwargs, quiet=True, show_body=False))
if not replies:
raise Error("No nodes replied within time constraint")
nodecount = len(replies)
if not kwargs.get("quiet", False):
self.out("\n%s %s online." % (nodecount,
nodecount > 1 and "nodes" or "node"))
status = command(status)
class help(Command):
def usage(self, command):
return "%%prog <command> [options] %s" % (self.args, )
def run(self, *args, **kwargs):
self.parser.print_help()
usage = ["",
"Type '%s <command> --help' for help on a "
"specific command." % (self.prog_name, ),
"",
"Available commands:"]
for command in list(sorted(commands.keys())):
usage.append(" %s" % command)
self.out("\n".join(usage))
help = command(help)
class celeryctl(CeleryCommand):
commands = commands
    def execute(self, command, argv=None):
        try:
            cls = self.commands[command]
        except KeyError:
            cls, argv = self.commands["help"], ["help"]
try:
cls(app=self.app).run_from_argv(self.prog_name, argv)
except Error:
return self.execute("help", argv)
def remove_options_at_beginning(self, argv, index=0):
if argv:
            while index < len(argv):
value = argv[index]
if value.startswith("--"):
pass
elif value.startswith("-"):
index += 1
else:
return argv[index:]
index += 1
return []
def handle_argv(self, prog_name, argv):
self.prog_name = prog_name
argv = self.remove_options_at_beginning(argv)
try:
command = argv[0]
except IndexError:
command, argv = "help", ["help"]
return self.execute(command, argv)
def main():
try:
celeryctl().execute_from_commandline()
except KeyboardInterrupt:
pass
if __name__ == "__main__": # pragma: no cover
main()
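# Usage sketch (shell; `tasks.add` is a hypothetical task name):
#   celeryctl status
#   celeryctl inspect ping --timeout 5.0
#   celeryctl apply tasks.add --args '[2, 2]'
#   celeryctl list bindings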
|
softak/webfaction_demo
|
vendor-local/lib/python/celery/bin/celeryctl.py
|
Python
|
bsd-3-clause
| 11,986
|
import sys
from types import Node
from parser import parse
from walk import bfs
from builder import build
|
wmgaca/deusexmachina
|
lib/__init__.py
|
Python
|
mit
| 107
|
from epsilon import *
class Veblen(Epsilon):
    def __init__(self,veblenForm):
        assert(isinstance(veblenForm,(list,tuple)))
        for p in veblenForm:
            assert(isinstance(p,(Ordinal,int,long)))
        veblenForm = list(veblenForm) # copy, so tuples can be mutated below
        for i in range(len(veblenForm)):
            if(isinstance(veblenForm[i],(int,long))):
                veblenForm[i]=Ordinal.int(veblenForm[i])
        assert(len(veblenForm) > 0)
        # strip leading zero arguments; an all-zero form is rejected
        while(len(veblenForm) > 0 and veblenForm[0] == 0):
            veblenForm = veblenForm[1:]
        assert(len(veblenForm) > 0)
        self.vform = veblenForm
vform = self.vform
if(len(vform) == 1):
self.form=[(vform[0],1)]
return
#otherwise
        self.form = [(self,1)] # a fixed point of w**x, so the Cantor normal form is just [(self, 1)]
if((len(vform) == 2) and (vform[0] == 1)):
self.epsilon_alpha = vform[1]
else:
self.epsilon_alpha = self
def __str__(self):
return 'phi('+','.join([str(p) for p in self.vform])+')'
def __lt__(self,other):
if(isinstance(other,(Veblen,Epsilon))):
if(len(self.vform) == len(other.vform)):
i = 0
while((i < len(self.vform)) and (self.vform[i] == other.vform[i])):
i += 1
if(i == len(self.vform)):
return False
if(self.vform[i] < other.vform[i]):
j = i+1
while(j < len(self.vform)):
if(not (self.vform[j] < other)):
return False
j += 1
return True
else: # it isn't equal, so self.vform[i] > other.vform[i]
j = i+1
while(j < len(self.vform)):
if(self < other.vform[j]):
return True
j += 1
return False
#this line never reached
if(len(self.vform) < len(other.vform)):
for p in self.vform:
if(not (p < other)):
return False
return True
if(len(self.vform) > len(other.vform)):
for p in other.vform:
if(self < p):
return True
return False #Warning: this might be wrong for cases where self == p sometimes. Not sure.
def phi(*args):
if(len(args)==0):
return z
if(len(args)==1):
return w**args[0]
if((len(args)==2) and (args[0]==sz)):
return Epsilon(args[1])
return Veblen(list(args))
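# Usage sketch (assumes, per the star-import of `epsilon` above, that `w` is
# omega, `z` is zero and `sz` is the ordinal one; all three are inherited names):
#   print phi(2)         # w**2
#   print phi(sz, z)     # Epsilon(z), i.e. epsilon_0
#   print Veblen([2, 1]) # prints as phi(2,1)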
|
drocta/py-Ordinals
|
veblen.py
|
Python
|
lgpl-2.1
| 2,673
|
from __future__ import unicode_literals
import logging
import weakref
import spotify
import spotify.connection
import spotify.player
import spotify.social
from spotify import ffi, lib, serialized, utils
__all__ = ['Session', 'SessionEvent']
logger = logging.getLogger(__name__)
class Session(utils.EventEmitter):
"""The Spotify session.
If no ``config`` is provided, the default config is used.
The session object will emit a number of events. See :class:`SessionEvent`
for a list of all available events and how to connect your own listener
functions up to get called when the events happens.
.. warning::
You can only have one :class:`Session` instance per process. This is a
libspotify limitation. If you create a second :class:`Session` instance
in the same process pyspotify will raise a :exc:`RuntimeError` with the
message "Session has already been initialized".
:param config: the session config
:type config: :class:`Config` or :class:`None`
"""
@serialized
def __init__(self, config=None):
super(Session, self).__init__()
if spotify._session_instance is not None:
raise RuntimeError('Session has already been initialized')
if config is not None:
self.config = config
else:
self.config = spotify.Config()
if self.config.application_key is None:
self.config.load_application_key_file()
sp_session_ptr = ffi.new('sp_session **')
spotify.Error.maybe_raise(
lib.sp_session_create(
self.config._sp_session_config, sp_session_ptr
)
)
self._sp_session = ffi.gc(sp_session_ptr[0], lib.sp_session_release)
self._cache = weakref.WeakValueDictionary()
self._emitters = []
self._callback_handles = set()
self.connection = spotify.connection.Connection(self)
self.offline = spotify.offline.Offline(self)
self.player = spotify.player.Player(self)
self.social = spotify.social.Social(self)
spotify._session_instance = self
_cache = None
"""A mapping from sp_* objects to their corresponding Python instances.
The ``_cached`` helper constructors on wrapper objects use this cache for
finding and returning existing alive wrapper objects for the sp_* object it
is about to create a wrapper for.
The cache *does not* keep objects alive. It's only a means for looking up
the objects if they are kept alive somewhere else in the application.
Internal attribute.
"""
_emitters = None
"""A list of event emitters with attached listeners.
When an event emitter has attached event listeners, we must keep the
emitter alive for as long as the listeners are attached. This is achieved
by adding them to this list.
When creating wrapper objects around sp_* objects we must also return the
existing wrapper objects instead of creating new ones so that the set of
event listeners on the wrapper object can be modified. This is achieved
with a combination of this list and the :attr:`_cache` mapping.
Internal attribute.
"""
_callback_handles = None
"""A set of handles returned by :meth:`spotify.ffi.new_handle`.
These must be kept alive for the handle to remain valid until the callback
arrives, even if the end user does not maintain a reference to the object
the callback works on.
Internal attribute.
"""
config = None
"""A :class:`Config` instance with the current configuration.
Once the session has been created, changing the attributes of this object
will generally have no effect.
"""
connection = None
"""An :class:`~spotify.connection.Connection` instance for controlling the
connection to the Spotify servers."""
offline = None
"""An :class:`~spotify.offline.Offline` instance for controlling offline
sync."""
player = None
"""A :class:`~spotify.player.Player` instance for controlling playback."""
social = None
"""A :class:`~spotify.social.Social` instance for controlling social
sharing."""
def login(self, username, password=None, remember_me=False, blob=None):
"""Authenticate to Spotify's servers.
You can login with one of two combinations:
- ``username`` and ``password``
- ``username`` and ``blob``
To get the ``blob`` string, you must once log in with ``username`` and
``password``. You'll then get the ``blob`` string passed to the
:attr:`~SessionCallbacks.credentials_blob_updated` callback.
If you set ``remember_me`` to :class:`True`, you can later login to the
same account without providing any ``username`` or credentials by
calling :meth:`relogin`.
"""
username = utils.to_char(username)
if password is not None:
password = utils.to_char(password)
blob = ffi.NULL
elif blob is not None:
password = ffi.NULL
blob = utils.to_char(blob)
else:
raise AttributeError('password or blob is required to login')
spotify.Error.maybe_raise(
lib.sp_session_login(
self._sp_session, username, password, bool(remember_me), blob
)
)
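    # Usage sketch (hypothetical credentials; 'alice'/'secret' are placeholders):
    #   session = spotify.Session()
    #   session.login('alice', password='secret', remember_me=True)
    #   session.process_events()  # pump events so the login result is delivered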
def logout(self):
"""Log out the current user.
If you logged in with the ``remember_me`` argument set to
:class:`True`, you will also need to call :meth:`forget_me` to
completely remove all credentials of the user that was logged in.
"""
spotify.Error.maybe_raise(lib.sp_session_logout(self._sp_session))
@property
def remembered_user_name(self):
"""The username of the remembered user from a previous :meth:`login`
call."""
return utils.get_with_growing_buffer(
lib.sp_session_remembered_user, self._sp_session
)
def relogin(self):
"""Relogin as the remembered user.
To be able to do this, you must previously have logged in with
:meth:`login` with the ``remember_me`` argument set to :class:`True`.
To check what user you'll be logged in as if you call this method, see
:attr:`remembered_user_name`.
"""
spotify.Error.maybe_raise(lib.sp_session_relogin(self._sp_session))
def forget_me(self):
"""Forget the remembered user from a previous :meth:`login` call."""
spotify.Error.maybe_raise(lib.sp_session_forget_me(self._sp_session))
@property
@serialized
def user(self):
"""The logged in :class:`User`."""
sp_user = lib.sp_session_user(self._sp_session)
if sp_user == ffi.NULL:
return None
return spotify.User(self, sp_user=sp_user, add_ref=True)
@property
@serialized
def user_name(self):
"""The username of the logged in user."""
return utils.to_unicode(lib.sp_session_user_name(self._sp_session))
@property
@serialized
def user_country(self):
"""The country of the currently logged in user.
The :attr:`~SessionEvent.OFFLINE_STATUS_UPDATED` event is emitted on
the session object when this changes.
"""
return utils.to_country(lib.sp_session_user_country(self._sp_session))
@property
@serialized
def playlist_container(self):
"""The :class:`PlaylistContainer` for the currently logged in user."""
sp_playlistcontainer = lib.sp_session_playlistcontainer(
self._sp_session
)
if sp_playlistcontainer == ffi.NULL:
return None
return spotify.PlaylistContainer._cached(
self, sp_playlistcontainer, add_ref=True
)
@property
def inbox(self):
"""The inbox :class:`Playlist` for the currently logged in user."""
sp_playlist = lib.sp_session_inbox_create(self._sp_session)
if sp_playlist == ffi.NULL:
return None
return spotify.Playlist._cached(
self, sp_playlist=sp_playlist, add_ref=False
)
def set_cache_size(self, size):
"""Set maximum size in MB for libspotify's cache.
If set to 0 (the default), up to 10% of the free disk space will be
used."""
spotify.Error.maybe_raise(
lib.sp_session_set_cache_size(self._sp_session, size)
)
def flush_caches(self):
"""Write all cached data to disk.
libspotify does this regularly and on logout, so you should never need
to call this method yourself.
"""
spotify.Error.maybe_raise(lib.sp_session_flush_caches(self._sp_session))
def preferred_bitrate(self, bitrate):
"""Set preferred :class:`Bitrate` for music streaming."""
spotify.Error.maybe_raise(
lib.sp_session_preferred_bitrate(self._sp_session, bitrate)
)
def preferred_offline_bitrate(self, bitrate, allow_resync=False):
"""Set preferred :class:`Bitrate` for offline sync.
If ``allow_resync`` is :class:`True`, libspotify may resynchronize
already synced tracks.
"""
spotify.Error.maybe_raise(
lib.sp_session_preferred_offline_bitrate(
self._sp_session, bitrate, allow_resync
)
)
@property
def volume_normalization(self):
"""Whether volume normalization is active or not.
Set to :class:`True` or :class:`False` to change.
"""
return bool(lib.sp_session_get_volume_normalization(self._sp_session))
@volume_normalization.setter
def volume_normalization(self, value):
spotify.Error.maybe_raise(
lib.sp_session_set_volume_normalization(self._sp_session, value)
)
def process_events(self):
"""Process pending events in libspotify.
This method must be called for most callbacks to be called. Without
calling this method, you'll only get the callbacks that are called from
internal libspotify threads. When the
:attr:`~SessionEvent.NOTIFY_MAIN_THREAD` event is emitted (from an
internal libspotify thread), it's your job to make sure this method is
called (from the thread you use for accessing Spotify), so that further
callbacks can be triggered (from the same thread).
pyspotify provides an :class:`~spotify.EventLoop` that you can use for
processing events when needed.
"""
next_timeout = ffi.new('int *')
spotify.Error.maybe_raise(
lib.sp_session_process_events(self._sp_session, next_timeout)
)
return next_timeout[0]
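# A minimal driver-loop sketch (pyspotify's own spotify.EventLoop does
# this more carefully; `notify` is a hypothetical threading.Event set by
# a NOTIFY_MAIN_THREAD listener). The returned timeout is in milliseconds:
#
#   timeout = session.process_events()
#   while True:
#       notify.wait(timeout / 1000.0)
#       notify.clear()
#       timeout = session.process_events()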
def inbox_post_tracks(
self, canonical_username, tracks, message, callback=None
):
"""Post a ``message`` and one or more ``tracks`` to the inbox of the
user with the given ``canonical_username``.
``tracks`` can be a single :class:`~spotify.Track` or a list of
:class:`~spotify.Track` objects.
Returns an :class:`InboxPostResult` that can be used to check if the
request completed successfully.
If callback isn't :class:`None`, it is called with an
:class:`InboxPostResult` instance when the request has completed.
"""
return spotify.InboxPostResult(
self, canonical_username, tracks, message, callback
)
def get_starred(self, canonical_username=None):
"""Get the starred :class:`Playlist` for the user with
``canonical_username``.
If ``canonical_username`` isn't specified, the starred playlist for
the currently logged in user is returned.
"""
if canonical_username is None:
sp_playlist = lib.sp_session_starred_create(self._sp_session)
else:
sp_playlist = lib.sp_session_starred_for_user_create(
self._sp_session, utils.to_bytes(canonical_username)
)
if sp_playlist == ffi.NULL:
return None
return spotify.Playlist._cached(self, sp_playlist, add_ref=False)
def get_published_playlists(self, canonical_username=None):
"""Get the :class:`PlaylistContainer` of published playlists for the
user with ``canonical_username``.
If ``canonical_username`` isn't specified, the published container for
the currently logged in user is returned.
"""
if canonical_username is None:
canonical_username = ffi.NULL
else:
canonical_username = utils.to_bytes(canonical_username)
sp_playlistcontainer = lib.sp_session_publishedcontainer_for_user_create(
self._sp_session, canonical_username
)
if sp_playlistcontainer == ffi.NULL:
return None
return spotify.PlaylistContainer._cached(
self, sp_playlistcontainer, add_ref=False
)
def get_link(self, uri):
"""
Get :class:`Link` from any Spotify URI.
A link can be created from a string containing a Spotify URI of the
form ``spotify:...``.
Example::
>>> session = spotify.Session()
# ...
>>> session.get_link(
... 'spotify:track:2Foc5Q5nqNiosCNqttzHof')
Link('spotify:track:2Foc5Q5nqNiosCNqttzHof')
>>> session.get_link(
... 'http://open.spotify.com/track/4wl1dK5dHGp3Ig51stvxb0')
Link('spotify:track:4wl1dK5dHGp3Ig51stvxb0')
"""
return spotify.Link(self, uri=uri)
def get_track(self, uri):
"""
Get :class:`Track` from a Spotify track URI.
Example::
>>> session = spotify.Session()
# ...
>>> track = session.get_track(
... 'spotify:track:2Foc5Q5nqNiosCNqttzHof')
>>> track.load().name
u'Get Lucky'
"""
return spotify.Track(self, uri=uri)
def get_local_track(self, artist=None, title=None, album=None, length=None):
"""
Get :class:`Track` for a local track.
Spotify's official clients support adding your local music files to
Spotify so they can be played in the Spotify client. These are not
synced with Spotify's servers or between your devices, and there is no
trace of them in your Spotify user account. The exception is when you
add one of these local tracks to a playlist or mark them as starred.
This creates a "local track" which pyspotify also will be able to
observe.
"Local tracks" can be recognized in several ways:
- The track's URI will be of the form
``spotify:local:ARTIST:ALBUM:TITLE:LENGTH_IN_SECONDS``. Any of the
parts in all caps can be left out if there is no information
available. That is, ``spotify:local::::`` is a valid local track URI.
- :attr:`Link.type` will be :class:`LinkType.LOCALTRACK` for the
track's link.
- :attr:`Track.is_local` will be :class:`True` for the track.
This method can be used to create local tracks that can be starred or
added to playlists.
``artist`` may be an artist name. ``title`` may be a track name.
``album`` may be an album name. ``length`` may be a track length in
milliseconds.
Note that when creating a local track you provide the length in
milliseconds, while the local track URI contains the length in seconds.
"""
if artist is None:
artist = ''
if title is None:
title = ''
if album is None:
album = ''
if length is None:
length = -1
artist = utils.to_char(artist)
title = utils.to_char(title)
album = utils.to_char(album)
sp_track = lib.sp_localtrack_create(artist, title, album, length)
return spotify.Track(self, sp_track=sp_track, add_ref=False)
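# Worked example of the milliseconds-vs-seconds note above (values are
# hypothetical): a 3:30 track is created with length=210000 (ms), while
# its local track URI ends in 210 (seconds):
#
#   track = session.get_local_track(
#       artist='Foo', title='Bar', length=210000)
#   # track.link.uri == 'spotify:local:Foo::Bar:210'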
def get_album(self, uri):
"""
Get :class:`Album` from a Spotify album URI.
Example::
>>> session = spotify.Session()
# ...
>>> album = session.get_album(
... 'spotify:album:6wXDbHLesy6zWqQawAa91d')
>>> album.load().name
u'Forward / Return'
"""
return spotify.Album(self, uri=uri)
def get_artist(self, uri):
"""
Get :class:`Artist` from a Spotify artist URI.
Example::
>>> session = spotify.Session()
# ...
>>> artist = session.get_artist(
... 'spotify:artist:22xRIphSN7IkPVbErICu7s')
>>> artist.load().name
u'Rob Dougan'
"""
return spotify.Artist(self, uri=uri)
def get_playlist(self, uri):
"""
Get :class:`Playlist` from a Spotify playlist URI.
Example::
>>> session = spotify.Session()
# ...
>>> playlist = session.get_playlist(
... 'spotify:user:fiat500c:playlist:54k50VZdvtnIPt4d8RBCmZ')
>>> playlist.load().name
u'500C feelgood playlist'
"""
return spotify.Playlist(self, uri=uri)
def get_user(self, uri):
"""
Get :class:`User` from a Spotify user URI.
Example::
>>> session = spotify.Session()
# ...
>>> user = session.get_user('spotify:user:jodal')
>>> user.load().display_name
u'jodal'
"""
return spotify.User(self, uri=uri)
def get_image(self, uri, callback=None):
"""
Get :class:`Image` from a Spotify image URI.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, an :class:`Image` instance, when
the image is done loading.
Example::
>>> session = spotify.Session()
# ...
>>> image = session.get_image(
... 'spotify:image:a0bdcbe11b5cd126968e519b5ed1050b0e8183d0')
>>> image.load().data_uri[:50]
u'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEBLAEsAAD'
"""
return spotify.Image(self, uri=uri, callback=callback)
def search(
self,
query,
callback=None,
track_offset=0,
track_count=20,
album_offset=0,
album_count=20,
artist_offset=0,
artist_count=20,
playlist_offset=0,
playlist_count=20,
search_type=None,
):
"""
Search Spotify for tracks, albums, artists, and playlists matching
``query``.
.. warning::
The search API was broken at 2016-02-03 by a server-side change
made by Spotify. The functionality was never restored.
Please use the Spotify Web API to perform searches.
The ``query`` string can be free format, or use some prefixes like
``title:`` and ``artist:`` to limit what to match on. There are no
official docs on the search query format, but there's a `Spotify blog
post
<https://www.spotify.com/blog/archives/2008/01/22/searching-spotify/>`_
from 2008 with some examples.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, a :class:`Search` instance, when
the search completes.
The ``*_offset`` and ``*_count`` arguments can be used to retrieve more
search results. libspotify will currently not respect ``*_count``
values higher than 200, though this may change at any time as the limit
isn't officially documented. If you want to retrieve more
than 200 results, you'll have to search multiple times with different
``*_offset`` values. See the ``*_total`` attributes on the
:class:`Search` to see how many results exist, and to figure out
how many searches you'll need to make to retrieve everything.
``search_type`` is a :class:`SearchType` value. It defaults to
:attr:`SearchType.STANDARD`.
Returns a :class:`Search` instance.
"""
raise Exception(
'Spotify broke the libspotify search API 2016-02-03 '
'and never restored it.'
)
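# An illustrative replacement (not part of pyspotify): a minimal search
# against the Spotify Web API using the `requests` package; `token` is a
# hypothetical OAuth bearer token obtained elsewhere:
#
#   import requests
#   resp = requests.get(
#       'https://api.spotify.com/v1/search',
#       params={'q': 'Get Lucky', 'type': 'track', 'limit': 20},
#       headers={'Authorization': 'Bearer %s' % token})
#   tracks = resp.json()['tracks']['items']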
def get_toplist(
self, type=None, region=None, canonical_username=None, callback=None
):
"""Get a :class:`Toplist` of artists, albums, or tracks that are the
currently most popular worldwide or in a specific region.
``type`` is a :class:`ToplistType` instance that specifies the type of
toplist to create.
``region`` is either a :class:`ToplistRegion` instance, or a 2-letter
ISO 3166-1 country code as a unicode string, that specifies the
geographical region to create a toplist for.
If ``region`` is :attr:`ToplistRegion.USER` and ``canonical_username``
isn't specified, the region of the current user will be used. If
``canonical_username`` is specified, the region of the specified user
will be used instead.
If ``callback`` isn't :class:`None`, it is expected to be a callable
that accepts a single argument, a :class:`Toplist` instance, when the
toplist request completes.
Example::
>>> import spotify
>>> session = spotify.Session()
# ...
>>> toplist = session.get_toplist(
... type=spotify.ToplistType.TRACKS, region='US')
>>> toplist.load()
>>> len(toplist.tracks)
100
>>> len(toplist.artists)
0
>>> toplist.tracks[0]
Track(u'spotify:track:2dLLR6qlu5UJ5gk0dKz0h3')
"""
return spotify.Toplist(
self,
type=type,
region=region,
canonical_username=canonical_username,
callback=callback,
)
class SessionEvent(object):
"""Session events.
Using the :class:`Session` object, you can register listener functions to
be called when various session-related events occur. This class enumerates
the available events and the arguments your listener functions will be
called with.
Example usage::
import spotify
def logged_in(session, error_type):
if error_type is spotify.ErrorType.OK:
print('Logged in as %s' % session.user)
else:
print('Login failed: %s' % error_type)
session = spotify.Session()
session.on(spotify.SessionEvent.LOGGED_IN, logged_in)
session.login('alice', 's3cret')
All events will cause debug log statements to be emitted, even if no
listeners are registered. Thus, there is no need to register listener
functions just to log that they're called.
"""
LOGGED_IN = 'logged_in'
"""Called when login has completed.
Note that even if login has succeeded, that does not mean that you're
online yet, as libspotify may have cached enough information to let you
authenticate with Spotify while offline.
This event should be used to get notified about login errors. To get
notified about the authentication and connection state, refer to the
:attr:`SessionEvent.CONNECTION_STATE_UPDATED` event.
:param session: the current session
:type session: :class:`Session`
:param error_type: the login error type
:type error_type: :class:`ErrorType`
"""
LOGGED_OUT = 'logged_out'
"""Called when logout has completed or there is a permanent connection
error.
:param session: the current session
:type session: :class:`Session`
"""
METADATA_UPDATED = 'metadata_updated'
"""Called when some metadata has been updated.
There is no way to know what metadata was updated, so you'll have to
refresh all your metadata caches.
:param session: the current session
:type session: :class:`Session`
"""
CONNECTION_ERROR = 'connection_error'
"""Called when there is a connection error and libspotify has problems
reconnecting to the Spotify service.
May be called repeatedly as long as the problem persists. Will be called
with an :attr:`ErrorType.OK` error when the problem is resolved.
:param session: the current session
:type session: :class:`Session`
:param error_type: the connection error type
:type error_type: :class:`ErrorType`
"""
MESSAGE_TO_USER = 'message_to_user'
"""Called when libspotify wants to show a message to the end user.
:param session: the current session
:type session: :class:`Session`
:param data: the message
:type data: text
"""
NOTIFY_MAIN_THREAD = 'notify_main_thread'
"""Called when processing on the main thread is needed.
When this is called, you should call :meth:`~Session.process_events` from
your main thread. Failure to do so may cause request timeouts, or a lost
connection.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
MUSIC_DELIVERY = 'music_delivery'
"""Called when there is decompressed audio data available.
If the listener reports fewer frames consumed than
``num_frames``, libspotify will retry delivery of the unconsumed frames in
about 100ms. This can be used for rate limiting if libspotify is giving you
audio data too fast.
.. note::
You can register at most one event listener for this event.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
:param audio_format: the audio format
:type audio_format: :class:`AudioFormat`
:param frames: the audio frames
:type frames: bytestring
:param num_frames: the number of frames
:type num_frames: int
:returns: the number of frames consumed
"""
PLAY_TOKEN_LOST = 'play_token_lost'
"""Music has been paused because an account only allows music to be played
from one location simultaneously.
When this event is emitted, you should pause playback.
:param session: the current session
:type session: :class:`Session`
"""
LOG_MESSAGE = 'log_message'
"""Called when libspotify have something to log.
Note that pyspotify logs this for you, so you'll probably never need to
register a listener for this event.
:param session: the current session
:type session: :class:`Session`
:param data: the message
:type data: text
"""
END_OF_TRACK = 'end_of_track'
"""Called when all audio data for the current track has been delivered.
:param session: the current session
:type session: :class:`Session`
"""
STREAMING_ERROR = 'streaming_error'
"""Called when audio streaming cannot start or continue.
:param session: the current session
:type session: :class:`Session`
:param error_type: the streaming error type
:type error_type: :class:`ErrorType`
"""
USER_INFO_UPDATED = 'user_info_updated'
"""Called when anything related to :class:`User` objects is updated.
:param session: the current session
:type session: :class:`Session`
"""
START_PLAYBACK = 'start_playback'
"""Called when audio playback should start.
You need to implement a listener for the :attr:`GET_AUDIO_BUFFER_STATS`
event for the :attr:`START_PLAYBACK` event to be useful.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
STOP_PLAYBACK = 'stop_playback'
"""Called when audio playback should stop.
You need to implement a listener for the :attr:`GET_AUDIO_BUFFER_STATS`
event for the :attr:`STOP_PLAYBACK` event to be useful.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
"""
GET_AUDIO_BUFFER_STATS = 'get_audio_buffer_stats'
"""Called to query the application about its audio buffer.
.. note::
You can register at most one event listener for this event.
.. warning::
This event is emitted from an internal libspotify thread. Thus, your
event listener must not block, and must use proper synchronization
around anything it does.
:param session: the current session
:type session: :class:`Session`
:returns: an :class:`AudioBufferStats` instance
"""
OFFLINE_STATUS_UPDATED = 'offline_status_updated'
"""Called when offline sync status is updated.
:param session: the current session
:type session: :class:`Session`
"""
CREDENTIALS_BLOB_UPDATED = 'credentials_blob_updated'
"""Called when storable credentials have been updated, typically right
after login.
The ``blob`` argument can be stored and later passed to
:meth:`~Session.login` to login without storing the user's password.
:param session: the current session
:type session: :class:`Session`
:param blob: the authentication blob
:type blob: bytestring
"""
CONNECTION_STATE_UPDATED = 'connection_state_updated'
"""Called when the connection state is updated.
The connection state includes login, logout, offline mode, etc.
:param session: the current session
:type session: :class:`Session`
"""
SCROBBLE_ERROR = 'scrobble_error'
"""Called when there is a scrobble error event.
:param session: the current session
:type session: :class:`Session`
:param error_type: the scrobble error type
:type error_type: :class:`ErrorType`
"""
PRIVATE_SESSION_MODE_CHANGED = 'private_session_mode_changed'
"""Called when there is a change in the private session mode.
:param session: the current session
:type session: :class:`Session`
:param is_private: whether the session is private
:type is_private: bool
"""
class _SessionCallbacks(object):
"""Internal class."""
@classmethod
def get_struct(cls):
return ffi.new(
'sp_session_callbacks *',
{
'logged_in': cls.logged_in,
'logged_out': cls.logged_out,
'metadata_updated': cls.metadata_updated,
'connection_error': cls.connection_error,
'message_to_user': cls.message_to_user,
'notify_main_thread': cls.notify_main_thread,
'music_delivery': cls.music_delivery,
'play_token_lost': cls.play_token_lost,
'log_message': cls.log_message,
'end_of_track': cls.end_of_track,
'streaming_error': cls.streaming_error,
'userinfo_updated': cls.user_info_updated,
'start_playback': cls.start_playback,
'stop_playback': cls.stop_playback,
'get_audio_buffer_stats': cls.get_audio_buffer_stats,
'offline_status_updated': cls.offline_status_updated,
'credentials_blob_updated': cls.credentials_blob_updated,
'connectionstate_updated': cls.connection_state_updated,
'scrobble_error': cls.scrobble_error,
'private_session_mode_changed': cls.private_session_mode_changed,
},
)
# XXX Avoid use of the spotify._session_instance global in the following
# callbacks.
@staticmethod
@ffi.callback('void(sp_session *, sp_error)')
def logged_in(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
if error_type == spotify.ErrorType.OK:
logger.info('Spotify logged in')
else:
logger.error('Spotify login error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.LOGGED_IN, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback('void(sp_session *)')
def logged_out(sp_session):
if not spotify._session_instance:
return
logger.info('Spotify logged out')
spotify._session_instance.emit(
SessionEvent.LOGGED_OUT, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *)')
def metadata_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('Metadata updated')
spotify._session_instance.emit(
SessionEvent.METADATA_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *, sp_error)')
def connection_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error('Spotify connection error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.CONNECTION_ERROR, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback('void(sp_session *, const char *)')
def message_to_user(sp_session, data):
if not spotify._session_instance:
return
data = utils.to_unicode(data).strip()
logger.debug('Message to user: %s', data)
spotify._session_instance.emit(
SessionEvent.MESSAGE_TO_USER, spotify._session_instance, data
)
@staticmethod
@ffi.callback('void(sp_session *)')
def notify_main_thread(sp_session):
if not spotify._session_instance:
return
logger.debug('Notify main thread')
spotify._session_instance.emit(
SessionEvent.NOTIFY_MAIN_THREAD, spotify._session_instance
)
@staticmethod
@ffi.callback(
'int(sp_session *, const sp_audioformat *, const void *, int)'
)
def music_delivery(sp_session, sp_audioformat, frames, num_frames):
if not spotify._session_instance:
return 0
if (
spotify._session_instance.num_listeners(SessionEvent.MUSIC_DELIVERY)
== 0
):
logger.debug('Music delivery, but no listener')
return 0
audio_format = spotify.AudioFormat(sp_audioformat)
frames_buffer = ffi.buffer(
frames, audio_format.frame_size() * num_frames
)
frames_bytes = frames_buffer[:]
num_frames_consumed = spotify._session_instance.call(
SessionEvent.MUSIC_DELIVERY,
spotify._session_instance,
audio_format,
frames_bytes,
num_frames,
)
logger.debug(
'Music delivery of %d frames, %d consumed',
num_frames,
num_frames_consumed,
)
return num_frames_consumed
@staticmethod
@ffi.callback('void(sp_session *)')
def play_token_lost(sp_session):
if not spotify._session_instance:
return
logger.debug('Play token lost')
spotify._session_instance.emit(
SessionEvent.PLAY_TOKEN_LOST, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *, const char *)')
def log_message(sp_session, data):
if not spotify._session_instance:
return
data = utils.to_unicode(data).strip()
logger.debug('libspotify log message: %s', data)
spotify._session_instance.emit(
SessionEvent.LOG_MESSAGE, spotify._session_instance, data
)
@staticmethod
@ffi.callback('void(sp_session *)')
def end_of_track(sp_session):
if not spotify._session_instance:
return
logger.debug('End of track')
spotify._session_instance.emit(
SessionEvent.END_OF_TRACK, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *, sp_error)')
def streaming_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error('Spotify streaming error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.STREAMING_ERROR, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback('void(sp_session *)')
def user_info_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('User info updated')
spotify._session_instance.emit(
SessionEvent.USER_INFO_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *)')
def start_playback(sp_session):
if not spotify._session_instance:
return
logger.debug('Start playback called')
spotify._session_instance.emit(
SessionEvent.START_PLAYBACK, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *)')
def stop_playback(sp_session):
if not spotify._session_instance:
return
logger.debug('Stop playback called')
spotify._session_instance.emit(
SessionEvent.STOP_PLAYBACK, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *, sp_audio_buffer_stats *)')
def get_audio_buffer_stats(sp_session, sp_audio_buffer_stats):
if not spotify._session_instance:
return
if (
spotify._session_instance.num_listeners(
SessionEvent.GET_AUDIO_BUFFER_STATS
)
== 0
):
logger.debug('Audio buffer stats requested, but no listener')
return
logger.debug('Audio buffer stats requested')
stats = spotify._session_instance.call(
SessionEvent.GET_AUDIO_BUFFER_STATS, spotify._session_instance
)
sp_audio_buffer_stats.samples = stats.samples
sp_audio_buffer_stats.stutter = stats.stutter
@staticmethod
@ffi.callback('void(sp_session *)')
def offline_status_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('Offline status updated')
spotify._session_instance.emit(
SessionEvent.OFFLINE_STATUS_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *, const char *)')
def credentials_blob_updated(sp_session, data):
if not spotify._session_instance:
return
data = ffi.string(data)
logger.debug('Credentials blob updated: %r', data)
spotify._session_instance.emit(
SessionEvent.CREDENTIALS_BLOB_UPDATED,
spotify._session_instance,
data,
)
@staticmethod
@ffi.callback('void(sp_session *)')
def connection_state_updated(sp_session):
if not spotify._session_instance:
return
logger.debug('Connection state updated')
spotify._session_instance.emit(
SessionEvent.CONNECTION_STATE_UPDATED, spotify._session_instance
)
@staticmethod
@ffi.callback('void(sp_session *, sp_error)')
def scrobble_error(sp_session, sp_error):
if not spotify._session_instance:
return
error_type = spotify.ErrorType(sp_error)
logger.error('Spotify scrobble error: %r', error_type)
spotify._session_instance.emit(
SessionEvent.SCROBBLE_ERROR, spotify._session_instance, error_type
)
@staticmethod
@ffi.callback('void(sp_session *, bool)')
def private_session_mode_changed(sp_session, is_private):
if not spotify._session_instance:
return
is_private = bool(is_private)
status = 'private' if is_private else 'public'
logger.debug('Private session mode changed: %s', status)
spotify._session_instance.emit(
SessionEvent.PRIVATE_SESSION_MODE_CHANGED,
spotify._session_instance,
is_private,
)
|
mopidy/pyspotify
|
spotify/session.py
|
Python
|
apache-2.0
| 40,801
|
import numpy as np
A = np.matrix('1 2; 3 4')
B = np.matrix([[5, 6],
[7, 8]])
print(A * B)
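# Note: np.matrix is discouraged in current NumPy in favour of plain
# ndarrays; an equivalent with arrays and the @ operator would be:
#
#   A = np.array([[1, 2], [3, 4]])
#   B = np.array([[5, 6], [7, 8]])
#   print(A @ B)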
|
pdbartlett/misc-stuff
|
python/islp/01intro.py
|
Python
|
apache-2.0
| 106
|
from core.vectors import ModuleExec
from core.module import Module, Status
from core import modules
from core import messages
from core.loggers import log
from distutils import spawn
from mako import template
import tempfile
import subprocess
import atexit
class Mount(Module):
"""Mount remote filesystem using HTTPfs."""
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.register_arguments([
{ 'name' : '-rpath', 'help' : 'Remote web path where to save the agent. If it is a folder find the first writable folder in it', 'default' : '.' },
{ 'name' : '-httpfs-binary', 'default' : 'httpfs' },
{ 'name' : '-no-autoremove', 'action' : 'store_true', 'default' : False, 'help' : 'Do not autoremove on exit' }
])
def run(self):
# Check binary
binary_path = spawn.find_executable(
self.args['httpfs_binary']
)
if not binary_path:
log.error(
messages.module_file_mount.httpfs_s_not_found % self.args['httpfs_binary']
)
return
# Generate PHP agent
try:
status = 0
agent = subprocess.check_output(
[ binary_path, 'generate', 'php' ]
)
except subprocess.CalledProcessError as e:
status = e.returncode
agent = ''
if status or not agent:
log.error(
messages.module_file_mount.error_generating_agent
)
return
# Save temporary PHP agent, and upload it
temp_file = tempfile.NamedTemporaryFile(
suffix = '.php',
prefix = '',
delete = False
)
temp_file.write(agent)
# Without this flush(), only a portion
# of the file gets uploaded
temp_file.flush()
result = ModuleExec(
'file_upload2web',
[
temp_file.name,
self.args['rpath']
]
).run()
temp_file.close()
if (
not result or
not result[0] or
len(result[0]) != 2 or
not result[0][0] or
not result[0][1]
):
log.error(
messages.module_file_mount.failed_agent_upload
)
return
self.args.update({
'agent_abs_path' : result[0][0],
'agent_url' : result[0][1]
})
log.warn(
template.Template(
messages.module_file_mount.agent_installed_tutorial
).render(**self.args)
)
if self.args['no_autoremove']:
log.warn(messages.module_file_mount.httpfs_agent_manually_remove_s % (result[0][0]))
else:
log.warn(messages.module_file_mount.httpfs_agent_removed)
atexit.register(
ModuleExec('file_rm', [
result[0][0]
]
).run
)
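# Illustrative terminal usage (hypothetical session; the module name
# follows weevely's folder_file convention for modules/file/mount.py):
#
#   weevely> :file_mount -rpath /var/www/uploads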
|
dtrip/weevely3
|
modules/file/mount.py
|
Python
|
gpl-3.0
| 3,403
|
#!/usr/bin/env python
# coding: utf-8
## Check bash env for debug and production settings, mirrored into globals()
from os import environ
globals()['PRD_ENV'] = 0 # 1=True, 0=False
if environ.get('PYDEBUG'): import pdb; pdb.set_trace()
# if environ.get('DEBUG'): globals()['DEBUG'] = 1
if environ.get('PRD_ENV'): globals()['PRD_ENV'] = 1
else: globals()['PRD_ENV'] = 0
### End Env
## STAGING CONFIGS ##
SITE_STG = "14456"
DB_URI_STG = 'oracle+cx_oracle://MZIMG:p1zza4me@qarac201-vip.qa.bluefly.com:1521/bfyqa1201'
MOZU_BASE_STG = "staging-sb.mozu.com"
TENANT_STG = '11146'
MOZU_MASTER_CATID_STG = "1"
__MOZU_AUTH_URL_STG__ = "https://home.staging.mozu.com/api/platform/applications/authtickets"
__STG_AUTH__ = {'applicationId': 'bluefly.product_images.1.0.0.release',
'sharedSecret': '53de2fb67cb04a95af323693caa48ddb'}
## PROD CONFIGS ##
SITE_PRD = "16829"
DB_URI_PRD = 'oracle+cx_oracle://MZIMG:m0zu1mages@borac102-vip.l3.bluefly.com:1521/bfyprd12'
MOZU_BASE_PRD = "mozu.com"
__MOZU_AUTH_URL_PRD__ = "https://home.mozu.com/api/platform/applications/authtickets"
TENANT_PRD = '12106'
MOZU_MASTER_CATID_PRD = "2"
__PRD_AUTH__ = {'applicationId': 'bluefly.ImageSync.1.0.0.Release',
'sharedSecret': '0b8eb07f0e654f2eb9d972276e0005d1'}
## STANDARD CONFIGS -- Used in both STG and PRD ##
MOZU_PROTOCOL = "https"
MOZU_LIST_FQN = 'files@mozu'
MOZU_DOCUMENT_TYPE_FQN = 'image@mozu'
#################################################
### ALL Variable Configs can be set above for ###
#################################################
def set_environment():
from os import environ
# Set Standard Env vars
environ['MOZU_PROTOCOL'] = MOZU_PROTOCOL
environ['MOZU_LIST_FQN'] = MOZU_LIST_FQN
environ['MOZU_DOCUMENT_TYPE_FQN'] = MOZU_DOCUMENT_TYPE_FQN
if globals()['PRD_ENV'] == 0:
## STAGING: database and all other values below point to STG
environ['SQLALCHEMY_DATABASE_URI'] = DB_URI_STG
environ['MOZU_TENANT_NAME'] = TENANT_STG
environ['MOZU_SITE_NAME'] = SITE_STG
environ['MOZU_BASE_URL'] = MOZU_BASE_STG
environ['MOZU_AUTH_URL'] = __MOZU_AUTH_URL_STG__
environ['MOZU_MASTER_CATALOG_ID'] = MOZU_MASTER_CATID_STG
print 'SET ENV 1\tSTAGING ENV CONFIG SET \t{}\n'.format(environ.get('PRD_ENV'))
elif globals()['PRD_ENV'] == 1:
environ['SQLALCHEMY_DATABASE_URI'] = DB_URI_PRD
environ['MOZU_TENANT_NAME'] = TENANT_PRD
environ['MOZU_SITE_NAME'] = SITE_PRD
environ['MOZU_BASE_URL'] = MOZU_BASE_PRD
environ['MOZU_AUTH_URL'] = __MOZU_AUTH_URL_PRD__
environ['MOZU_MASTER_CATALOG_ID'] = MOZU_MASTER_CATID_PRD
print 'SET ENV 2\tPRODUCTION ENV CONFIG SET \t\v{}\t***RUNNING IN PRODUCTION***\n\n\n\t***RUNNING IN PRODUCTION***\n\n\n\t***RUNNING IN PRODUCTION***\n\n\nPRD_ENV Is Set to 1\n'.format(environ.get('PRD_ENV'))
else:
environ['SQLALCHEMY_DATABASE_URI'] = DB_URI_STG
environ['MOZU_TENANT_NAME'] = TENANT_STG
environ['MOZU_SITE_NAME'] = SITE_STG
environ['MOZU_BASE_URL'] = MOZU_BASE_STG
environ['MOZU_AUTH_URL'] = __MOZU_AUTH_URL_STG__
environ['MOZU_MASTER_CATALOG_ID'] = MOZU_MASTER_CATID_STG
print 'SET ENV 3\tSTAGING ENV CONFIG ASSUMED PROD env var not Set Correctly {} \n'.format(environ.get('PRD_ENV'))
#print 'LOCAL ENV SET for MOZU:\n\n ', dict(environ)
return
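# Illustrative shell usage (assumes this module is executed directly;
# the PRD_ENV/PYDEBUG variables are read at import time above):
#
#   PRD_ENV=1 python base_config.py   # selects the PROD block
#   python base_config.py             # defaults to STAGING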
def get_mozu_client_authtoken():
import requests, json
set_environment()
_auth_headers = {'Content-type': 'application/json', 'Accept-Encoding': 'gzip, deflate'}
if globals()['PRD_ENV'] == 0:
_auth_request = __STG_AUTH__
_auth_url = __MOZU_AUTH_URL_STG__
elif globals()['PRD_ENV'] == 1:
_auth_request = __PRD_AUTH__
_auth_url = __MOZU_AUTH_URL_PRD__
#_auth_headers_prod_addition = {'x-vol-tenant': TENANT_PRD, 'x-vol-master-catalog': MOZU_MASTER_CATID_PRD }
#_auth_headers = dict(list(_auth_headers.items()) + list(_auth_headers_prod_addition.items()))
#print environ
else:
_auth_request = __STG_AUTH__
_auth_url = __MOZU_AUTH_URL_STG__
_auth_response = requests.post(_auth_url, data=json.dumps(_auth_request), headers=_auth_headers, verify=False)
print "Auth Response: {0}".format(_auth_response.status_code)
print _auth_response.text
print _auth_response.headers
print '\n\n', _auth_request, _auth_headers, '\t', _auth_url
#print '\n', environ.items(), '\n'
_auth_response.raise_for_status()
_auth = _auth_response.json()
# print "Auth Ticket: {0}".format(_auth["accessToken"])
return _auth["accessToken"] #, _auth_response.status_code
def authenticate():
auth = get_mozu_client_authtoken()
return auth
if __name__ == '__main__':
set_environment()
|
relic7/prodimages
|
mozu/base_config.py
|
Python
|
mit
| 4,826
|
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
"""Module for the template control mappers."""
__all__ = ["HDFMapControlTemplate", "HDFMapControlCLTemplate"]
import h5py
import numpy as np
import os
from abc import ABC, abstractmethod
from typing import Iterable, List, Union
from warnings import warn
from .parsers import CLParse
from .types import ConType
class HDFMapControlTemplate(ABC):
# noinspection PySingleQuotedDocstring
'''
Template class for all control mapping classes to inherit from.
Any inheriting class should define :code:`__init__` as::
def __init__(self, group: h5py.Group):
"""
:param group: HDF5 group object
"""
# initialize
HDFMapControlTemplate.__init__(self, group)
# define control type
self.info['contype'] = ConType.motion
# populate self.configs
self._build_configs()
.. note::
* Any method that raises a :exc:`NotImplementedError` is
intended to be overwritten by the inheriting class.
* :code:`from bapsflib._hdf.maps.controls import ConType`
* If a control device is structured around a
:ibf:`command list`, then its mapping class should subclass
:class:`~.templates.HDFMapControlCLTemplate`, which is a subclass of
:class:`~.templates.HDFMapControlTemplate` that adds methods for
parsing/handling a command list.
'''
def __init__(self, group: h5py.Group):
"""
:param group: the control device HDF5 group object
"""
# condition group arg
if isinstance(group, h5py.Group):
self._control_group = group
else:
raise TypeError("arg `group` is not of type h5py.Group")
# define _info attribute
self._info = {
"group name": os.path.basename(group.name),
"group path": group.name,
"contype": NotImplemented,
}
# initialize configuration dictionary
self._configs = {}
@property
def configs(self) -> dict:
"""
Dictionary containing all the relevant mapping information to
translate the HDF5 data into a numpy array by
:class:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls`.
**-- Constructing** :code:`configs` **--**
The :code:`configs` dict is a nested dictionary where the first
level of keys represents the control device configuration names.
Each configuration corresponds to one dataset in the HDF5
control group and represents a grouping of state values
associated with a probe or instrument used during an
experiment.
Each configuration is a dictionary consisting of a set of
required keys (:code:`'dset paths'`, :code:`'shotnum'`, and
:code:`'state values'`) and optional keys. Any optional key is
considered as meta-info for the device and is added to the
:attr:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls.info`
dictionary when the numpy array is constructed. The required
keys constitute the mapping for constructing the numpy array
and are explained in the table below.
.. csv-table:: Dictionary breakdown for
:code:`config = configs['config name']`
:header: "Key", "Description"
:widths: 20, 60
"::
config['dset paths']
", "
Internal HDF5 path to the dataset associated with the
control device configuration. For example, ::
config['dset paths'] = ('/foo/bar/Control/d1', )
"
"::
config['shotnum']
", "
Defines how the run shot numbers are stored in the HDF5
file, which are mapped to the :code:`'shotnum'` field of the
constructed numpy array. Should look like, ::
config['shotnum'] = {
'dset paths': config['dset paths'],
'dset field': ('Shot number',),
'shape': (),
'dtype': numpy.int32,
}
where :code:`'dset paths'` is the internal HDF5 path to the
dataset, :code:`'dset field'` is the field name of the
dataset containing shot numbers, :code:`'shape'` is the
numpy shape of the shot number data, and :code:`'dtype'`
is the numpy :code:`dtype` of the data. This all defines
the numpy :code:`dtype` of the :code:`'shotnum'` field in
the
:class:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls`
constructed numpy array.
"
"::
config['state values']
", "
This is another dictionary defining :code:`'state values'`.
For example, ::
config['state values'] = {
'xyz': {
'dset paths': config['dset paths'],
'dset field': ('x', 'y', 'z'),
'shape': (3,),
'dtype': numpy.float32}
}
will tell
:class:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls`
to construct a numpy array with the :code:`'xyz'` field.
This field would be a 3-element array of
:code:`numpy.float32`, where the :code:`'x'` field of the
HDF5 dataset is mapped to the 1st index, :code:`'y'` is
mapped to the 2nd index, and :code:`'z'` is mapped to the
3rd index.
**Note:**
* A state value field (key) cannot be defined as
:code:`'signal'` since this field is reserved for
digitizer data constructed by
:class:`~bapsflib._hdf.utils.hdfreaddata.HDFReadData`.
* If state value data represents probe position data, then
it should be given the field name (key) :code:`'xyz'`
(like in the example above).
"
If a control device saves data around the concept of a
:ibf:`command list`, then :code:`configs` has a few additional
required keys, see table below.
.. csv-table:: Additional required keys for
:code:`config = configs['config name']` when
the control device saves data around the concept
of a :ibf:`command list`.
:header: "Key", "Description"
:widths: 20, 60
"::
config['command list']
", "
A tuple representing the original **command list**.
For example, ::
config['command list'] = ('VOLT: 20.0',
'VOLT 25.0',
'VOLT 30.0')
"
"::
config['state values']
", "
Has all the same keys as before, plus the addition of
:code:`'command list'`, :code:`'cl str'`,
and :code:`'re pattern'`.
For example, ::
config['state values'] = {
'command': {
'dset paths': config['dset paths'],
'dset field': ('Command index',),
'shape': (),
'dtype': numpy.float32,
'command list': (20.0, 25.0, 30.0),
'cl str': ('VOLT: 20.0', 'VOLT 25.0',
'VOLT 30.0'),
're pattern': re.compile(r'some RE pattern')}
}
where :code:`'re pattern'` is the compiled RE pattern used
to parse the original **command list**, :code:`'cl str'` is
the matched string segment of the **command list**, and
:code:`'command list'` is the set of values that will
populate the constructed numpy array.
"
.. note::
For further details, look to :ref:`add_control_mod`.
"""
return self._configs
@property
def contype(self) -> ConType:
"""control device type"""
return self._info["contype"]
@property
def dataset_names(self) -> List[str]:
"""list of names of the HDF5 datasets in the control group"""
dnames = [
name for name in self.group if isinstance(self.group[name], h5py.Dataset)
]
return dnames
@property
def group(self) -> h5py.Group:
"""Instance of the HDF5 Control Device group"""
return self._control_group
@property
def has_command_list(self) -> bool:
"""
:return: :code:`True` if dataset utilizes a command list
"""
has_cl = False
for config_name in self._configs:
if "command list" in self._configs[config_name]:
has_cl = True
break
return has_cl
@property
def info(self) -> dict:
"""
Control device dictionary of meta-info. For example, ::
info = {
'group name': 'Control',
'group path': '/foo/bar/Control',
'contype': 'motion',
}
"""
return self._info
@property
def one_config_per_dset(self) -> bool:
"""
:code:`True` if each control configuration has its own dataset
"""
n_dset = len(self.dataset_names)
n_configs = len(self._configs)
return n_dset == n_configs
@property
def subgroup_names(self) -> List[str]:
"""list of names of the HDF5 sub-groups in the control group"""
sgroup_names = [
name for name in self.group if isinstance(self.group[name], h5py.Group)
]
return sgroup_names
@property
def device_name(self) -> str:
"""Name of Control device"""
return self._info["group name"]
@abstractmethod
def construct_dataset_name(self, *args) -> str:
"""
Constructs the dataset name corresponding to the input
arguments.
:return: name of dataset
:raise: :exc:`NotImplementedError`
"""
raise NotImplementedError
@abstractmethod
def _build_configs(self):
"""
Gathers the necessary metadata and fills :data:`configs`.
:raise: :exc:`NotImplementedError`
"""
raise NotImplementedError
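# A minimal concrete mapper, sketched from the class docstring above
# (names are hypothetical; only __init__ and the two abstract methods
# are strictly required):
#
#   class HDFMapControlMyDevice(HDFMapControlTemplate):
#       def __init__(self, group: h5py.Group):
#           HDFMapControlTemplate.__init__(self, group)
#           self.info['contype'] = ConType.motion
#           self._build_configs()
#
#       def construct_dataset_name(self, *args) -> str:
#           return 'Run data'
#
#       def _build_configs(self):
#           self._configs['config01'] = {
#               'dset paths': (self.group.name + '/Run data',),
#               'shotnum': {...},       # as documented in `configs`
#               'state values': {...},  # as documented in `configs`
#           }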
class HDFMapControlCLTemplate(HDFMapControlTemplate):
# noinspection PySingleQuotedDocstring
'''
A modified :class:`HDFMapControlTemplate` template class for
mapping control devices that record around the concept of a
:ibf:`command list`.
Any inheriting class should define :code:`__init__` as::
def __init__(self, group: h5py.Group):
"""
:param group: HDF5 group object
"""
# initialize
HDFMapControlCLTemplate.__init__(self, group)
# define control type
self.info['contype'] = ConType.waveform
# define known command list RE patterns
self._default_re_patterns = (
r'(?P<FREQ>(\bFREQ\s)(?P<VAL>(\d+\.\d*|\.\d+|\d+\b)))',
)
# populate self.configs
self._build_configs()
.. note::
* Any method that raises a :exc:`NotImplementedError` is
intended to be overwritten by the inheriting class.
* :code:`from bapsflib._hdf.maps.controls import ConType`
'''
def __init__(self, group: h5py.Group):
"""
:param group: the control device HDF5 group object
"""
HDFMapControlTemplate.__init__(self, group)
# initialize internal 'command list' regular expression (RE)
# patterns
self._default_re_patterns = ()
"""tuple of default RE patterns for the control device"""
@abstractmethod
def _default_state_values_dict(self, config_name: str) -> dict:
"""
Returns the default :code:`'state values'` dictionary for
configuration *config_name*.
:param str config_name: configuration name
:raise: :exc:`NotImplementedError`
:Example:
.. code-block:: python
# define default dict
default_dict = {
'command': {
'dset paths':
self._configs[config_name]['dset paths'],
'dset field': ('Command index', ),
're pattern': None,
'command list':
self._configs[config_name]['command list'],
'cl str':
self._configs[config_name]['command list'],
'shape': (),
}
}
default_dict['command']['dtype'] = \\
default_dict['command']['command list'].dtype
# return
return default_dict
"""
raise NotImplementedError
def _construct_state_values_dict(
self, config_name: str, patterns: Union[str, Iterable[str]]
) -> dict:
"""
Returns a dictionary for
:code:`configs[config_name]['state values']` based on the
supplied RE patterns. An empty dict is returned if the
construction failed.
:param config_name: configuration name
:param patterns: list of RE pattern strings
"""
# -- check requirements exist before continuing ----
# get dataset
dset_path = self._configs[config_name]["dset paths"][0]
dset = self.group.get(dset_path)
# ensure 'Command index' is a field
if "Command index" not in dset.dtype.names:
warn(f"Dataset '{dset_path}' does NOT have 'Command index' field")
return {}
# ensure 'Command index' is a field of scalars
if dset.dtype["Command index"].shape != () or not np.issubdtype(
dset.dtype["Command index"].type, np.integer
):
warn(
f"Dataset '{dset_path}' 'Command index' field is NOT a column of integers"
)
return {}
# -- apply RE patterns to 'command list' ----
success, sv_dict = self.clparse(config_name).apply_patterns(patterns)
# regex was unsuccessful, return an empty dict
if not success:
return {}
# -- complete `sv_dict` before return ----
# 1. 'command list' and 'cl str' are tuples from clparse
# 2. add 'dset paths'
# 3. add 'dset field'
# 4. add 'shape'
# 5. 'dtype' defined by clparse.apply_patterns
#
for state in sv_dict:
# add additional keys
sv_dict[state]["dset paths"] = self._configs[config_name]["dset paths"]
sv_dict[state]["dset field"] = ("Command index",)
sv_dict[state]["shape"] = ()
# return
return sv_dict
def clparse(self, config_name: str) -> CLParse:
"""
Return instance of
:class:`~bapsflib.lapd.controls.parsers.CLParse`
for `config_name`.
:param str config_name: configuration name
"""
# retrieve command list
cl = self._configs[config_name]["command list"]
# define clparse and return
return CLParse(cl)
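# Illustrative use (hypothetical configuration name and pattern),
# mirroring what _construct_state_values_dict() does internally:
#
#   success, sv = mapping.clparse('config01').apply_patterns(
#       (r'(?P<VOLT>(\bVOLT\s)(?P<VAL>(\d+\.\d*|\.\d+|\d+\b)))',))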
def reset_state_values_config(self, config_name: str, apply_patterns=False):
"""
Reset the :code:`configs[config_name]['state values']`
dictionary.
:param config_name: configuration name
:param bool apply_patterns: Set :code:`False` (DEFAULT) to
reset to :code:`_default_state_values_dict(config_name)`.
Set :code:`True` to rebuild dict using
:attr:`_default_re_patterns`.
"""
if apply_patterns:
# get sv_dict dict
sv_dict = self._construct_state_values_dict(
config_name, self._default_re_patterns
)
if not bool(sv_dict):
sv_dict = self._default_state_values_dict(config_name)
else:
# get default dict
sv_dict = self._default_state_values_dict(config_name)
# reset config
self._configs[config_name]["state values"] = sv_dict
def set_state_values_config(
self, config_name: str, patterns: Union[str, Iterable[str]]
):
"""
Rebuild and set
:code:`configs[config_name]['state values']` based on the
supplied RE *patterns*.
:param config_name: configuration name
:param patterns: list of RE strings
"""
# construct dict for 'state values' dict
sv_dict = self._construct_state_values_dict(config_name, patterns)
# update 'state values' dict
if not bool(sv_dict):
# do nothing since the RE parsing was unsuccessful
warn("RE parsing of 'command list' was unsuccessful, doing nothing")
else:
self._configs[config_name]["state values"] = sv_dict
|
rocco8773/bapsflib
|
bapsflib/_hdf/maps/controls/templates.py
|
Python
|
bsd-3-clause
| 17,703
|
#!/usr/bin/env python
# 8 band Audio equaliser from wav file
# import alsaaudio as aa
# import smbus
from struct import unpack
import numpy as np
import wave
from time import sleep
import sys
ADDR = 0x20 #The I2C address of MCP23017
DIRA = 0x00 #PortA I/O direction, by pin. 0=output, 1=input
DIRB = 0x01 #PortB I/O direction, by pin. 0=output, 1=input
BANKA = 0x12 #Register address for Bank A
BANKB = 0x13 #Register address for Bank B
# bus=smbus.SMBus(0) #Use '1' for newer Pi boards;
# #Set up the 23017 for 16 output pins
# bus.write_byte_data(ADDR, DIRA, 0); #all zeros = all outputs on Bank A
# bus.write_byte_data(ADDR, DIRB, 0); #all zeros = all outputs on Bank B
# def TurnOffLEDS ():
# bus.write_byte_data(ADDR, BANKA, 0xFF) #set all columns high
# bus.write_byte_data(ADDR, BANKB, 0x00) #set all rows low
# def Set_Column(row, col):
# bus.write_byte_data(ADDR, BANKA, col)
# bus.write_byte_data(ADDR, BANKB, row)
# # Initialise matrix
# TurnOffLEDS()
matrix = [0,0,0,0,0,0,0,0]
power = []
# weighting = [2,2,8,8,16,32,64,64] # Change these according to taste
weighting = [2,2,2,2,4,4,8,8] # Change these according to taste
# Set up audio
#wavfile = wave.open('test_stereo_16000Hz_16bit_PCM.wav','r')
#wavfile = wave.open('Media-Convert_test5_PCM_Stereo_VBR_8SS_44100Hz.wav','r')
wavfile = wave.open('Media-Convert_test2_PCM_Mono_VBR_8SS_48000Hz.wav','r')
sample_rate = wavfile.getframerate()
no_channels = wavfile.getnchannels()
chunk = 4096 # Use a multiple of 8
# output = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL)
# output.setchannels(no_channels)
# output.setrate(sample_rate)
# output.setformat(aa.PCM_FORMAT_S16_LE)
# output.setperiodsize(chunk)
# Return power array index corresponding to a particular frequency
def piff(val):
return int(2*chunk*val/sample_rate)
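# Worked example for this file's settings (chunk=4096 and, per the wav
# filename above, sample_rate=48000):
# piff(156) = int(2*4096*156/48000) = 26, so the 0-156 Hz band averages
# rFFT bins 0..25.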
def print_intensity(matrix):
levelFull = "||||||||"
levelEmpty = " " * 8  # eight spaces, so each bar pads to a fixed width
levelStr = ""
for level in matrix:
level = int(level)  # matrix holds numpy floats; slicing needs ints
levelStr += levelFull[0:level] + levelEmpty[0:8 - level] + " "
sys.stdout.write("\rlevel: " + levelStr)
sys.stdout.flush()
def calculate_levels(data, chunk, sample_rate):
#print ("[calculate_levels] chunk=%s, sample_rate: %s, len(data)=%s" % (chunk, sample_rate, len(data)));
if(len(data) != chunk):
print ("\n[calculate_levels] skiping: chunk=%s != len(data)=%s" % (chunk, len(data)));
return None;
global matrix
# Convert raw data (ASCII string) to numpy array
data = unpack("%dh"%(len(data)/2),data)
data = np.array(data, dtype='h')
# Apply FFT - real data
fourier=np.fft.rfft(data)
# Remove last element in array to make it the same size as chunk
fourier=np.delete(fourier,len(fourier)-1)
# Find average 'amplitude' for specific frequency ranges in Hz
power = np.abs(fourier)
matrix[0]= int(np.mean(power[piff(0) :piff(156):1]))
matrix[1]= int(np.mean(power[piff(156) :piff(313):1]))
matrix[2]= int(np.mean(power[piff(313) :piff(625):1]))
matrix[3]= int(np.mean(power[piff(625) :piff(1250):1]))
matrix[4]= int(np.mean(power[piff(1250) :piff(2500):1]))
matrix[5]= int(np.mean(power[piff(2500) :piff(5000):1]))
matrix[6]= int(np.mean(power[piff(5000) :piff(10000):1]))
# Produces an error, presumably due to the low sampling rate of the audio file
# matrix[7]= int(np.mean(power[piff(10000):piff(20000):1]))
# Tidy up column values for the LED matrix
matrix=np.divide(np.multiply(matrix,weighting),1000000)
# Set floor at 0 and ceiling at 8 for LED matrix
matrix=matrix.clip(0,8)
return matrix
# Process audio file
print "Processing....."
data = wavfile.readframes(chunk)
while data != '':
# output.write(data)
matrix = calculate_levels(data, chunk, sample_rate)
if matrix is None:
# `next` is a builtin, not a loop statement; read the next chunk and
# continue so print_intensity() is never called with None
data = wavfile.readframes(chunk)
continue
print_intensity(matrix)
# for i in range (0,8):
# Set_Column((1<<matrix[i])-1,0xFF^(1<<i))
sleep(0.1);
data = wavfile.readframes(chunk)
# TurnOffLEDS()
# =========================
|
mprinc/FeelTheSound
|
src/PoC/fft.py
|
Python
|
cc0-1.0
| 4,053
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import *
from karesansui.lib.utils import *
def assert_regexp_matches(text, regexp):
import re
if isinstance(regexp, basestring):
regexp = re.compile(regexp)
if not regexp.search(text):
message = '''Regexp didn't match: %r not found in %r''' % (regexp.pattern, text)
raise AssertionError(message)
class TestList(object):
def test_sample(self):
numbers = xrange(10)
eq_(len(numbers), 10)
assert max(numbers) == 9
assert_equal(sum(numbers), 45)
def test_dotsplit(self):
string_1 = "foo"
string_2 = "foo.bar"
string_3 = "foo.bar.hoge"
assert_equal(dotsplit(string_1)[0], "foo")
assert_equal(dotsplit(string_2)[0], "foo")
assert_equal(dotsplit(string_2)[1], "bar")
assert_equal(dotsplit(string_3)[0], "foo.bar")
assert_equal(dotsplit(string_3)[1], "hoge")
def test_ucfirst(self):
string_lc = "abcdefg012345-./"
string_uc = "ABCDEFG012345-./"
assert_equal(ucfirst(string_lc)[0:4], "Abcd")
assert_equal(ucfirst(string_uc)[0:4], "ABCD")
def test_lcfirst(self):
string_lc = "abcdefg012345-./"
string_uc = "ABCDEFG012345-./"
assert_equal(lcfirst(string_lc)[0:4], "abcd")
assert_equal(lcfirst(string_uc)[0:4], "aBCD")
def test_next_number(self):
min = 10
max = 20
exclude_numbers = [10,11,12]
# the original assigned these values but asserted nothing; assuming
# next_number() picks a free number in [min, max], check exactly that
ret = next_number(min,max,exclude_numbers)
assert min <= ret <= max
assert ret not in exclude_numbers
min = 21
max = 20
exclude_numbers = [10,11,12]
assert_equal(next_number(min,max,exclude_numbers), None)
def test_is_uuid(self):
uuid = string_from_uuid(generate_uuid())
assert_equal(is_uuid(uuid), True)
uuid_1 = generate_uuid()
uuid_2 = generate_uuid()
assert uuid_1 != uuid_2
def test_file_type(self):
assert_regexp_matches(file_type("/etc/hosts"),"ASCII .*text")
assert_regexp_matches(file_type("/bin/ls"),"bit LSB executable")
def test_sample():
numbers = xrange(10)
assert_equal(len(numbers), 10)
assert_equal(max(numbers), 9)
assert_equal(sum(numbers), 45)
|
karesansui/karesansui
|
karesansui/tests/nose/test_list.py
|
Python
|
mit
| 2,172
|
"""Tests for util.db module."""
import threading
import time
import unittest
from io import StringIO
import ddt
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.management import call_command
from django.db import IntegrityError, connection
from django.db.transaction import TransactionManagementError, atomic
from django.test import TestCase, TransactionTestCase
from django.test.utils import override_settings
from common.djangoapps.util.db import enable_named_outer_atomic, generate_int_id, outer_atomic
def do_nothing():
"""Just return."""
return
@ddt.ddt
class TransactionManagersTestCase(TransactionTestCase):
"""
Tests outer_atomic.
Note: This TestCase only works with MySQL.
To test do: "./manage.py lms --settings=test_with_mysql test util.tests.test_db"
"""
DECORATORS = {
'outer_atomic': outer_atomic(),
'outer_atomic_read_committed': outer_atomic(read_committed=True),
}
@ddt.data(
('outer_atomic', IntegrityError, None, True),
('outer_atomic_read_committed', type(None), False, True),
)
@ddt.unpack
def test_concurrent_requests(self, transaction_decorator_name, exception_class, created_in_1, created_in_2):
"""
Test that when isolation level is set to READ COMMITTED get_or_create()
for the same row in concurrent requests does not raise an IntegrityError.
"""
transaction_decorator = self.DECORATORS[transaction_decorator_name]
if connection.vendor != 'mysql':
raise unittest.SkipTest('Only works on MySQL.')
class RequestThread(threading.Thread):
""" A thread which runs a dummy view."""
def __init__(self, delay, **kwargs):
super().__init__(**kwargs)
self.delay = delay
self.status = {}
@transaction_decorator
def run(self):
"""A dummy view."""
try:
try:
User.objects.get(username='student', email='student@edx.org')
except User.DoesNotExist:
pass
else:
raise AssertionError('Did not raise User.DoesNotExist.')
if self.delay > 0:
time.sleep(self.delay)
__, created = User.objects.get_or_create(username='student', email='student@edx.org')
except Exception as exception: # pylint: disable=broad-except
self.status['exception'] = exception
else:
self.status['created'] = created
thread1 = RequestThread(delay=1)
thread2 = RequestThread(delay=0)
thread1.start()
thread2.start()
thread2.join()
thread1.join()
assert isinstance(thread1.status.get('exception'), exception_class)
assert thread1.status.get('created') == created_in_1
assert thread2.status.get('exception') is None
assert thread2.status.get('created') == created_in_2
def test_outer_atomic_nesting(self):
"""
Test that outer_atomic raises an error if it is nested inside
another atomic.
"""
if connection.vendor != 'mysql':
raise unittest.SkipTest('Only works on MySQL.')
outer_atomic()(do_nothing)() # pylint: disable=not-callable
with atomic():
atomic()(do_nothing)() # pylint: disable=not-callable
with outer_atomic():
atomic()(do_nothing)() # pylint: disable=not-callable
with self.assertRaisesRegex(TransactionManagementError, 'Cannot be inside an atomic block.'):
with atomic():
outer_atomic()(do_nothing)() # pylint: disable=not-callable
with self.assertRaisesRegex(TransactionManagementError, 'Cannot be inside an atomic block.'):
with outer_atomic():
outer_atomic()(do_nothing)() # pylint: disable=not-callable
def test_named_outer_atomic_nesting(self):
"""
Test that a named outer_atomic raises an error only if nested in
enable_named_outer_atomic and inside another atomic.
"""
if connection.vendor != 'mysql':
raise unittest.SkipTest('Only works on MySQL.')
outer_atomic(name='abc')(do_nothing)() # pylint: disable=not-callable
with atomic():
outer_atomic(name='abc')(do_nothing)() # pylint: disable=not-callable
with enable_named_outer_atomic('abc'):
outer_atomic(name='abc')(do_nothing)() # pylint: disable=not-callable # Not nested.
with atomic():
outer_atomic(name='pqr')(do_nothing)() # pylint: disable=not-callable # Not enabled.
with self.assertRaisesRegex(TransactionManagementError, 'Cannot be inside an atomic block.'):
with atomic():
outer_atomic(name='abc')(do_nothing)() # pylint: disable=not-callable
with enable_named_outer_atomic('abc', 'def'):
outer_atomic(name='def')(do_nothing)() # pylint: disable=not-callable # Not nested.
with atomic():
outer_atomic(name='pqr')(do_nothing)() # pylint: disable=not-callable # Not enabled.
with self.assertRaisesRegex(TransactionManagementError, 'Cannot be inside an atomic block.'):
with atomic():
outer_atomic(name='def')(do_nothing)() # pylint: disable=not-callable
with self.assertRaisesRegex(TransactionManagementError, 'Cannot be inside an atomic block.'):
with outer_atomic():
outer_atomic(name='def')(do_nothing)() # pylint: disable=not-callable
with self.assertRaisesRegex(TransactionManagementError, 'Cannot be inside an atomic block.'):
with atomic():
outer_atomic(name='abc')(do_nothing)() # pylint: disable=not-callable
with self.assertRaisesRegex(TransactionManagementError, 'Cannot be inside an atomic block.'):
with outer_atomic():
outer_atomic(name='abc')(do_nothing)() # pylint: disable=not-callable
@ddt.ddt
class GenerateIntIdTestCase(TestCase):
"""Tests for `generate_int_id`"""
@ddt.data(10)
def test_no_used_ids(self, times):
"""
Verify that we get a random integer within the specified range
when there are no used ids.
"""
minimum = 1
maximum = times
for __ in range(times):
assert generate_int_id(minimum, maximum) in list(range(minimum, (maximum + 1)))
@ddt.data(10)
def test_used_ids(self, times):
"""
Verify that we get a random integer within the specified range
but not in a list of used ids.
"""
minimum = 1
maximum = times
used_ids = {2, 4, 6, 8}
for __ in range(times):
int_id = generate_int_id(minimum, maximum, used_ids)
assert int_id in list(set(range(minimum, (maximum + 1))) - used_ids)
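# A hedged reference sketch (not edx's implementation) of the behaviour the
# two tests above pin down: pick a random unused integer in the inclusive
# range [minimum, maximum]. The helper name is hypothetical.
import random

def _generate_int_id_sketch(minimum, maximum, used_ids=None):
    """Return a random int in [minimum, maximum] that is not in used_ids."""
    candidates = set(range(minimum, maximum + 1)) - set(used_ids or ())
    return random.choice(sorted(candidates))  # assumes the range is not exhausted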
class MigrationTests(TestCase):
"""
Tests for migrations.
"""
@override_settings(MIGRATION_MODULES={})
def test_migrations_are_in_sync(self):
"""
Tests that the migration files are in sync with the models.
        If this fails, you need to run the Django command makemigrations.
The test is set up to override MIGRATION_MODULES to ensure migrations are
enabled for purposes of this test regardless of the overall test settings.
TODO: Find a general way of handling the case where if we're trying to
make a migrationless release that'll require a separate migration
release afterwards, this test doesn't fail.
"""
out = StringIO()
call_command("makemigrations", dry_run=True, verbosity=3, stdout=out)
output = out.getvalue()
assert 'No changes detected' in output
|
EDUlib/edx-platform
|
common/djangoapps/util/tests/test_db.py
|
Python
|
agpl-3.0
| 8,081
|
# -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
try:
set
except NameError:
from sets import Set as set
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, get_choice_opt, \
ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""
Lookup a filter by name. Return None if not found.
"""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""
    Return an instantiated filter. Options are passed to the filter
    initializer if given. Raise a ClassNotFound error if the filter is not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""
Return a generator of all filter names.
"""
for name in FILTERS:
yield name
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
"""
Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
class KeywordCaseFilter(Filter):
"""
    Convert keywords to lowercase or uppercase, or capitalize them
    (first letter uppercase, rest lowercase).
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(unicode, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""
Highlight a normal Name token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""
Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
*New in Pygments 0.8.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""
Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
*New in Pygments 0.8.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in {'spaces': u'·', 'tabs': u'»', 'newlines': u'¶'}.items():
opt = options.get(name, False)
if isinstance(opt, basestring) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' '*(tabsize-1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or ' '
tabs = self.tabs or '\t'
newlines = self.newlines or '\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
}
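# A minimal usage sketch (standard Pygments API): instantiate one of the
# filters registered above by name and attach it to a lexer. Kept commented
# out because importing lexers from this module would be circular.
#
# from pygments import highlight
# from pygments.lexers import PythonLexer
# from pygments.formatters import TerminalFormatter
#
# lexer = PythonLexer()
# lexer.add_filter(get_filter_by_name('codetagify'))
# print highlight('# TODO: fix me\n', lexer, TerminalFormatter())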
|
KeyIngredient/gaepaste
|
pygments/filters/__init__.py
|
Python
|
lgpl-3.0
| 9,514
|
""" Stores a scrip in an HDF5 file. Assumes the data exists locally."""
# This is more of a test script where we explore all the ideas.
import pandas as pd
from corp_actions_nse import get_corp_action_csv
csv_filename = '500209.csv'
COL_NAMES = ["Date", "Open Price", "High Price", "Low Price", "Close Price", "No.of Shares", "Deliverable Quantity"]
#
infy = pd.read_csv(csv_filename, index_col='Date', usecols=COL_NAMES, parse_dates=True)
infy.columns = list('OHLCVD')
infy = infy[::-1] # BSE scrip files are in reverse chronological order (latest first)
print(infy[:10])
hdf_filename = 'infy.h5'
c = get_corp_action_csv('infy')
corp_actions = {'corp_actions': c}
h5store = pd.HDFStore(hdf_filename)
h5store['infy'] = infy
h5store.get_storer('infy').attrs.corp_actions = c
h5store.close()
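# Note (pandas/PyTables behaviour): objects attached via get_storer(...).attrs
# are pickled into the HDF5 file, so they round-trip only within Python.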
# open again for reading
h5store = pd.HDFStore(hdf_filename)
infy = h5store['infy']
corp_actions = h5store.get_storer('infy').attrs.corp_actions
print(infy[:10])
for act in corp_actions:
    if act.action in ['B', 'S']:  # bonus or split
        ts = pd.Timestamp(act.ex_date)
        ratio = act.ratio
        before = infy.index < ts
        # .loc avoids chained assignment, which can silently write to a copy
        infy.loc[before, ['O', 'H', 'L', 'C']] *= ratio
        infy.loc[before, ['V', 'D']] /= ratio
print(infy[:10])
h5store.close()
|
gabhijit/tickdownload
|
scrip_to_hd5.py
|
Python
|
mit
| 1,435
|
"""Fake module to test tests."""
def test_proc(x):
    """Fake procedure to test doctests.
    >>> test_proc(42)
    43
    """
    return x + 1  # the doctest above expects 43, i.e. the successor of x
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
|
Grahack/test-travis
|
mytestmodule.py
|
Python
|
gpl-2.0
| 232
|
#!/usr/bin/env python
'''Testing a sprite.
The ball should bounce off the sides of the window. You may resize the
window.
This test should just run without failing.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import unittest
from pyglet.gl import glClear
import pyglet.window
import pyglet.window.event
from pyglet import clock
from scene2d import Sprite, Image2d, FlatView
from scene2d.image import TintEffect
from scene2d.camera import FlatCamera
ball_png = os.path.join(os.path.dirname(__file__), 'ball.png')
class BouncySprite(Sprite):
def update(self):
        # move by the stored velocity, then bounce off the window edges
        p = self.properties
        self.x += p['dx']
        self.y += p['dy']
        if self.left < 0:
            self.left = 0; p['dx'] = -p['dx']
        elif self.right > 320:
            self.right = 320; p['dx'] = -p['dx']
        if self.bottom < 0:
            self.bottom = 0; p['dy'] = -p['dy']
        elif self.top > 320:
            self.top = 320; p['dy'] = -p['dy']
class SpriteOverlapTest(unittest.TestCase):
def test_sprite(self):
w = pyglet.window.Window(width=320, height=320)
image = Image2d.load(ball_png)
ball1 = BouncySprite(0, 0, 64, 64, image, properties=dict(dx=10, dy=5))
ball2 = BouncySprite(288, 0, 64, 64, image,
properties=dict(dx=-10, dy=5))
view = FlatView(0, 0, 320, 320, sprites=[ball1, ball2])
view.fx, view.fy = 160, 160
clock.set_fps_limit(60)
e = TintEffect((.5, 1, .5, 1))
while not w.has_exit:
clock.tick()
w.dispatch_events()
ball1.update()
ball2.update()
if ball1.overlaps(ball2):
if 'overlap' not in ball2.properties:
ball2.properties['overlap'] = e
ball2.add_effect(e)
elif 'overlap' in ball2.properties:
ball2.remove_effect(e)
del ball2.properties['overlap']
view.clear()
view.draw()
w.flip()
w.close()
if __name__ == '__main__':
unittest.main()
|
nicememory/pie
|
pyglet/contrib/currently-broken/scene2d/tests/scene2d/SPRITE_OVERLAP.py
|
Python
|
apache-2.0
| 2,070
|
import json
content = ""
with open('annotated.json', 'r') as content_file:
content = content_file.read()
content = json.loads(content)
content = sorted(content, key = lambda x: x["name"])
for ing in content:
if("measurement" in ing.keys()):
continue
print(ing["name"])
meas = input("1 - ml, 2 - g, 3 - pc. 4- ignore\n")
if(meas == 1 or meas == "1"):
ing["measurement"] = "ml"
elif(meas == 2 or meas == "2"):
ing["measurement"] = "g"
elif(meas == 3 or meas == "3"):
ing["measurement"] = "pc"
elif(meas == 4 or meas == "4"):
ing["filter"] = 1
else:
break
content = [x for x in content if not("filter" in x.keys())]
with open('annotated.json', "w") as output_file:
    # the with-block closes the file, so no explicit close() is needed
    output_file.write(json.dumps(content))
|
blacksph3re/alastair2
|
crawl/annotate.py
|
Python
|
apache-2.0
| 780
|
# -*- coding: utf-8 -*-
"""
pyfda_rc.py
This file contains layout definitions for Qt and matplotlib widgets
A dark and a light theme can be selected via a constant, but this is more a
demonstration of how to set things up than a finished layout.
See
http://stackoverflow.com/questions/13034496/using-global-variables-between-files-in-python
http://pymotw.com/2/articles/data_persistence.html
for information on passing/storing data between files
See
http://doc.qt.io/qt-4.8/stylesheet-examples.html
http://www.informit.com/articles/article.aspx?p=1405556
for qss styling
Author: Christian Muenker
"""
from __future__ import division, unicode_literals
# importing pyfda_rc runs the module once, defining all module variables
# which are global (similar to class variables)
THEME = 'light'
# -----------------------------
# Layout for matplotlib widgets
# -----------------------------
# dark theme
mpl_dark = {'axes.facecolor':'black',
'axes.labelcolor':'white',
'axes.edgecolor':'white',
'axes.color_cycle': ['r', 'g', 'c', 'm', 'y', 'w'],
'figure.facecolor':'#202020',
'figure.edgecolor':'#808080', # also color for hatched specs in |H(f)|
'savefig.facecolor':'black',
'savefig.edgecolor':'black',
'xtick.color':'white',
'ytick.color':'white',
'text.color':'white',
'grid.color':'#CCCCCC'
}
# light theme
mpl_light = {'axes.facecolor':'white',
'axes.labelcolor':'black',
'axes.edgecolor':'black',
'axes.color_cycle': ['r', 'b', 'c', 'm', 'k'],
'figure.facecolor':'white',
'figure.edgecolor':'#808080', # also color for hatched specs in |H(f)|
'savefig.facecolor':'white',
'savefig.edgecolor':'white',
'xtick.color':'black',
'ytick.color':'black',
'text.color':'black',
'grid.color':'#222222'
}
# common layout settings
mpl_rc = {'lines.linewidth': 1.5,
'font.size':12, 'legend.fontsize':12,
'axes.labelsize':12, 'axes.titlesize':14, 'axes.linewidth':1,
'axes.formatter.use_mathtext': True,
'figure.figsize':(5,4), 'figure.dpi' : 100}
# ---------------------
# Layout for Qt widgets
# ---------------------
# dark theme
css_dark = {'TopWidget':('QWidget{color:white;background: #222222;}'
'QPushButton{background-color:grey; color:white;}'
'QTableView{alternate-background-color:#222222;'
'background-color:black; gridline-color: white;}'
'QHeaderView::section{background-color:rgb(190,1,1);}'
'QLineEdit{background: #222222; color:white;}'),
'LineEdit':'QLineEdit{background: #222222; color:white;}'
}
# 'QTabBar{color:black;} QTabBar::tab{background:darkgrey;}'
# 'QTabBar::tab:selected{background:lightblue;}'
# light theme
css_light = {'TopWidget':('.QTabWidget>QWidget>QWidget{border: 1px solid grey}'
'QTabWidget>QWidget{border-right: 1px solid grey;}'
'.QWidget{color:black; background: white}'
'QPushButton{background-color:lightgrey; color:black;}'
'QHeaderView::section{background-color:rgb(190,1,1);}'
'QLineEdit{background: white; color:black;}'),
'LineEdit':''
}
# 'TabBar':('QTabWidget::pane {border-top: 2px solid #C2C7CB;}'
# 'QTabBar{color:black;}'
# 'QTabBar::tab:selected {background:lightblue;}'),
# 'LineEdit':'QLineEdit{background: #EEFFFF; color:black;}'
# }
# common layout settings
TabBarCss = """
QTabWidget::pane { /* The tab widget frame */
border-top: 2px solid #C2C7CB;
}
QTabWidget::tab-bar {
left: 1px; /* move to the right by 1px */
}
/* Style the tab using the tab sub-control. Note that
it reads QTabBar _not_ QTabWidget */
QTabBar::tab{color:black;}
QTabBar::tab {
background: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 1,
stop: 0 white, stop: 0.5 lightgray, stop: 1.0 #C2C7CB);
border: 1px solid #C4C4C3;
border-bottom-color: #C2C7CB; /* same as the pane color */
border-top-left-radius: 4px;
border-top-right-radius: 4px;
min-width: 8ex;
padding: 2px;
}
QTabBar::tab:selected, QTabBar::tab:hover {background:lightblue;}
QTabBar::tab:selected {
border-color: #9B9B9B;
border-bottom-color: #C2C7CB; /* same as pane color */
}
QTabBar::tab:!selected {
margin-top: 2px; /* make non-selected tabs look smaller */
}
/* make use of negative margins for overlapping tabs */
QTabBar::tab:selected {
/* expand/overlap to the left and right by 4px */
margin-left: -4px;
margin-right: -4px;
}
QTabBar::tab:first:selected {
margin-left: 0; /* the first selected tab has nothing to overlap with on the left */
}
QTabBar::tab:last:selected {
margin-right: 0; /* the last selected tab has nothing to overlap with on the right */
}
QTabBar::tab:only-one {
margin: 0; /* if there is only one tab, we don't want overlapping margins */
}
"""
css_rc = {'TopWidget':('*[state="changed"]{background-color:yellow; color:black}'
'*[state="error"]{background-color:red; color:white}'
'*[state="fail"]{background-color:orange; color:white}'
'*[state="ok"]{background-color:green; color:white}'
'QPushButton:pressed {background-color:black; color:white}'
'QWidget{font-size:12px; font-family: Tahoma;}'
'QTabBar{font-size:13px; font-weight:bold;}') + TabBarCss,
'LineEdit':''
}
if THEME == 'dark':
mpl_rc.update(mpl_dark)
for key in css_rc:
css_rc[key]+= css_dark[key]
else:
mpl_rc.update(mpl_light)
for key in css_rc:
css_rc[key]+= css_light[key]
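def apply_mpl_theme():
    """Hedged helper sketch (not part of pyFDA itself): push the merged
    mpl_rc settings into matplotlib via the standard rcParams API. Note that
    some keys above (e.g. 'axes.color_cycle') only exist in older matplotlib."""
    import matplotlib as mpl
    mpl.rcParams.update(mpl_rc)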
params = {'N_FFT': 2048} # number of FFT points for plot commands (freqz etc.)
# Dictionary with translations between short method names and long names for
# response types
rt_names = {"LP":"Lowpass", "HP":"Highpass", "BP":"Bandpass",
"BS":"Bandstop", "AP":"Allpass", "MB":"Multiband",
"HIL":"Hilbert", "DIFF":"Differentiator"}
################## Some layout ideas ##########################################
#self.em = QtGui.QFontMetricsF(QtGui.QLineEdit.font()).width('m')
# 'QWidget':('QWidget{Background: #CCCCCC; color:black; font-size:14px;'
# 'font-weight:bold; border-radius: 1px;}')
""" QTabBar::tab:selected, QTabBar::tab:hover {
background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
stop: 0 #fafafa, stop: 0.4 #f4f4f4,
stop: 0.5 #e7e7e7, stop: 1.0 #fafafa);
QTabBar::tab {
background: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 1,
stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,
stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);
}
"""
css = """
/*height: 14px;*/
/*
QDialog{
Background-image:url('img/xxx.png');
font-size:14px;
color: black;
}
*/
QToolButton:hover{
Background: #DDEEFF;
}
"""
|
honahursey/pyFDA
|
pyfda/pyfda_rc.py
|
Python
|
apache-2.0
| 7,522
|
# -*- coding: utf-8 -*-
#
# Import script for .csv files.
# Note: manifests a strong printaholism.
#
import sys, os, csv, re
from optparse import OptionParser
# get path of the django project
path = ("/").join( sys.path[0].split("/")[:-1] )
ppath = ("/").join( sys.path[0].split("/")[:-2] )
if path not in sys.path:
sys.path.append(path)
if ppath not in sys.path:
sys.path.append(ppath)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
# django specific import
from django.conf import settings
from reanalyseapp.models import Enquete, Texte, Tag
from datetime import datetime
from reanalyseapp.views import *
def update( textes, enquete, csvdict ):
print " %s documents found in enquete: \"%s\", id:%s" % ( textes.count(), enquete.name, enquete.id )
print
for (counter, row) in enumerate(csvdict):
# print row
if counter == 0:
print " keys: %s" % row.keys()
# normally, the second meta_documents csv file line is a field description header.
continue
print " %s." % counter
try:
texte_url = row['*file']
texte_name = row['*name']
locationgeo = re.sub( r'[^0-9\.,-]', '', row['*locationgeo'])
#researcher = row['*researcher']
article = row['*article']
            # detect the date separator; dateFormat is currently unused because
            # the raw date string is stored as-is (strptime conversion is disabled)
            if('/' in row['*date']):
                dateFormat = "%d/%m/%y"
            elif('_' in row['*date']):
                dateFormat = "%d_%m_%y"
            elif('-' in row['*date']):
                dateFormat = "%d-%m-%y"
            date = row['*date'] #datetime.datetime.strptime(row['*date'], dateFormat) #"31-12-12"
except KeyError, e:
print " Field format is not valid: %s " % ( e )
break
            # doc_name = row['*name']
try:
texte = Texte.objects.get( enquete=enquete, name=row['*name'], locationpath__regex=( ".?%s" % os.path.basename( texte_url ) ) )
except Texte.DoesNotExist, e:
print " No texte found with : \"%s\", %s " % ( texte_name, e )
            foo = raw_input('\n Skip this line and go on? [ Y / N ] : ')
            if foo.upper() == 'N':
                print " Script stopped!"
break
continue
except Texte.MultipleObjectsReturned, e:
print " More than one texte found with : \"%s\", %s, %s " % ( texte_name, os.path.basename( texte_url ), e )
            foo = raw_input('\n Skip this line and go on? [ Y / N ] : ')
            if foo.upper() == 'N':
                print " Script stopped!"
break
print " %s \"%s\": %s" % ( texte.id, texte_name, locationgeo )
# get or save tag
print " %s \"%s\": %s" % ( texte.id, texte_name, article )
try:
t = Tag.objects.get( type=Tag.ARTICLE, slug=article )
except Tag.DoesNotExist, e:
print " %s \"%s\": creating tag [%s:%s]" % ( texte.id, texte_name, article, Tag.ARTICLE )
t = Tag( type=Tag.ARTICLE, slug=article, name=article)
t.save()
# save location geo
texte.locationgeo = locationgeo
texte.tags.add( t )
texte.date = date
texte.save()
#try
def install( upload_path, enquete_path ) :
from imexport import importEnqueteUsingMeta
print " from upload path '%s'" % upload_path
if not os.path.exists( upload_path ):
print " upload_path folder '%s' does not exists or it is not readable !" % upload_path
print
return
print " from upload path '%s'" % enquete_path
if not os.path.exists( enquete_path ):
print " enquete_path folder '%s' does not exists or it is not readable !" % enquete_path
print
return
print " call importEnqueteUsingMeta (please follow up in log file)"
importEnqueteUsingMeta( upload_path, enquete_path )
print " installation completed."
from reanalyseapp.models import *
def testTEIparse(texte_id):
texte = Texte.objects.get(id=texte_id)
parseXmlDocument(texte)
def testEnqueteImport(foldName):
folname = foldName
upPath = settings.REANALYSEUPLOADPATH+folname+"/"
enqueterootpath=''
for f in os.listdir(upPath+"extracted/"):
if os.path.exists(upPath+"extracted/"+f+"/_meta/"):
enqueterootpath = upPath+"extracted/"+f+"/"
e = importEnqueteUsingMeta(upPath,enqueterootpath)
if(e != None):
doFiestaToEnquete(e)
else:
print('ok')
def deleteSpeakers(enquete_id):
textes = Texte.objects.filter(enquete_id=enquete_id, doctype="TEI")
for t in textes :
speakers = t.speaker_set.filter()
print(speakers)
for s in speakers :
s.delete()
#
#CheckMetaDocuments
#Check if every file exists in MetaDocuments
#return False with error dictionary or True
#
def isMetaDocOK(upload_path, enquete_path):
from imexport import importEnqueteUsingMeta
if os.path.exists(enquete_path):
#mandatoryFields = ['*id','*name','*category','*description','*location','*date']
print("=========== PARSING META_DOCUMENTS.CSV TO CHECK IF A FILE IS MISSING IF TRUE IMPORT IS CANCELLED")
###### Parsing Documents
doc = csv.DictReader(open(enquete_path+'_meta/meta_documents.csv'),delimiter='\t')
error = False
error_dict = {}
for counter, row in enumerate(doc):
if row['*id']!='*descr':
file_location = upload_path+row['*file']
try:
open(file_location)
except IOError, e:
if(e.args[0] == 2):#no such file or directory
error = True
error_dict.update({file_location:e.args[1]})
print file_location
if(error is True):
return {'status':False, 'error_dict':error_dict}
else:
return True
def commit_enquete( enquete_id ):
#create dump of prod bdd
os.system('pg_dump -C -h 10.36.1.15 -U app app | psql -h localhost -U reanalyse reanalyse > prod_db.dump')
#create dump of dev bdd
os.system('pg_dump -Ft -b reanalyse > dev_db.dump')
#Create update sql file for production
os.system('apgdiff prod_db.dump dev_db.dump > diff.sql')
#change absolute path (/var/opt/reanalyse to /datas/www/app
#transfert
def main( argv ):
print """
WELCOME TO APP UPDATER
-------------------------------
"""
parser = OptionParser( usage="\n\n%prog --enquete=34 --csv=/home/dgu/meta_documents.csv" )
parser.add_option("-c", "--csv", dest="csvfile", help="csv file absolute path", default="" )
parser.add_option("-e", "--enquete", dest="enquete_id", help="enquete identifier", default=0 )
parser.add_option("-p", "--upload_path", dest="upload_path", help="enquete upload path", default="" ) #use with --func=install
parser.add_option("-x", "--enquete_path", dest="enquete_path", help="enquete extracted path", default="" ) #use with --func=install
parser.add_option("-f", "--function", dest="func", help="update function", default="update" )
parser.add_option("-d", "--document_id", dest="document_id", help="document id (Texte)", default="" )
parser.add_option("-D", "--directory", dest="directory", help="upload directory study", default="" )
( options, argv ) = parser.parse_args()
if options.func == "isMetaDocOK" :
print(options.func)
# install the enquete
return isMetaDocOK( options.upload_path, options.enquete_path )
if options.func == "install" :
if options.csvfile is None:
error("csvfile arg was not found!", parser)
else:
# install the enquete
return install( options.upload_path, options.enquete_path )
if options.func == "testTEIparse" :
print(options.func)
# install the enquete
return testTEIparse( options.document_id )
if options.func == "testDownload" :
print(options.func)
# install the enquete
return testDownload( options.enquete_id )
if options.func == "testEnqueteImport" :
print(options.func)
# install the enquete
return testEnqueteImport( options.directory )
if options.func == "parseAllTeis" :
print(options.func)
# reparseAllteis file of an enquete
return parseAllTeis( options.enquete_id )
if options.func == "deleteSpeakers" :
print(options.func)
# reparseAllteis file of an enquete
return deleteSpeakers( options.enquete_id )
if options.enquete_id is None:
error("enquete_id arg not found!", parser)
if not os.path.exists( options.csvfile ):
error( message="csv file was not found.", parser=parser )
try:
enquete = Enquete.objects.get( id=options.enquete_id )
textes = Texte.objects.filter( enquete=enquete )
except Enquete.DoesNotExist, e:
error("noo %s" % e, parser )
if textes.count() == 0:
error("no Texte is attached ...? Is that possible ?", parser )
# parse csv file !
f = open( options.csvfile, 'rb' )
csvdict = csv.DictReader( f, delimiter="\t" )
for t in textes:
pass# print(textes.count())#print t.name #, t.locationpath
update( textes, enquete, csvdict )
print """
-------------------------------
THANK YOU FOR USING APP UPDATER
Task completed. Bye!
"""
def parseAllTeis(enquete_id):
textes = Texte.objects.filter(enquete_id=enquete_id, doctype="TEI")
for t in textes :
parseXmlDocument(t)
def testDownload(enquete_id):
import zipfile, zlib
"""
zippath = os.path.join( "/tmp/", "enquete_%s.zip" % enquete_id )
zf = zipfile.ZipFile( zippath, mode='w' )
"""
for t in Texte.objects.filter( enquete_id=enquete_id ):
if('é'.decode('utf-8') in t.locationpath):
t.locationpath= t.locationpath.replace('é'.decode('utf-8'), 'e')
if os.path.isfile(t.locationpath.decode('utf-8')):
if( t.locationpath.find('_ol') or t.locationpath.find('_dl') ):
print(t.locationpath.split('/', 7)[7])
"""
zf.write( t.locationpath, compress_type=zipfile.ZIP_DEFLATED,
arcname= t.locationpath.split('/', 5)[5])"""
def error( message="generic error", parser=None):
print
print " ",message
print
print
if parser is not None:
parser.print_help()
exit(-1)
# execute script
if __name__ == '__main__':
main(sys.argv[1:])
|
medialab/reanalyse
|
reanalyseapp/update.py
|
Python
|
lgpl-3.0
| 11,321
|
import re
import os
import cmd
import sys
import common
from getpass import getpass
from kp import KeePassError, get_password
from configmanager import ConfigManager, ConfigManagerError
common.init()
class ParseArgsException(Exception):
def __init__(self, msg):
self.msg = msg
class ModuleCore(cmd.Cmd):
def __init__(self, module = ''):
cmd.Cmd.__init__(self)
self.master = None
if module == '#':
self.prompt_sign = '#>'
elif module != '':
self.prompt_sign = '[' + module + ']>'
else:
self.prompt_sign = '->'
#defaults
self.ruler = '-'
#Completions
self.directories = []
self.file_server_database = []
self.file_server = []
self.do_cd('.')
configs = ConfigManager().get_config_list()
for conf in configs:
self.file_server_database.append(conf)
self.file_server.append(conf)
for srv in ConfigManager('config/' + conf + '.yaml').get_all():
self.file_server_database.append(conf + '.' + srv)
self.file_server.append(conf + '.' + srv)
for db in ConfigManager('config/' + conf + '.yaml').get(srv)['databases']:
self.file_server_database.append(conf + '.' + srv + '.' + db)
def precmd(self, line):
if not sys.stdin.isatty():
print(line)
return line
def postcmd(self, stop, line):
if not sys.stdin.isatty():
print("")
return stop
    def parse_args(self, string="", n=0, m=0):
        # split on whitespace but keep double-quoted chunks together
        found = re.findall('"+.*"+|[a-zA-Z0-9!@#$%^&*()_+-,./<>?]+', string)
        arg_counter = len(found)
        if (n <= arg_counter <= m) or (arg_counter == n and m == 0) or n == 0:
            r_list = [f.replace('"', '') for f in found]
            return (r_list, arg_counter)
        else:
            raise ParseArgsException("Incorrect number of arguments")
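    # Example (hypothetical input): quoted chunks stay intact and the argument
    # count is validated against [n, m]:
    #   self.parse_args('copy "my file.txt" backup', 2, 3)
    #   -> (['copy', 'my file.txt', 'backup'], 3)
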
    # runs the given function (callback) on all databases
def exec_on_config(self, callback, args, values, view = ''): # link - file.server.base
        if values == '':  # run on all config files
            files = ConfigManager().get_config_list()  # fetch the list of configuration files
            # show what the callback will run on
print("Exec on:")
for file in files:
print('+-',file)
ans = input("Are you sure? [NO/yes/info]: ")
if ans == "yes": #wykonaj callback
for file in files:
if view == 'tree': print('+-', file)
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
if view == 'tree': print("| +-", srv)
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("| | +-", db)
if view == 'list': print('[', file, '->', srv, '->', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
elif ans == "info": #podaj tylko informację na czym callback zostałby wykonany
for file in files:
print('+-', file)
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
print('| +-', srv)
databases = servers[srv]["databases"]
for db in databases:
print('| | +-', db)
            else:  # nothing was confirmed, so do nothing
print("aborted")
        else:  # a specific target was given
            val = values.split('.')  # split into file_name.server.database
params = len(val)
            if params == 1:  # only a file name was given - run on every server and database recorded in it
file = val[0]
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
for srv in servers:
if view == 'tree': print("+-", srv)
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("| +-", db)
if view == 'list': print('[', srv, '->', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
            elif params == 2:  # a file and a server were given - run on every database on that server
file = val[0]
try:
servers = ConfigManager("config/" + file + ".yaml").get_all()
srv = val[1]
databases = servers[srv]["databases"]
for db in databases:
if view == 'tree': print("+-", db)
if view == 'list': print('[', db, ']')
callback(file, srv, db, *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
            elif params == 3:  # file, server and database given - run the command exactly on that one
try:
callback(val[0], val[1], val[2], *args)
except ConfigManagerError as e:
print(e)
except KeyError as e:
print(e, "is not exist")
    # helper - returns a shortened path to the current directory
def get_shortpath(self):
path = common.get_cdir()
separator = ''
if '\\' in path:
separator = '\\'
else:
separator = '/'
start = path.find(separator)
end = path.rfind(separator, 0, len(path)-1)
if start < end:
return (path[0:start+1] + '...' + path[end:])
else:
return (path)
    # cmd tab-completion for the cd command
def complete_cd(self, text, line, begidx, endidx):
if not text:
completions = self.directories[:]
else:
completions = [f for f in self.directories if f.startswith(text)]
return completions
    # the cd command - allows moving between directories
def do_cd(self, args):
"Move to directory"
if args == '':
print(common.get_cdir())
else:
try:
common.chdir(args)
self.prompt = self.get_shortpath() + ' ' + self.prompt_sign
self.directories = []
for name in os.listdir(common.get_cdir()):
if os.path.isdir(os.path.join(common.get_cdir(), name)):
self.directories.append(name)
except FileNotFoundError as e:
print(e)
    # lists all files in the current location
def do_ls(self, args):
"List directory"
for name in os.listdir(common.get_cdir()):
print(name)
    # prints the full path of the current directory
def do_pwd(self, args):
"Print path"
print(common.get_cdir())
    # lets you choose whether warnings are displayed
def do_warn(self, args):
"""warn <on/off>"""
try:
(values, values_num) = self.parse_args(args, 0, 1)
if values_num == 1:
if values[0] == 'on':
print('Warnings on')
self.warn = True
elif values[0] == 'off':
print('Warnings off')
self.warn = False
else:
print('Incorrect argument.')
else:
if self.warn == True:
print('Status: on')
else:
print('Status: off')
except ParseArgsException as e:
print(e)
    # sets the master password for KeePass
def do_setMaster(self,args):
"Set master password"
        if sys.stdin.isatty(): # when running as an interactive shell
p = getpass('Enter Master Password: ')
else:
p = sys.stdin.readline().rstrip()
self.master = p
def do_exit(self, *args):
return True
def do_EOF(self, line):
return True
def emptyline(self):
return False
    # Catch everything possible (missing file, wrong master password, etc.) and raise a single exception
def get_password(self, alias):
keepass_path = common.keepass_path
if self.master == None:
raise KeePassError("Master Password Not Set")
try:
return get_password(keepass_path, self.master, alias)
except KeePassError as e:
raise e
def connect_command_builder(self,connection, perm):
try:
command = connection["adress"] + "_" + connection["user"]+ "_" + \
self.get_password(connection["keepass"]) + "_" + str(connection["sshport"]) + "_" + str(connection["remoteport"]) + "_" + perm
except (KeyError, KeePassError) as e1:
try:
command = connection["adress"] + "_" + connection["user"]+ "_" + \
connection["passwd"] + "_" + str(connection["sshport"]) + "_" + str(connection["remoteport"]) + "_" + perm
return command
            except KeyError:
                # both the KeePass lookup and a plain password are unavailable
                if isinstance(e1, KeePassError):
                    raise KeePassError("Unable to use Keepass(" + e1.value + ") or Password")
                else:
                    raise KeePassError("Invalid connection in yaml file")
        return command
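    # The command string built above packs its fields with '_' separators,
    # e.g. (hypothetical values): '10.0.0.1_admin_s3cret_22_5432_rw'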
|
nokia-wroclaw/innovativeproject-dbshepherd
|
mod_core.py
|
Python
|
mit
| 8,041
|
import logging
from banal import ensure_list
from countrytagger import tag_place
log = logging.getLogger(__name__)
def location_country(location):
code, score, country = tag_place(location)
return ensure_list(country)
|
alephdata/ingestors
|
ingestors/analysis/country.py
|
Python
|
mit
| 230
|
#!/usr/bin/env python
from zenjsonclient import router, ZenJsonClientError
from sets import Set
from time import sleep
############ BASH BINDINGS DECLARATION ############
def bash_getDevices():
    import os
    import json
    os.system("./zenoss_getDevices.sh")
    # the helper script writes its JSON result to tmpfile.txt
    with open("tmpfile.txt") as f:
        data = f.read()
    return json.loads(data)
###################################################
#Step 0: Read input data
import ast
with open("output.txt") as f:
    f_data = f.read()
# literal_eval parses the Python-literal dump without eval's code-execution risk
srcdata = ast.literal_eval(f_data)
#Step 1: Get the list of locations in the zenoss
resp = router.device.getLocations()
zen_locations_dict_list = resp.result["locations"]
zen_locations_list = []
for each in zen_locations_dict_list:
if ('name' in each.keys()):
nm = each['name']
if (nm[0] == "/"):
nm=nm[1:]
zen_locations_list.append(nm)
print "Locations found in zenoss:"
for each in zen_locations_list:
print "\t",each
print "\n"
#Step 2: Get the list of locations from source data
src_locations_list = Set()
for each in srcdata:
if ('LOCATION' in each.keys()):
if ('name' in each['LOCATION'].keys()):
src_locations_list.add(each['LOCATION']['name'])
print "Locations found in the source file:"
for each in src_locations_list:
print "\t",each
print "\n"
#Step 3: Determine which locations are missing in zenoss
zen_locations_set = Set()
for each in zen_locations_list:
zen_locations_set.add(each)
src_locations_set = src_locations_list
locations_difference = src_locations_list - zen_locations_set
if (len(locations_difference)):
print "These locations from source data are missing in zenoss:"
for each in locations_difference:
print "\t",each
#Step 4: Add missing locations to zenoss
for each_location in locations_difference:
resp = router.device.addLocationNode(type='organizer', contextUid='/zport/dmd/Devices/Locations', id=each_location)
if (resp.result['success'] == True):
print "Succesfully added new location \"",each_location,"\" to zenoss."
#Step 5: Get hostnames/devices from zenoss
#resp = router.device.getDevices(uid="/zport/dmd/Devices", sort="name", limit=10000000)
#dev_response = resp.result['devices']
dev_response = bash_getDevices()
#Step 6: Collect devices hostnames from response and source data
dev_response_hostnames_set = Set()
for each in dev_response:
if ('name' in each):
dev_response_hostnames_set.add(each['name'])
print "Hostnames found in zenoss:"
for each in dev_response_hostnames_set:
print "\t",each
print
dev_source_hostnames_set = Set()
for each in srcdata:
if ('HOSTNAME' in each.keys()):
dev_source_hostnames_set.add(each['HOSTNAME'])
print "Hostnames found in source data:"
for each in dev_source_hostnames_set:
print "\t",each
print
dev_hostnames_difference_set_missing_in_zenoss = dev_source_hostnames_set - dev_response_hostnames_set
if (len(dev_hostnames_difference_set_missing_in_zenoss)):
print "These hostnames are missing in zenoss:"
for each in dev_hostnames_difference_set_missing_in_zenoss:
print "\t",each
print
dev_hostnames_difference_set_existing_in_zenoss = dev_response_hostnames_set & dev_source_hostnames_set
if (len(dev_hostnames_difference_set_existing_in_zenoss)):
print "These hostnames are present in zenoss and in the source file both:"
for each in dev_hostnames_difference_set_existing_in_zenoss:
print "\t",each
print
#Step 7: Insert missing hostnames
hosts_locs_table = []
for each in dev_hostnames_difference_set_missing_in_zenoss:
for each1 in srcdata:
if (each == each1['HOSTNAME']):
hosts_locs_table.append(each1)
break
print "Internal table created."
print "Going to insert hostnames/devices into zenoss."
print
#Step 7.2: Insert device with it's location into zenoss
for each_dev in hosts_locs_table:
locationP = each_dev['LOCATION']['name']
resp = router.device.addDevice(deviceName=each_dev['HOSTNAME'],
deviceClass='/Discovered',
locationPath=locationP)
print "Add Device, zenoss says: ", repr(resp.result)
#Step ?: Just printing
print "Now we have added new devices. Usually, zenoss puts it in his background queue,"
print "so we need to make sure that all backgorund task has been finished."
print "I'm going to pull you zenoss instance every 5 secs and check if added devices already in list."
#Step 8: Wait until all devices will be added to zenoss.
polling_finished_flag = False
pulling_list = {}
for each in srcdata:
pulling_list[each['HOSTNAME']]=False
while (not polling_finished_flag):
print "Pulling zenoos..."
#resp = router.device.getDevices(uid="/zport/dmd/Devices", sort="name", limit=10000000)
#dev_list_from_zen = resp.result['devices']
dev_list_from_zen = bash_getDevices()
for each_device in dev_list_from_zen:
if (each_device['name'] in pulling_list.keys()):
pulling_list[each_device['name']]=True
polling_finished_flag=True
for eachD in pulling_list.keys():
if (pulling_list[eachD] == False):
polling_finished_flag = False
if (not polling_finished_flag):
print pulling_list
print "Sleeping 5 seconds..."
sleep(5)
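#Hedged refactor sketch of the polling loop above (hypothetical helper, not
#used elsewhere): poll a fetch function until every expected name appears.
def _wait_for_names(fetch, expected, delay=5):
    remaining = set(expected)
    while (len(remaining)):
        found = set()
        for dev in fetch():
            if ('name' in dev):
                found.add(dev['name'])
        remaining = remaining - found
        if (len(remaining)):
            sleep(delay)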
print "Seems that background jobs has been done... Going to do resetIp for devices."
#Step 9: Seems that all missing devices have been added to zenoss, so now we can do resetIP for all devices
#mentioned in the source data (recently added and already existing)
#Step 9.1: Collect device uids and construct a UID<->IP table. Also filter out devices that aren't mentioned in srcdata.
uids_list = []
uid_ip_dict = {}
#resp = router.device.getDevices(uid="/zport/dmd/Devices", sort="name", limit=10000000)
#for each in resp.result['devices']:
for each in bash_getDevices():
if ('uid' in each):
uids_list.append(each['uid'])
nm_to_search = each['name']
for dev_in_source in srcdata:
if dev_in_source['HOSTNAME'] == nm_to_search:
uid_ip_dict[each['uid']]=dev_in_source['IP']
#Step 9.2: Just doing resetIP
print "\n\n\n"
for eachDeviceToResetIP in uid_ip_dict.keys():
uid = eachDeviceToResetIP
new_ip = uid_ip_dict[uid]
if (new_ip):
resp = router.device.resetIp(uids=[uid],hashcheck=1,ip=new_ip)
print "Resetting ip for UID %s, zenoss says: %s" % (uid, repr(resp.result))
#Step 10: Just print some interesting info
print "Device count in the source data: %d" % (len(srcdata))
flag=False
for each in uid_ip_dict.keys():
if (uid_ip_dict[each] == ""):
flag=True
if (flag):
print "Interesting thing! Some devices in the source data doesn't have ip:"
for each in uid_ip_dict.keys():
if (uid_ip_dict[each] == ""):
name = each.split("/")
print "\t",name[len(name)-1]
#Step 11: DONE!!!
print "\n\n\n\n\n"
print "DONE!"
|
thomasvincent/utilities
|
Standalone_Scripts/softlayer_iphostloc_puller/old_src/zenoss_injector.py
|
Python
|
apache-2.0
| 6,959
|
#coding: utf-8
import re
import logging
from urlparse import urljoin
from collections import defaultdict
from itertools import chain
from math import sqrt
from bs4 import BeautifulSoup as BS, Tag, NavigableString
from null import Null
from .utils import tokenize, string_inclusion_ratio
from .webimage import WebImage
from backports.functools_lru_cache import lru_cache
from markupsafe import escape
logger = logging.getLogger(__name__)
# Beautifulsoup will convert all tag names to lower-case
ignored_tags = ('option', 'script', 'noscript', 'style', 'iframe', 'head')
block_tags = {'article', 'header', 'aside', 'hgroup', 'blockquote', 'hr',
'body', 'li', 'br', 'map', 'button', 'object', 'canvas', 'ol', 'caption',
'output', 'col', 'p', 'colgroup', 'pre', 'dd', 'progress', 'div', 'section',
'dl', 'table', 'dt', 'tbody', 'embed', 'textarea', 'fieldset', 'tfoot', 'figcaption',
'th', 'figure', 'thead', 'footer', 'tr', 'form', 'ul', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'video', 'td'}
negative_patt = re.compile(r'comment|combx|disqus|foot|header|menu|rss|'
'shoutbox|sidebar|sponsor|vote|meta|shar|ad-', re.IGNORECASE)
positive_patt = re.compile(r'article|entry|post|column|main|content|'
'section|text|preview|view|story-body', re.IGNORECASE)
def tag_equal(self, other):
return id(self) == id(other)
# Use tag as keys in dom scores,
# two tags with the same content and attributes should not consider equal to each other.
# Tag.__eq__ = tag_equal
class HtmlContentExtractor(object):
"""
see https://github.com/scyclops/Readable-Feeds/blob/master/readability/hn.py
"""
def __init__(self, html, url=''):
# see http://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance
self.calc_img_area_len = lru_cache(1024)(self.calc_img_area_len)
# self.calc_effective_text_len = lru_cache(1024)(self.calc_effective_text_len)
self.max_score = -1
# dict uses __eq__ to identify key, while in BS two different nodes
# will also be considered equal, DO not use that
self.scores = defaultdict(int)
self.doc = BS(html)
self.title = (self.doc.title.string if self.doc.title else u'') or u''
self.article = Null
self.url = url
# call it before purge
self.get_favicon_url()
self.get_meta_description()
self.get_meta_image()
self.purge()
self.find_main_content()
# clean ups
# self.clean_up_html()
self.relative_path2_abs_url()
# def __del__(self):
# # TODO won't call
# logger.info('calc_effective_text_len: %s, parents_of_article_header: %s, calc_img_area_len: %s',
# self.calc_effective_text_len.cache_info(),
# self.parents_of_article_header.cache_info(),
# self.calc_img_area_len.cache_info())
def set_title_parents_point(self, doc):
# First we give a high point to nodes who have
# a descendant that is a header tag and matches title most
def is_article_header(node):
if re.match(r'h\d+|td', node.name, re.I):
if string_inclusion_ratio(node.text, self.title) > .85:
return True
return False
for node in doc.find_all(is_article_header):
# Give eligible node a high score
logger.info('Found an eligible title: %s', node.text.strip())
# self.scores[node] = 1000
for parent in node.parents:
if not parent or parent is doc:
break
                # (parent.score or 0) is parenthesized: bare `or` binds looser
                # than `+`, which would keep an existing score and drop the increment
                parent.score = (parent.score or 0) + \
                    self.calc_effective_text_len(parent) * sqrt(len(node.text))
def set_article_tag_point(self, doc):
for node in doc.find_all('article'):
# Should be less than most titles but better than short ones
            node.score = (node.score or 0) + self.calc_effective_text_len(node) * 2
def calc_node_score(self, node, depth=.1):
"""
The one with most text is the most likely article, naive and simple
"""
text_len = self.calc_effective_text_len(node)
# img_len = self.calc_img_area_len(cur_node)
#TODO take image as a factor
img_len = 0
impact_factor = 2 if self.has_positive_effect(node) else 1
        node.score = ((node.score or 0) + text_len + img_len) * impact_factor * (depth**1.5)
if node.score > self.max_score:
self.max_score = node.score
self.article = node
for child in node.children: # the direct children, not descendants
if isinstance(child, Tag):
self.calc_node_score(child, depth+0.1)
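    # Worked example of the scoring above (hypothetical numbers): a node with
    # text_len=400 inside a positively-matched container at depth 0.4 scores
    # ((0) + 400 + 0) * 2 * 0.4**1.5 ~= 202, while the same text at depth 0.1
    # scores only ~25, so deeper, content-like nodes win.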
def find_main_content(self):
self.calc_effective_text_len(self.doc)
self.set_title_parents_point(self.doc) # Give them the highest score
self.set_article_tag_point(self.doc)
self.calc_node_score(self.doc)
logger.info('Score of the main content is %s', self.article.score or 0)
def get_meta_description(self):
if not hasattr(self, '_meta_desc'):
self._meta_desc = ''
descs = self.doc.find_all('meta', attrs={'name': re.compile('description', re.I)})
if descs:
self._meta_desc = descs[-1].get('content', '')
return self._meta_desc
def get_meta_image(self):
if not hasattr(self, '_meta_image'):
self._meta_image = None
og_images = self.doc.find_all('meta', property=re.compile('og:image', re.I))
if og_images:
self._meta_image = og_images[0].get('content', None)
return self._meta_image
@staticmethod
def has_positive_effect(node):
for attr in node.get('id', ''), node.name, ' '.join(node.get('class', [])):
if positive_patt.search(attr):
return True
return False
@staticmethod
def has_negative_effect(node):
for attr in node.get('id', ''), node.name, ' '.join(node.get('class', [])):
if negative_patt.search(attr):
return True
return False
def calc_effective_text_len(self, node):
"""
Calc the total the length of text in a child, same as
sum(len(s) for s in cur_node.stripped_strings)
"""
if node.text_len is not None:
return node.text_len
text_len = 0
for child in node.children:
if isinstance(child, Tag):
if child.name == 'a':
continue
text_len += self.calc_effective_text_len(child)
# Comment is also an instance of NavigableString,
# so we should not use isinstance(child, NavigableString)
elif type(child) is NavigableString:
text_len += len(child.string.strip()) + child.string.count(',') + \
child.string.count(u',') # Chinese comma
node.text_len = text_len * .2 if self.has_negative_effect(node) else text_len
return node.text_len
def calc_img_area_len(self, cur_node):
return 0
# img_len = 0
# if cur_node.name == 'img':
# img_len = WebImage.cached_builder(self.url, cur_node).to_text_len()
# else:
# for node in cur_node.find_all('img', recursive=False): # only search children first
# img_len += self.calc_img_area_len(node)
# return img_len
def purge(self):
for tname in ignored_tags:
for d in self.doc.find_all(tname):
d.extract() # decompose calls extract with some more steps
for style_links in self.doc.find_all('link', attrs={'type': 'text/css'}):
style_links.extract()
def clean_up_html(self):
trashcan = []
for tag in self.article.descendants:
if isinstance(tag, Tag):
del tag['class']
del tag['id']
# <!-- comment -->
elif isinstance(tag, NavigableString) and type(tag) is not NavigableString:
# Definitely should not modify the iter while looping
# tag.extract()
trashcan.append(tag)
for t in trashcan:
t.extract()
def relative_path2_abs_url(self):
def _rp2au(soup, tp):
d = {tp: True}
for tag in soup.find_all(**d):
tag[tp] = urljoin(self.url, tag[tp])
_rp2au(self.article, 'href')
_rp2au(self.article, 'src')
_rp2au(self.article, 'background')
@staticmethod
def is_link_intensive(node):
all_text = len(node.get_text(separator=u'', strip=True, types=(NavigableString,)))
if not all_text:
return False
link_text = 0
for a in node.find_all('a'):
link_text += len(a.get_text(separator=u'', strip=True, types=(NavigableString,)))
return float(link_text) / all_text >= .65
@staticmethod
def cut_content_to_length(node, length):
cur_length = 0
ret = ['<%s>' % node.name]
for child in node.children:
if isinstance(child, Tag):
cs, cl = HtmlContentExtractor.cut_content_to_length(child, length-cur_length)
ret.append(cs)
cur_length += cl
else:
t = []
for line in unicode(child).split('\n'):
t.append(line)
cur_length += len(t[-1])
if cur_length >= length:
break
ret.append(escape('\n'.join(t)))
if cur_length >= length:
break
if len(ret) == 1: # no children
return unicode(node), 0
ret.append('</%s>' % node.name)
return ''.join(ret), cur_length
def get_summary(self, max_length=300):
preserved_tags = {'pre'}
def is_meta_tag(node):
for attr in chain(node.get('class', []), [node.get('id', '')], [node.name]):
if re.search(r'meta|date|time|author|share|caption|attr|title|header|summary|'
'clear|tag|manage|info|social|avatar|small|sidebar|views|'
'created|name|related|nav|pull',
attr, re.I):
return True
return False
def summarize(node, max_length):
partial_summaries = []
for child in node.children:
if isinstance(child, Tag):
# Put a space between two blocks
partial_summaries.append(' ') # http://paulgraham.com/know.html
# if self.summary_begun: # http://v2ex.com/t/152930
if is_meta_tag(child) and \
1.0*self.calc_effective_text_len(child)/self.calc_effective_text_len(self.article) < .3 and \
self.calc_effective_text_len(child) < max_length:
continue
if child.name in block_tags:
# Ignore too many links and too short paragraphs
if self.is_link_intensive(child) or (len(tokenize(child.text)) < 15 and
1.0*self.calc_effective_text_len(child)/self.calc_effective_text_len(self.article) < .3):
continue
child_summary = summarize(child, max_length).strip()
if len(tokenize(child_summary)) < 15 and \
1.0*self.calc_effective_text_len(child)/self.calc_effective_text_len(self.article) < .3:
continue
partial_summaries.append(child_summary)
else:
partial_summaries.append(summarize(child, max_length))
max_length -= len(partial_summaries[-1])
if max_length < 0:
break
elif type(child) is NavigableString:
# if not child.strip():
# continue
if re.match(r'h\d+|td', child.parent.name, re.I) and \
string_inclusion_ratio(child, self.title) > .85:
continue
self.summary_begun = True
child = re.sub(u'[ ]{2,}', u' ', child) # squeeze spaces
if len(child) > max_length:
for word in tokenize(child):
partial_summaries.append(escape(word))
max_length -= len(partial_summaries[-1])
if max_length < 0:
partial_summaries.append(' ...')
return ''.join(partial_summaries)
else:
partial_summaries.append(escape(child))
max_length -= len(partial_summaries[-1])
return ''.join(partial_summaries)
self.summary_begun = False # miss the nonlocal feature
smr = u''
if self.calc_effective_text_len(self.article):
smr = summarize(self.article, max_length).strip()
if len(smr) <= len(self.get_meta_description()):
logger.info('Calculated summary is shorter than meta description(%s)', self.url)
return self.get_meta_description()
return smr
def get_illustration(self):
for img_node in self.article.find_all('img') + self.doc.find_all('img'):
img = WebImage.from_node(self.url, img_node)
if img.is_candidate:
logger.info('Found a top image %s', img.url)
return img
# Only as a fall back, github use user's avatar as their meta_images
if self.get_meta_image():
img = WebImage.from_attrs(src=self.get_meta_image(), referrer=self.url)
if img.is_candidate:
logger.info('Found a meta image %s', img.url)
return img
logger.info('No top image is found on %s', self.url)
return None
def get_favicon_url(self):
if not hasattr(self, '_favicon_url'):
fa = self.doc.find('link', rel=re.compile('icon', re.I))
favicon_path = fa.get('href', '/favicon.ico') if fa else '/favicon.ico'
self._favicon_url = urljoin(self.url, favicon_path)
return self._favicon_url
|
polyrabbit/hacker-news-digest
|
page_content_extractor/html.py
|
Python
|
lgpl-3.0
| 14,547
|
#! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import os.path
from scipy._lib.six import xrange, u
import numpy as np
from numpy.linalg import norm
from numpy.testing import (verbose, TestCase, run_module_suite, assert_,
assert_raises, assert_array_equal, assert_equal, assert_almost_equal,
assert_allclose)
from scipy.spatial.distance import (squareform, pdist, cdist, matching,
jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule,
num_obs_y, num_obs_dm, is_valid_dm, is_valid_y, minkowski, wminkowski,
euclidean, sqeuclidean, cosine, correlation, hamming, mahalanobis,
canberra, braycurtis, sokalmichener, _validate_vector)
_filenames = ["iris.txt",
"cdist-X1.txt",
"cdist-X2.txt",
"pdist-hamming-ml.txt",
"pdist-boolean-inp.txt",
"pdist-jaccard-ml.txt",
"pdist-cityblock-ml-iris.txt",
"pdist-minkowski-3.2-ml-iris.txt",
"pdist-cityblock-ml.txt",
"pdist-correlation-ml-iris.txt",
"pdist-minkowski-5.8-ml-iris.txt",
"pdist-correlation-ml.txt",
"pdist-minkowski-3.2-ml.txt",
"pdist-cosine-ml-iris.txt",
"pdist-seuclidean-ml-iris.txt",
"pdist-cosine-ml.txt",
"pdist-seuclidean-ml.txt",
"pdist-double-inp.txt",
"pdist-spearman-ml.txt",
"pdist-euclidean-ml.txt",
"pdist-euclidean-ml-iris.txt",
"pdist-chebychev-ml.txt",
"pdist-chebychev-ml-iris.txt",
"random-bool-data.txt"]
_tdist = np.array([[0, 662, 877, 255, 412, 996],
[662, 0, 295, 468, 268, 400],
[877, 295, 0, 754, 564, 138],
[255, 468, 754, 0, 219, 869],
[412, 268, 564, 219, 0, 669],
[996, 400, 138, 869, 669, 0]], dtype='double')
_ytdist = squareform(_tdist)
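# For reference: squareform converts the 6x6 symmetric matrix above to its
# condensed 1-D form of 6*(6-1)/2 == 15 entries and back, so
#   _ytdist.shape == (15,)
#   squareform(_ytdist)[0, 1] == 662.0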
# A hashmap of expected output arrays for the tests. These arrays
# come from a list of text files, which are read prior to testing.
# Each test loads inputs and outputs from this dictionary.
eo = {}
def load_testing_files():
for fn in _filenames:
name = fn.replace(".txt", "").replace("-ml", "")
fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)
        with open(fqfn) as fp:
            eo[name] = np.loadtxt(fp)
eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
load_testing_files()
class TestCdist(TestCase):
def test_cdist_euclidean_random(self):
eps = 1e-07
# Get the data: the input matrix and the right output.
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'euclidean')
Y2 = cdist(X1, X2, 'test_euclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_euclidean_random_unicode(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, u('euclidean'))
Y2 = cdist(X1, X2, u('test_euclidean'))
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sqeuclidean_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'sqeuclidean')
Y2 = cdist(X1, X2, 'test_sqeuclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cityblock_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'cityblock')
Y2 = cdist(X1, X2, 'test_cityblock')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_hamming_double_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'hamming')
Y2 = cdist(X1, X2, 'test_hamming')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_hamming_bool_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'hamming')
Y2 = cdist(X1, X2, 'test_hamming')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_jaccard_double_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'jaccard')
Y2 = cdist(X1, X2, 'test_jaccard')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_jaccard_bool_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'jaccard')
Y2 = cdist(X1, X2, 'test_jaccard')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_chebychev_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'chebychev')
Y2 = cdist(X1, X2, 'test_chebychev')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p3d8(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=3.8)
Y2 = cdist(X1, X2, 'test_minkowski', p=3.8)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p4d6(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=4.6)
Y2 = cdist(X1, X2, 'test_minkowski', p=4.6)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p1d23(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'minkowski', p=1.23)
Y2 = cdist(X1, X2, 'test_minkowski', p=1.23)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p3d8(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_int_weights(self):
# regression test when using integer weights
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = np.arange(X1.shape[1])
Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p4d6(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=4.6, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=4.6, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_wminkowski_random_p1d23(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
w = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, 'wminkowski', p=1.23, w=w)
Y2 = cdist(X1, X2, 'test_wminkowski', p=1.23, w=w)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_seuclidean_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'seuclidean')
Y2 = cdist(X1, X2, 'test_seuclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cosine_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'cosine')
# Naive implementation
def norms(X):
# NumPy 1.7: np.linalg.norm(X, axis=1).reshape(-1, 1)
return np.asarray([np.linalg.norm(row)
for row in X]).reshape(-1, 1)
Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_correlation_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'correlation')
Y2 = cdist(X1, X2, 'test_correlation')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_mahalanobis_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = cdist(X1, X2, 'mahalanobis')
Y2 = cdist(X1, X2, 'test_mahalanobis')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_mahalanobis(self):
# 1-dimensional observations
x1 = np.array([[2], [3]])
x2 = np.array([[2], [5]])
dist = cdist(x1, x2, metric='mahalanobis')
assert_allclose(dist, [[0.0, np.sqrt(4.5)], [np.sqrt(0.5), np.sqrt(2)]])
# 2-dimensional observations
x1 = np.array([[0, 0], [-1, 0]])
x2 = np.array([[0, 2], [1, 0], [0, -2]])
dist = cdist(x1, x2, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [[rt2, rt2, rt2], [2, 2*rt2, 2]])
# Too few observations
assert_raises(ValueError,
cdist, [[0, 1]], [[2, 3]], metric='mahalanobis')
def test_cdist_canberra_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'canberra')
Y2 = cdist(X1, X2, 'test_canberra')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_braycurtis_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'braycurtis')
Y2 = cdist(X1, X2, 'test_braycurtis')
if verbose > 2:
print(Y1, Y2)
print((Y1-Y2).max())
_assert_within_tol(Y1, Y2, eps)
def test_cdist_yule_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'yule')
Y2 = cdist(X1, X2, 'test_yule')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_matching_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'matching')
Y2 = cdist(X1, X2, 'test_matching')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_kulsinski_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'kulsinski')
Y2 = cdist(X1, X2, 'test_kulsinski')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_dice_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'dice')
Y2 = cdist(X1, X2, 'test_dice')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_rogerstanimoto_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'rogerstanimoto')
Y2 = cdist(X1, X2, 'test_rogerstanimoto')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_russellrao_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'russellrao')
Y2 = cdist(X1, X2, 'test_russellrao')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sokalmichener_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'sokalmichener')
Y2 = cdist(X1, X2, 'test_sokalmichener')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_sokalsneath_random(self):
eps = 1e-07
X1 = eo['cdist-X1'] < 0.5
X2 = eo['cdist-X2'] < 0.5
Y1 = cdist(X1, X2, 'sokalsneath')
Y2 = cdist(X1, X2, 'test_sokalsneath')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
class TestPdist(TestCase):
def test_pdist_euclidean_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_u(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, u('euclidean'))
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-euclidean']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test2 = pdist(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_euclidean_iris_double(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-euclidean-iris']
Y_test1 = pdist(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_euclidean_iris_nonC(self):
# Test pdist(X, 'test_euclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test2 = pdist(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_nonC(self):
        # Test pdist(X, 'test_seuclidean') [the non-C implementation]
        eps = 1e-05
        X = eo['pdist-double-inp']
        Y_right = eo['pdist-seuclidean']
        Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_iris(self):
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_float32(self):
# Tests pdist(X, 'seuclidean') on the Iris data set (float32).
eps = 1e-05
X = np.float32(eo['iris'])
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_nonC(self):
# Test pdist(X, 'test_seuclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
        Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cosine']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_nonC(self):
# Test pdist(X, 'test_cosine') [the non-C implementation]
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test2 = pdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_iris_float32(self):
eps = 1e-07
X = np.float32(eo['iris'])
Y_right = eo['pdist-cosine-iris']
Y_test1 = pdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cosine_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test2 = pdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cityblock_random(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_float32(self):
eps = 1e-06
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cityblock']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_nonC(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test2 = pdist(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cityblock_iris(self):
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-cityblock-iris']
Y_test1 = pdist(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cityblock_iris_nonC(self):
# Test pdist(X, 'test_cityblock') [the non-C implementation] on the
# Iris data set.
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test2 = pdist(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-correlation']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test2 = pdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_iris_float32(self):
eps = 1e-07
X = eo['iris']
Y_right = np.float32(eo['pdist-correlation-iris'])
Y_test1 = pdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_correlation_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test2 = pdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_nonC(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_3_2_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = pdist(X, 'minkowski', 3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test2 = pdist(X, 'test_minkowski', 3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_5_8_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_5_8_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = pdist(X, 'minkowski', 5.8)
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_minkowski_5_8_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test2 = pdist(X, 'test_minkowski', 5.8)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_wminkowski(self):
x = np.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
p2_expected = [1.0, 1.0, np.sqrt(3),
np.sqrt(2), np.sqrt(2),
np.sqrt(2)]
p1_expected = [0.5, 1.0, 3.5,
1.5, 3.0,
2.5]
dist = pdist(x, metric=wminkowski, w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric=wminkowski, w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[1.0, 1.0, 1.0])
assert_allclose(dist, p2_expected, rtol=1e-14)
dist = pdist(x, metric='wminkowski', w=[0.5, 1.0, 2.0], p=1)
assert_allclose(dist, p1_expected, rtol=1e-14)
def test_pdist_wminkowski_int_weights(self):
# regression test for int weights
x = np.array([[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 1.0, 1.0]])
dist1 = pdist(x, metric='wminkowski', w=np.arange(3), p=1)
dist2 = pdist(x, metric='wminkowski', w=[0., 1., 2.], p=1)
assert_allclose(dist1, dist2, rtol=1e-14)
def test_pdist_mahalanobis(self):
# 1-dimensional observations
x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1)
dist = pdist(x, metric='mahalanobis')
assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5),
np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)])
# 2-dimensional observations
x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]])
dist = pdist(x, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2*rt2, 2, 2, 2*rt2, 2])
# Too few observations
assert_raises(ValueError,
pdist, [[0, 1], [2, 3]], metric='mahalanobis')
def test_pdist_hamming_random(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_nonC(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_dhamming_random(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = pdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_nonC(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test2 = pdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_jaccard_random(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_nonC(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_djaccard_random(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = pdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_nonC(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test2 = pdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebychev_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebychev_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-chebychev']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebychev_random_nonC(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebychev']
Y_test2 = pdist(X, 'test_chebychev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebychev_iris(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebychev_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-chebychev-iris']
Y_test1 = pdist(X, 'chebychev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebychev_iris_nonC(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebychev-iris']
Y_test2 = pdist(X, 'test_chebychev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_matching_mtica1(self):
# Test matching(*,*) with mtica example #1 (nums).
m = matching(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = matching(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_matching_mtica2(self):
# Test matching(*,*) with mtica example #2.
m = matching(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = matching(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_matching_match(self):
# Test pdist(X, 'matching') to see if the two implementations match on
# random boolean input data.
D = eo['random-bool-data']
B = np.bool_(D)
if verbose > 2:
print(B.shape, B.dtype)
eps = 1e-10
y1 = pdist(B, "matching")
y2 = pdist(B, "test_matching")
y3 = pdist(D, "test_matching")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y1-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_jaccard_mtica1(self):
m = jaccard(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = jaccard(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica2(self):
m = jaccard(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = jaccard(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_jaccard_match(self):
# Test pdist(X, 'jaccard') to see if the two implementations match on
# random double input data.
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "jaccard")
y2 = pdist(D, "test_jaccard")
y3 = pdist(np.bool_(D), "test_jaccard")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_yule_mtica1(self):
m = yule(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = yule(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_mtica2(self):
m = yule(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = yule(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "yule")
y2 = pdist(D, "test_yule")
y3 = pdist(np.bool_(D), "test_yule")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_dice_mtica1(self):
m = dice(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = dice(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/7, rtol=0, atol=1e-10)
assert_allclose(m2, 3/7, rtol=0, atol=1e-10)
def test_pdist_dice_mtica2(self):
m = dice(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = dice(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 0.5, rtol=0, atol=1e-10)
assert_allclose(m2, 0.5, rtol=0, atol=1e-10)
def test_pdist_dice_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "dice")
y2 = pdist(D, "test_dice")
y3 = pdist(D, "test_dice")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_sokalsneath_mtica1(self):
m = sokalsneath(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/4, rtol=0, atol=1e-10)
assert_allclose(m2, 3/4, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica2(self):
m = sokalsneath(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = sokalsneath(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 4/5, rtol=0, atol=1e-10)
assert_allclose(m2, 4/5, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalsneath")
y2 = pdist(D, "test_sokalsneath")
y3 = pdist(np.bool_(D), "test_sokalsneath")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_rogerstanimoto_mtica1(self):
m = rogerstanimoto(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = rogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/4, rtol=0, atol=1e-10)
assert_allclose(m2, 3/4, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica2(self):
m = rogerstanimoto(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = rogerstanimoto(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 4/5, rtol=0, atol=1e-10)
assert_allclose(m2, 4/5, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "rogerstanimoto")
y2 = pdist(D, "test_rogerstanimoto")
y3 = pdist(np.bool_(D), "test_rogerstanimoto")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_russellrao_mtica1(self):
m = russellrao(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = russellrao(np.array([1, 0, 1, 1, 0], dtype=np.bool),
np.array([1, 1, 0, 1, 1], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 3/5, rtol=0, atol=1e-10)
assert_allclose(m2, 3/5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica2(self):
m = russellrao(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = russellrao(np.array([1, 0, 1], dtype=np.bool),
np.array([1, 1, 0], dtype=np.bool))
if verbose > 2:
print(m)
assert_allclose(m, 2/3, rtol=0, atol=1e-10)
assert_allclose(m2, 2/3, rtol=0, atol=1e-10)
def test_pdist_russellrao_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "russellrao")
y2 = pdist(D, "test_russellrao")
y3 = pdist(np.bool_(D), "test_russellrao")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_sokalmichener_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "sokalmichener")
y2 = pdist(D, "test_sokalmichener")
y3 = pdist(np.bool_(D), "test_sokalmichener")
if verbose > 2:
print(np.abs(y1-y2).max())
print(np.abs(y2-y3).max())
_assert_within_tol(y1, y2, eps)
_assert_within_tol(y2, y3, eps)
def test_pdist_kulsinski_match(self):
D = eo['random-bool-data']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "kulsinski")
y2 = pdist(D, "test_kulsinski")
y3 = pdist(np.bool_(D), "test_kulsinski")
_assert_within_tol(y1, y2, eps, verbose > 2)
_assert_within_tol(y2, y3, eps)
def test_pdist_canberra_match(self):
D = eo['iris']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = pdist(D, "canberra")
y2 = pdist(D, "test_canberra")
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_pdist_canberra_ticket_711(self):
# Test pdist(X, 'canberra') to see if Canberra gives the right result
# as reported on gh-1238.
eps = 1e-8
pdist_y = pdist(([3.3], [3.4]), "canberra")
right_y = 0.01492537
_assert_within_tol(pdist_y, right_y, eps, verbose > 2)
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def _assert_within_tol(a, b, atol, verbose_=False):
if verbose_:
print(np.abs(a-b).max())
assert_allclose(a, b, rtol=0, atol=atol)
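# For example, with rtol=0 the helper above accepts a maximum absolute
# difference up to atol:
#   _assert_within_tol(np.array([1.0]), np.array([1.0 + 5e-8]), 1e-07)  # passes
# while a difference of 1e-6 against atol=1e-07 would raise.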
class TestSomeDistanceFunctions(TestCase):
def setUp(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
# 3x1 arrays
x31 = x[:,np.newaxis]
y31 = y[:,np.newaxis]
# 1x3 arrays
x13 = x31.T
y13 = y31.T
self.cases = [(x,y), (x31, y31), (x13, y13)]
def test_minkowski(self):
for x, y in self.cases:
dist1 = minkowski(x, y, p=1)
assert_almost_equal(dist1, 3.0)
dist1p5 = minkowski(x, y, p=1.5)
assert_almost_equal(dist1p5, (1.0+2.0**1.5)**(2./3))
dist2 = minkowski(x, y, p=2)
assert_almost_equal(dist2, np.sqrt(5))
def test_wminkowski(self):
w = np.array([1.0, 2.0, 0.5])
for x, y in self.cases:
dist1 = wminkowski(x, y, p=1, w=w)
assert_almost_equal(dist1, 3.0)
dist1p5 = wminkowski(x, y, p=1.5, w=w)
assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
dist2 = wminkowski(x, y, p=2, w=w)
assert_almost_equal(dist2, np.sqrt(5))
def test_euclidean(self):
for x, y in self.cases:
dist = euclidean(x, y)
assert_almost_equal(dist, np.sqrt(5))
def test_sqeuclidean(self):
for x, y in self.cases:
dist = sqeuclidean(x, y)
assert_almost_equal(dist, 5.0)
def test_cosine(self):
for x, y in self.cases:
dist = cosine(x, y)
assert_almost_equal(dist, 1.0 - 18.0/(np.sqrt(14)*np.sqrt(27)))
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0/3, -4.0/3, 5.0-7.0/3])
for x, y in self.cases:
dist = correlation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym)/(norm(xm)*norm(ym)))
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0],[1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
class TestSquareForm(TestCase):
def test_squareform_empty_matrix(self):
A = np.zeros((0,0))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_empty_vector(self):
v = np.zeros((0,))
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (1,1))
assert_equal(rv[0, 0], 0)
def test_squareform_1by1_matrix(self):
A = np.zeros((1,1))
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (0,))
def test_squareform_one_vector(self):
v = np.ones((1,)) * 8.3
rv = squareform(np.array(v, dtype='double'))
assert_equal(rv.shape, (2,2))
assert_equal(rv[0,1], 8.3)
assert_equal(rv[1,0], 8.3)
def test_squareform_one_binary_vector(self):
# Tests squareform on a 1x1 binary matrix; conversion to double was
# causing problems (see pull request 73).
v = np.ones((1,), dtype=np.bool)
rv = squareform(v)
assert_equal(rv.shape, (2,2))
assert_(rv[0,1])
def test_squareform_2by2_matrix(self):
A = np.zeros((2,2))
A[0,1] = 0.8
A[1,0] = 0.8
rA = squareform(np.array(A, dtype='double'))
assert_equal(rA.shape, (1,))
assert_equal(rA[0], 0.8)
def test_squareform_multi_matrix(self):
for n in xrange(2, 5):
yield self.check_squareform_multi_matrix(n)
def check_squareform_multi_matrix(self, n):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(len(Y.shape), 1)
A = squareform(Y)
Yr = squareform(A)
s = A.shape
k = 0
if verbose >= 3:
print(A.shape, Y.shape, Yr.shape)
assert_equal(len(s), 2)
assert_equal(len(Yr.shape), 1)
assert_equal(s[0], s[1])
for i in xrange(0, s[0]):
for j in xrange(i+1, s[1]):
if i != j:
assert_equal(A[i, j], Y[k])
k += 1
else:
assert_equal(A[i, j], 0)
class TestNumObsY(TestCase):
def test_num_obs_y_multi_matrix(self):
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
assert_equal(num_obs_y(Y), n)
def test_num_obs_y_1(self):
# Tests num_obs_y(y) on a condensed distance matrix over 1
        # observation. Expecting an exception.
assert_raises(ValueError, self.check_y, 1)
def test_num_obs_y_2(self):
# Tests num_obs_y(y) on a condensed distance matrix over 2
# observations.
assert_(self.check_y(2))
def test_num_obs_y_3(self):
assert_(self.check_y(3))
def test_num_obs_y_4(self):
assert_(self.check_y(4))
def test_num_obs_y_5_10(self):
for i in xrange(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
# Tests num_obs_y(y) on 100 improper condensed distance matrices.
# Expecting exception.
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def minit(self, n):
assert_(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
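    # A condensed distance matrix over n observations has n*(n-1)//2 entries,
    # so e.g. make_y(5) yields a length-10 vector and num_obs_y maps it back
    # to n == 5.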
class TestNumObsDM(TestCase):
def test_num_obs_dm_multi_matrix(self):
for n in xrange(1, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
A = squareform(Y)
if verbose >= 3:
print(A.shape, Y.shape)
assert_equal(num_obs_dm(A), n)
def test_num_obs_dm_0(self):
        # Tests num_obs_dm(D) on a 0x0 distance matrix (valid: num_obs_dm returns 0).
assert_(self.check_D(0))
def test_num_obs_dm_1(self):
# Tests num_obs_dm(D) on a 1x1 distance matrix.
assert_(self.check_D(1))
def test_num_obs_dm_2(self):
assert_(self.check_D(2))
def test_num_obs_dm_3(self):
        assert_(self.check_D(3))
def test_num_obs_dm_4(self):
assert_(self.check_D(4))
def check_D(self, n):
return num_obs_dm(self.make_D(n)) == n
def make_D(self, n):
return np.random.rand(n, n)
def is_valid_dm_throw(D):
return is_valid_dm(D, throw=True)
class TestIsValidDM(TestCase):
def test_is_valid_dm_int16_array_E(self):
# Tests is_valid_dm(*) on an int16 array. Exception expected.
D = np.zeros((5, 5), dtype='i')
assert_raises(TypeError, is_valid_dm_throw, (D))
def test_is_valid_dm_int16_array_F(self):
D = np.zeros((5, 5), dtype='i')
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_1D_E(self):
D = np.zeros((5,), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_1D_F(self):
D = np.zeros((5,), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_3D_E(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_3D_F(self):
D = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_nonzero_diagonal_E(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_nonzero_diagonal_F(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_asymmetric_E(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_asymmetric_F(self):
y = np.random.rand(10)
D = squareform(y)
D[1,3] = D[3,1] + 1
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_correct_1_by_1(self):
D = np.zeros((1,1), dtype=np.double)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_2_by_2(self):
y = np.random.rand(1)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_3_by_3(self):
y = np.random.rand(3)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_4_by_4(self):
y = np.random.rand(6)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_5_by_5(self):
y = np.random.rand(10)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def is_valid_y_throw(y):
return is_valid_y(y, throw=True)
class TestIsValidY(TestCase):
# If test case name ends on "_E" then an exception is expected for the
# given input, if it ends in "_F" then False is expected for the is_valid_y
# check. Otherwise the input is expected to be valid.
def test_is_valid_y_int16_array_E(self):
y = np.zeros((10,), dtype='i')
assert_raises(TypeError, is_valid_y_throw, (y))
def test_is_valid_y_int16_array_F(self):
y = np.zeros((10,), dtype='i')
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_2D_E(self):
y = np.zeros((3,3,), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_2D_F(self):
y = np.zeros((3,3,), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_3D_E(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_3D_F(self):
y = np.zeros((3,3,3), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_correct_2_by_2(self):
y = self.correct_n_by_n(2)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_3_by_3(self):
y = self.correct_n_by_n(3)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_4_by_4(self):
y = self.correct_n_by_n(4)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_5_by_5(self):
y = self.correct_n_by_n(5)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_2_100(self):
a = set([])
for n in xrange(2, 16):
a.add(n*(n-1)/2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
def test_bad_p():
# Raise ValueError if p < 1.
p = 0.5
assert_raises(ValueError, minkowski, [1, 2], [3, 4], p)
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])
def test_sokalsneath_all_false():
# Regression test for ticket #876
assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
def test_canberra():
# Regression test for ticket #1430.
assert_equal(canberra([1,2,3], [2,4,6]), 1)
assert_equal(canberra([1,1,0,0], [1,0,1,0]), 2)
def test_braycurtis():
# Regression test for ticket #1430.
assert_almost_equal(braycurtis([1,2,3], [2,4,6]), 1./3, decimal=15)
assert_almost_equal(braycurtis([1,1,0,0], [1,0,1,0]), 0.5, decimal=15)
def test_euclideans():
# Regression test for ticket #1328.
x1 = np.array([1, 1, 1])
x2 = np.array([0, 0, 0])
# Basic test of the calculation.
assert_almost_equal(sqeuclidean(x1, x2), 3.0, decimal=14)
assert_almost_equal(euclidean(x1, x2), np.sqrt(3), decimal=14)
# Check flattening for (1, N) or (N, 1) inputs
assert_almost_equal(euclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
np.sqrt(3), decimal=14)
assert_almost_equal(sqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
3.0, decimal=14)
assert_almost_equal(sqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
3.0, decimal=14)
# Distance metrics only defined for vectors (= 1-D)
x = np.arange(4).reshape(2, 2)
assert_raises(ValueError, euclidean, x, x)
assert_raises(ValueError, sqeuclidean, x, x)
# Another check, with random data.
rs = np.random.RandomState(1234567890)
x = rs.rand(10)
y = rs.rand(10)
d1 = euclidean(x, y)
d2 = sqeuclidean(x, y)
assert_almost_equal(d1**2, d2, decimal=14)
def test_hamming_unequal_length():
# Regression test for gh-4290.
x = [0, 0, 1]
y = [1, 0, 1, 0]
# Used to give an AttributeError from ndarray.mean called on bool
assert_raises(ValueError, hamming, x, y)
def test_hamming_string_array():
# https://github.com/scikit-learn/scikit-learn/issues/4014
a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',
'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',
'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],
dtype='|S4')
b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',
'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',
'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],
dtype='|S4')
desired = 0.45
assert_allclose(hamming(a, b), desired)
def test_sqeuclidean_dtypes():
# Assert that sqeuclidean returns the right types of values.
# Integer types should be converted to floating for stability.
# Floating point types should be the same as the input.
x = [1, 2, 3]
y = [4, 5, 6]
for dtype in [np.int8, np.int16, np.int32, np.int64]:
d = sqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_(np.issubdtype(d.dtype, np.floating))
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
d1 = sqeuclidean([0], np.asarray([-1], dtype=dtype))
d2 = sqeuclidean(np.asarray([-1], dtype=dtype), [0])
assert_equal(d1, d2)
assert_equal(d1, np.float64(np.iinfo(dtype).max) ** 2)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for dtype in ['float16', 'float128']:
# These aren't present in older numpy versions; float128 may also not
# be present on all platforms.
if hasattr(np, dtype):
dtypes.append(getattr(np, dtype))
for dtype in dtypes:
d = sqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_equal(d.dtype, dtype)
def test_sokalmichener():
# Test that sokalmichener has the same result for bool and int inputs.
p = [True, True, False]
q = [True, False, True]
x = [int(b) for b in p]
y = [int(b) for b in q]
dist1 = sokalmichener(p, q)
dist2 = sokalmichener(x, y)
# These should be exactly the same.
assert_equal(dist1, dist2)
def test__validate_vector():
x = [1, 2, 3]
y = _validate_vector(x)
assert_array_equal(y, x)
y = _validate_vector(x, dtype=np.float64)
assert_array_equal(y, x)
assert_equal(y.dtype, np.float64)
x = [1]
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, x)
x = 1
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, [x])
x = np.arange(5).reshape(1, -1, 1)
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_array_equal(y, x[0, :, 0])
x = [[1, 2], [3, 4]]
assert_raises(ValueError, _validate_vector, x)
if __name__ == "__main__":
run_module_suite()
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/scipy/spatial/tests/test_distance.py
|
Python
|
gpl-2.0
| 56,820
|
"""Projects module widgets."""
from __future__ import unicode_literals
WIDGETS = {'widget_tasks_assigned_to_me': {'title': 'Tasks Assigned To Me',
'size': "95%"}}
def get_widgets(request):
"""Return a set of all available widgets."""
return WIDGETS
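# e.g. get_widgets(request)['widget_tasks_assigned_to_me']['title']
# evaluates to 'Tasks Assigned To Me'.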
|
tovmeod/anaf
|
anaf/projects/widgets.py
|
Python
|
bsd-3-clause
| 304
|
# -*- coding: utf-8 -*-
# Scrapy settings for cluj_info project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'cluj_info'
SPIDER_MODULES = ['cluj_info.spiders']
NEWSPIDER_MODULE = 'cluj_info.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'cluj_info (+http://www.yourdomain.com)'
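# Other commonly tuned settings may be added here as well; the values below
# are illustrative, not project requirements:
#DOWNLOAD_DELAY = 1.0
#CONCURRENT_REQUESTS_PER_DOMAIN = 8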
|
atorok/tripadvisor_reviews_cluj
|
scrapy_phantomjs/cluj_info/settings.py
|
Python
|
apache-2.0
| 498
|
# Copyright (c) 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from murano.cmd import engine
from murano.common import config
from murano.tests.unit import base
CONF = cfg.CONF
class TestEngineWorkers(base.MuranoTestCase):
def setUp(self):
super(TestEngineWorkers, self).setUp()
@mock.patch.object(config, 'parse_args')
@mock.patch.object(logging, 'setup')
@mock.patch('oslo_service.service.launch')
def test_workers_default(self, launch, setup, parse_args):
engine.main()
launch.assert_called_once_with(mock.ANY, mock.ANY,
workers=processutils.get_worker_count())
@mock.patch.object(config, 'parse_args')
@mock.patch.object(logging, 'setup')
@mock.patch('oslo_service.service.launch')
def test_workers_good_setting(self, launch, setup, parse_args):
self.override_config("workers", 8, "engine")
engine.main()
launch.assert_called_once_with(mock.ANY, mock.ANY, workers=8)
@mock.patch.object(config, 'parse_args')
@mock.patch.object(logging, 'setup')
@mock.patch('oslo_service.service.launch')
def test_workers_zero_setting(self, launch, setup, parse_args):
self.override_config("workers", 0, "engine")
engine.main()
launch.assert_called_once_with(mock.ANY, mock.ANY,
workers=processutils.get_worker_count())
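# For reference, the "workers" option exercised above belongs to the [engine]
# section of the murano configuration file; a sample override (the value is
# illustrative) would be:
#
#   [engine]
#   workers = 8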
|
satish-avninetworks/murano
|
murano/tests/unit/cmd/test_engine_workers.py
|
Python
|
apache-2.0
| 2,110
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tempfile import NamedTemporaryFile
from typing import List, Optional, Union
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.ftp.hooks.ftp import FTPHook
class FTPToS3Operator(BaseOperator):
"""
    This operator enables the transfer of files from an FTP server to S3. It can be used to
transfer one or multiple files.
:param ftp_path: The ftp remote path. For one file it is mandatory to include the file as well.
For multiple files, it is the route where the files will be found.
:type ftp_path: str
:param s3_bucket: The targeted s3 bucket in which to upload the file(s).
:type s3_bucket: str
:param s3_key: The targeted s3 key. For one file it must include the file path. For several,
it must end with "/".
:type s3_key: str
:param ftp_filenames: Only used if you want to move multiple files. You can pass a list
with exact filenames present in the ftp path, or a prefix that all files must meet. It
can also be the string '*' for moving all the files within the ftp path.
    :type ftp_filenames: Union[str, list]
:param s3_filenames: Only used if you want to move multiple files and name them different from
the originals from the ftp. It can be a list of filenames or file prefix (that will replace
the ftp prefix).
    :type s3_filenames: Union[str, list]
:param ftp_conn_id: The ftp connection id. The name or identifier for
establishing a connection to the FTP server.
:type ftp_conn_id: str
:param aws_conn_id: The s3 connection id. The name or identifier for
establishing a connection to S3.
:type aws_conn_id: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
:param gzip: If True, the file will be compressed locally
:type gzip: bool
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:type acl_policy: str
"""
template_fields = ('ftp_path', 's3_bucket', 's3_key', 'ftp_filenames', 's3_filenames')
def __init__(
self,
*,
ftp_path: str,
s3_bucket: str,
s3_key: str,
ftp_filenames: Optional[Union[str, List[str]]] = None,
s3_filenames: Optional[Union[str, List[str]]] = None,
ftp_conn_id: str = 'ftp_default',
aws_conn_id: str = 'aws_default',
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
        acl_policy: Optional[str] = None,
**kwargs,
):
super().__init__(**kwargs)
self.ftp_path = ftp_path
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.ftp_filenames = ftp_filenames
self.s3_filenames = s3_filenames
self.aws_conn_id = aws_conn_id
self.ftp_conn_id = ftp_conn_id
self.replace = replace
self.encrypt = encrypt
self.gzip = gzip
self.acl_policy = acl_policy
self.s3_hook = None
self.ftp_hook = None
def __upload_to_s3_from_ftp(self, remote_filename, s3_file_key):
with NamedTemporaryFile() as local_tmp_file:
self.ftp_hook.retrieve_file(
remote_full_path=remote_filename, local_full_path_or_buffer=local_tmp_file.name
)
self.s3_hook.load_file(
filename=local_tmp_file.name,
key=s3_file_key,
bucket_name=self.s3_bucket,
replace=self.replace,
encrypt=self.encrypt,
gzip=self.gzip,
acl_policy=self.acl_policy,
)
            self.log.info(f'File uploaded to {s3_file_key}')
def execute(self, context):
self.ftp_hook = FTPHook(ftp_conn_id=self.ftp_conn_id)
self.s3_hook = S3Hook(self.aws_conn_id)
if self.ftp_filenames:
if isinstance(self.ftp_filenames, str):
self.log.info(f'Getting files in {self.ftp_path}')
list_dir = self.ftp_hook.list_directory(
path=self.ftp_path,
)
if self.ftp_filenames == '*':
files = list_dir
else:
files = list(filter(lambda file: self.ftp_filenames in file, list_dir))
for file in files:
self.log.info(f'Moving file {file}')
if self.s3_filenames:
filename = file.replace(self.ftp_filenames, self.s3_filenames)
else:
filename = file
s3_file_key = f'{self.s3_key}{filename}'
self.__upload_to_s3_from_ftp(file, s3_file_key)
else:
if self.s3_filenames:
for ftp_file, s3_file in zip(self.ftp_filenames, self.s3_filenames):
self.__upload_to_s3_from_ftp(self.ftp_path + ftp_file, self.s3_key + s3_file)
else:
for ftp_file in self.ftp_filenames:
self.__upload_to_s3_from_ftp(self.ftp_path + ftp_file, self.s3_key + ftp_file)
else:
self.__upload_to_s3_from_ftp(self.ftp_path, self.s3_key)
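# A usage sketch (the task id, paths, bucket and connection ids below are
# hypothetical; real values come from your DAG and Airflow connections):
#
#   move_reports = FTPToS3Operator(
#       task_id='ftp_reports_to_s3',
#       ftp_path='/reports/',
#       ftp_filenames='*',
#       s3_bucket='my-data-bucket',
#       s3_key='reports/',
#       ftp_conn_id='ftp_default',
#       aws_conn_id='aws_default',
#   )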
|
apache/incubator-airflow
|
airflow/providers/amazon/aws/transfers/ftp_to_s3.py
|
Python
|
apache-2.0
| 6,354
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
import json
import re
from avocado.utils import process
from sos_tests import StageTwoReportTest
class FullCleanTest(StageTwoReportTest):
"""Run an unrestricted report execution through sos clean, ensuring that
our obfuscation is reliable across arbitrary plugin sets and not just the
'main' plugins that tend to collect data needing obfuscation
:avocado: tags=stagetwo
"""
sos_cmd = '-v --clean'
sos_timeout = 600
    # Replace with an empty placeholder to make sure this test case is not
    # influenced by previous clean runs
files = ['/etc/sos/cleaner/default_mapping']
packages = {
'rhel': ['python3-systemd'],
'ubuntu': ['python3-systemd']
}
def pre_sos_setup(self):
# ensure that case-insensitive matching of FQDNs and shortnames work
from systemd import journal
from socket import gethostname
host = gethostname()
short = host.split('.')[0]
sosfd = journal.stream('sos-testing')
sosfd.write(
"This is a test line from sos clean testing. The hostname %s "
"should not appear, nor should %s in an obfuscated archive. The "
"shortnames of %s and %s should also not appear."
% (host.lower(), host.upper(), short.lower(), short.upper())
)
def test_private_map_was_generated(self):
self.assertOutputContains('A mapping of obfuscated elements is available at')
map_file = re.findall('/.*sosreport-.*-private_map', self.cmd_output.stdout)[-1]
self.assertFileExists(map_file)
def test_tarball_named_obfuscated(self):
self.assertTrue('obfuscated' in self.archive)
def test_archive_type_correct(self):
self.assertSosLogContains('Loaded .* as type sos report directory')
def test_hostname_not_in_any_file(self):
host = self.sysinfo['pre']['networking']['hostname']
short = host.split('.')[0]
# much faster to just use grep here
content = self.grep_for_content(host) + self.grep_for_content(short)
if not content:
assert True
else:
self.fail("Hostname appears in files: %s"
% "\n".join(f for f in content))
def test_no_empty_obfuscations(self):
# get the private map file name
map_file = re.findall('/.*sosreport-.*-private_map', self.cmd_output.stdout)[-1]
with open(map_file, 'r') as mf:
map_json = json.load(mf)
for mapping in map_json:
for key, val in map_json[mapping].items():
assert key, "Empty key found in %s" % mapping
assert val, "%s mapping for '%s' empty" % (mapping, key)
def test_ip_not_in_any_file(self):
ip = self.sysinfo['pre']['networking']['ip_addr']
content = self.grep_for_content(ip)
if not content:
assert True
else:
self.fail("IP appears in files: %s" % "\n".join(f for f in content))
|
sosreport/sos
|
tests/cleaner_tests/full_report_run.py
|
Python
|
gpl-2.0
| 3,341
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-21 12:58
from __future__ import unicode_literals
from django.db import migrations, models
import base.models.enums.duration_unit
class Migration(migrations.Migration):
dependencies = [
('base', '0160_auto_20170926_0828'),
]
operations = [
migrations.AddField(
model_name='educationgroupyear',
name='duration',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='educationgroupyear',
name='duration_unit',
field=models.CharField(blank=True, choices=[('QUADRIMESTER', 'QUADRIMESTER'), ('TRIMESTER', 'TRIMESTER'), ('MONTH', 'MONTH'), ('WEEK', 'WEEK'), ('DAY', 'DAY')], default=base.models.enums.duration_unit.DurationUnits('QUADRIMESTER'), max_length=40, null=True),
),
migrations.AddField(
model_name='educationgroupyear',
name='keywords',
field=models.CharField(blank=True, max_length=320, null=True),
),
migrations.AddField(
model_name='educationgroupyear',
name='title_english',
field=models.CharField(blank=True, max_length=240, null=True),
),
migrations.AddField(
model_name='educationgroupyear',
name='enrollment_enabled',
field=models.BooleanField(default=False),
),
]
|
uclouvain/OSIS-Louvain
|
base/migrations/0161_new_fields_educationgroupyear.py
|
Python
|
agpl-3.0
| 1,465
|
from pandac.PandaModules import VBase3, BitMask32
GameTime = 60
NumBarrels = 4
BarrelStartingPositions = (
VBase3(4.3, 4, 0),
VBase3(4.3, -4, 0),
VBase3(-4.3, 4, 0),
VBase3(-4.3, -4, 0)
)
ToonStartingPositions = (
VBase3(0, 16, 0),
VBase3(0, -16, 0),
VBase3(-16, 0, 0),
VBase3(16, 0, 0)
)
CogStartingPositions = (
VBase3(35, 18, 0),
VBase3(35, 0, 0),
VBase3(35, -18, 0),
VBase3(-35, 18, 0),
VBase3(-35, 0, 0),
VBase3(-35, -18, 0),
VBase3(0, 27, 0),
VBase3(0, -27, 0),
VBase3(35, 9, 0),
VBase3(-35, 9, 0),
VBase3(35, -9, 0),
VBase3(-35, -9, 0)
)
CogReturnPositions = (
VBase3(-35, 28, 0),
VBase3(-14, 28, 0),
VBase3(14, 28, 0),
VBase3(35, 28, 0),
VBase3(35, 0, 0),
VBase3(35, -28, 0),
VBase3(-14, -28, 0),
VBase3(14, -28, 0),
VBase3(-35, -28, 0),
VBase3(-35, 0, 0)
)
StageHalfWidth = 25
StageHalfHeight = 18
NoGoal = 0
BarrelGoal = 1
ToonGoal = 2
RunAwayGoal = 3
InvalidGoalId = -1
GoalStr = {
NoGoal: 'NoGoal',
BarrelGoal: 'BarrelGoal',
ToonGoal: 'ToonGoal',
RunAwayGoal: 'RunAwayGoal',
    InvalidGoalId: 'InvalidGoalId'
}
BarrelBitmask = BitMask32(512)
BarrelOnGround = -1
NoBarrelCarried = -1
LyingDownDuration = 2.0
MAX_SCORE = 20
MIN_SCORE = 3
def calcScore(t):
    scoreRange = MAX_SCORE - MIN_SCORE
    score = scoreRange * (float(t) / GameTime) + MIN_SCORE
    return int(score + 0.5)
def getMaxScore():
result = calcScore(GameTime)
return result
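# Worked example (follows directly from the constants above): with
# MAX_SCORE = 20, MIN_SCORE = 3 and GameTime = 60,
# calcScore(30) = int(17 * 0.5 + 3 + 0.5) = 12, and
# getMaxScore() = calcScore(60) = 20.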
NumCogsTable = [
{2000: 5,
1000: 5,
5000: 5,
4000: 5,
3000: 5,
9000: 5},
{2000: 7,
1000: 7,
5000: 7,
4000: 7,
3000: 7,
9000: 7},
{2000: 9,
1000: 9,
5000: 9,
4000: 9,
3000: 9,
9000: 9},
{2000: 11,
1000: 11,
5000: 11,
4000: 11,
3000: 11,
9000: 11}
]
CogSpeedTable = [
{2000: 6.0,
1000: 6.4,
5000: 6.8,
4000: 7.2,
3000: 7.6,
9000: 8.0},
{2000: 6.0,
1000: 6.4,
5000: 6.8,
4000: 7.2,
3000: 7.6,
9000: 8.0},
{2000: 6.0,
1000: 6.4,
5000: 6.8,
4000: 7.2,
3000: 7.6,
9000: 8.0},
{2000: 6.0,
1000: 6.4,
5000: 6.8,
4000: 7.2,
3000: 7.6,
9000: 8.0}
]
ToonSpeed = 9.0
PerfectBonus = [8, 6, 4, 2]
def calculateCogs(numPlayers, safezone):
result = 5
if numPlayers <= len(NumCogsTable):
if safezone in NumCogsTable[numPlayers - 1]:
result = NumCogsTable[numPlayers - 1][safezone]
return result
def calculateCogSpeed(numPlayers, safezone):
result = 6.0
    if numPlayers <= len(CogSpeedTable):
if safezone in CogSpeedTable[numPlayers - 1]:
result = CogSpeedTable[numPlayers - 1][safezone]
return result
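# Illustrative usage (tables are indexed by numPlayers - 1 and keyed by
# safezone id): calculateCogs(2, 2000) -> 7, calculateCogSpeed(4, 9000) -> 8.0;
# any (numPlayers, safezone) pair outside the tables falls back to the
# defaults of 5 cogs and speed 6.0.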
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/minigame/CogThiefGameGlobals.py
|
Python
|
apache-2.0
| 2,523
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class LibraryConfig(AppConfig):
name = 'library'
|
harikvpy/django-popupcrud
|
demo/library/apps.py
|
Python
|
bsd-3-clause
| 154
|
from pyticketswitch.mixins import JSONMixin
class Media(JSONMixin, object):
"""Describes some event media asset
Attributes:
caption (str): caption in plain text describing the asset.
caption_html (str): caption as html describing the asset.
name (str): name of the asset.
url (str): url for the asset.
        secure (bool): indicates if the asset url is secure or not.
        width (int): width of the asset in pixels. Only present on video
            assets.
        height (int): height of the asset in pixels. Only present on video
            assets.
"""
def __init__(self, caption=None, caption_html=None, name=None, url=None,
secure=None, width=0, height=0):
self.caption = caption
self.caption_html = caption_html
self.name = name
self.url = url
self.secure = secure
self.width = width
self.height = height
@classmethod
def from_api_data(cls, data):
"""Creates a new Media object from API data from ticketswitch.
Args:
data (dict): the part of the response from a ticketswitch API call
that concerns a media asset.
Returns:
:class:`Media <pyticketswitch.media.Media>`: a new
:class:`Media <pyticketswitch.media.Media>` object
populated with the data from the api.
"""
url = data.get('secure_complete_url', None)
secure = True
if not url:
url = data.get('insecure_complete_url', None)
secure = False
kwargs = {
'caption': data.get('caption', None),
'caption_html': data.get('caption_html', None),
'name': data.get('name', None),
'url': url,
'secure': secure,
'width': data.get('width'),
'height': data.get('height'),
}
return cls(**kwargs)
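# Illustrative usage (the dict below is made-up sample data, not a recorded
# ticketswitch response):
#   media = Media.from_api_data({
#       'secure_complete_url': 'https://example.com/poster.jpg',
#       'name': 'poster', 'width': 400, 'height': 300,
#   })
# media.secure is True and media.url is the secure URL; when only
# 'insecure_complete_url' is present, secure is False instead.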
|
ingresso-group/pyticketswitch
|
pyticketswitch/media.py
|
Python
|
mit
| 1,930
|
# -*- coding: utf-8 -*-
import click
@click.command(
context_settings={
"ignore_unknown_options": True,
"allow_extra_args": True
},
add_help_option=False
)
@click.pass_context
def serve(ctx):
"""
# start app from current dir
"""
from .. import runner
runner.serve()
if __name__ == '__main__':
# pylint: disable=E1120
serve()
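# Usage note (an assumption based on the click settings above, not verified
# against the runner): ignore_unknown_options and allow_extra_args make click
# accept e.g. `serve --port 8080` without rejecting the unknown flags;
# runner.serve() is presumably expected to read them from sys.argv itself.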
|
MrKiven/REST_ARCH
|
rest_arch/skt/cmds/serve.py
|
Python
|
mit
| 386
|
../../../../../../share/pyshared/twisted/web/test/test_static.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/twisted/web/test/test_static.py
|
Python
|
gpl-3.0
| 64
|
# Constructs a diff between two graphs.
# Returns the best match between them and its score.
# The algorithm is naive: it enumerates all candidate matchings.
# g1_or, g2_or - dict of lists (graph)
def diff_construct(g1_or, g2_or):
g1, g2 = split_graph_by_labels(g1_or), split_graph_by_labels(g2_or)
extend_graphs(g1, g2)
g1, g2 = add_zero_nodes(g1, g2)
    # The score starts at its minimum. It is the pair
    # (edge_metric, node_metric):
    # edge_metric - number of matched edges.
    # node_metric - number of matched nodes.
best = {}
score = (0, 0)
for g1_to_g2 in graph_maps(g1, g2):
node_metric = 0
edge_metric = 0
for node, li in g1_or.items():
            # A node numbered 0 on either side means the node on the other side has no match.
if node.split("_")[1] == "0" or g1_to_g2[node].split("_")[1] == "0":
continue
node_metric += 1
edge_metric += sum([int(g1_to_g2[to_node] in g2_or[g1_to_g2[node]]) for to_node in li])
if (edge_metric, node_metric) > score:
best = g1_to_g2
score = (edge_metric, node_metric)
return best, score
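# Illustrative usage (made-up two-node graphs; node names follow the
# "label_number" convention handled below):
#   g1 = {"1_1": ["2_1"], "2_1": []}
#   g2 = {"1_1": ["2_1"], "2_1": []}
#   best, score = diff_construct(g1, g2)
# best maps each "label_number" node of g1 to a node of g2, and score is the
# (edge_metric, node_metric) pair achieved by that mapping.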
# Creates a copy of gr that is split by the value of the node labels.
# gr - dict of lists (graph)
def split_graph_by_labels(gr):
from collections import defaultdict
res = defaultdict(list)
for num,_ in gr.items():
num = str(num)
# transform num to label_number format
num = num if "_" in num else num + "_1"
label, number = num.split("_")
# dict from label to list of number of node in graph.
res[int(label)].append((int(number)))
    return res
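# Worked example: split_graph_by_labels({"1_1": [], "1_2": [], "2_1": []})
# yields {1: [1, 2], 2: [1]} - nodes are grouped by label, keeping only their
# numbers, and a bare name like "3" is treated as "3_1".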
# Extends both graphs with new nodes, so that each has nodes with every label in [1, m].
# m = max(labels(g1), labels(g2))
# g1, g2 - dict of lists of pairs (graph)
def extend_graphs(g1, g2):
    if not g1 and not g2:
        return
    m = max(list(g1.keys()) + list(g2.keys()))
for i in range(1, m + 1):
if i not in g1.keys():
g1[i] = [1]
if i not in g2.keys():
g2[i] = [1]
# Adds zero-numbered nodes under each label, so that nodes from one graph can
# be matched with "no node" in the other.
# g1, g2 - dict of lists of pairs (graph)
def add_zero_nodes(g1, g2):
    # Returns a new dict that, for each label, has the n nodes from g1 plus m
    # 0-nodes, where m is the number of nonzero nodes under that label in g2.
def add_zero_nodes_helper(g1, g2):
return { label1: (l1 + [0] * len([ num for num in g2[label1] if num != 0 ])) for label1, l1 in g1.items() }
return add_zero_nodes_helper(g1, g2), add_zero_nodes_helper(g2, g1)
# Returns all possible matches between nodes of the graphs.
# Including not matching node with any node from another graph.
# g1, g2 - dict of lists of pairs (graph)
def graph_maps(g1, g2):
from itertools import product
    # Returns all possible maps from g1 to g2 for each label, without duplicates
    # and without mapping a zero node to a zero node.
    # Note that the mappings are computed per label.
def graph_maps_for_each_label(g1, g2):
        # Returns all permutations of l1, each as a list.
def permuts(l1):
from itertools import permutations
return [list(l) for l in permutations(l1)]
        # Removes any duplicates from l1.
        # A list(set()) conversion does not work here, since dicts are unhashable.
def remove_dupli(l1):
res = []
for elem in l1:
if elem not in res:
res.append(elem)
return res
        # zip(sorted(g1.items()), sorted(g2.items())) guarantees that, if
        # extend_graphs was applied to g1 and g2, both sides of each pair carry
        # the same label. That is why only the first label of each pair is kept.
return { label: remove_dupli({ k: v for k, v in zip(lr1, lr2) if k != 0 or v != 0}
for lr1,lr2 in product(permuts(l1), permuts(l2)))
for (label, l1), (_, l2) in zip(sorted(g1.items()), sorted(g2.items())) }
    # Combines the per-label maps into one map via a cartesian product.
    # The result is a tuple(dict, tuple(dict, ... (dict, dict))) structure of maps from g1 to g2.
def produce_all_possible_maps(g1_to_g2_for_each_label):
from functools import reduce
if len(g1_to_g2_for_each_label) == 0:
return {}
return reduce(product,
[ [{str(label) + "_" + str(num1): str(label) + "_" + str(num2) for num1, num2 in gmap.items()}
for gmap in gmap_list ]
for label, gmap_list in g1_to_g2_for_each_label.items() ])
    # Flattens each tuple(dict, tuple(dict, ...)) structure produced by the
    # reduce above into a single dict mapping nodes of g1 to nodes of g2.
    def merge_maps(maps):
        if isinstance(maps, dict):
            return maps
        merged = {}
        for m in maps:
            merged.update(merge_maps(m))
        return merged
    return [merge_maps(l) for l in produce_all_possible_maps(graph_maps_for_each_label(g1, g2))]
|
alexander-bzikadze/graph_diff
|
src_depricated/diff_construct.py
|
Python
|
apache-2.0
| 4,468
|