text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'abdul'
import os
from mongoctl.processes import create_subprocess
from mongoctl.mongoctl_logging import *
from mongoctl import repository
from mongoctl.utils import execute_command
###############################################################################
# CONSTS
###############################################################################
DEFAULT_TAIL_LINES = 15
###############################################################################
# tail log command
###############################################################################
def tail_log_command(parsed_options):
    """tail-log command entry point: stream the server's log file.

    Looks up the server named in parsed_options, validates that the
    operation is local, then tails its log file until interrupted.
    """
    server = repository.lookup_server(parsed_options.server)
    server.validate_local_op("tail-log")

    log_path = server.get_log_file_path()
    # check if log file exists
    if not os.path.exists(log_path):
        log_info("Log file '%s' does not exist." % log_path)
        return

    log_tailer = tail_server_log(server)
    # BUG FIX: tail_server_log() returns None when it fails to spawn the
    # tail subprocess; calling communicate() on None raised AttributeError
    if log_tailer:
        log_tailer.communicate()
###############################################################################
def tail_server_log(server):
try:
logpath = server.get_log_file_path()
# touch log file to make sure it exists
log_verbose("Touching log file '%s'" % logpath)
execute_command(["touch", logpath])
tail_cmd = ["tail", "-f", "-n", str(DEFAULT_TAIL_LINES), logpath]
log_verbose("Executing command: %s" % (" ".join(tail_cmd)))
return create_subprocess(tail_cmd)
except Exception, e:
log_exception(e)
log_error("Unable to tail server log file. Cause: %s" % e)
return None
###############################################################################
def stop_tailing(log_tailer):
try:
if log_tailer:
log_verbose("-- Killing tail log path subprocess")
log_tailer.terminate()
except Exception, e:
log_exception(e)
log_verbose("Failed to kill tail subprocess. Cause: %s" % e)
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/server/tail_log.py",
"copies": "1",
"size": "1962",
"license": "mit",
"hash": -6119972931859933000,
"line_mean": 35.3333333333,
"line_max": 79,
"alpha_frac": 0.4831804281,
"autogenerated": false,
"ratio": 4.52073732718894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011046133853151398,
"num_lines": 54
} |
__author__ = 'abdul'
import os
import mongoctl.repository as repository
from base import DocumentWrapper
from mongoctl.utils import resolve_path, document_pretty_string, is_host_local
from pymongo.errors import AutoReconnect
from mongoctl.mongoctl_logging import (
log_verbose, log_error, log_warning, log_exception, log_debug
)
from mongoctl.mongo_version import version_obj
from mongoctl.config import get_default_users
from mongoctl.errors import MongoctlException
from mongoctl.prompt import read_username, read_password
from bson.son import SON
from pymongo.connection import Connection
import datetime
from mongoctl import config
from mongoctl import users
###############################################################################
# CONSTANTS
###############################################################################
# This is mongodb's default dbpath
DEFAULT_DBPATH = '/data/db'
# default pid file name
PID_FILE_NAME = "pid.txt"
LOG_FILE_NAME = "mongodb.log"
KEY_FILE_NAME = "keyFile"
# This is mongodb's default port
DEFAULT_PORT = 27017
# db connection timeout, 10 seconds
CONN_TIMEOUT = 10000
REPL_KEY_SUPPORTED_VERSION = '2.0.0'
###############################################################################
# Server Class
###############################################################################
class Server(DocumentWrapper):
###########################################################################
# Constructor
###########################################################################
def __init__(self, server_doc):
DocumentWrapper.__init__(self, server_doc)
self.__db_connection__ = None
self.__seed_users__ = None
self.__login_users__ = {}
self.__mongo_version__ = None
self._connection_address = None
###########################################################################
# Properties
###########################################################################
###########################################################################
def get_description(self):
return self.get_property("description")
###########################################################################
def set_description(self, desc):
return self.set_property("description", desc)
###########################################################################
def get_server_home(self):
home_dir = self.get_property("serverHome")
if home_dir:
return resolve_path(home_dir)
else:
return None
###########################################################################
def set_server_home(self, val):
self.set_property("serverHome", val)
###########################################################################
def get_pid_file_path(self):
return self.get_server_file_path("pidfilepath", PID_FILE_NAME)
###########################################################################
def get_log_file_path(self):
return self.get_server_file_path("logpath", LOG_FILE_NAME)
###########################################################################
def get_key_file(self):
kf = self.get_cmd_option("keyFile")
if kf:
return resolve_path(kf)
###########################################################################
def get_default_key_file_path(self):
return self.get_server_file_path("keyFile", KEY_FILE_NAME)
###########################################################################
def get_server_file_path(self, cmd_prop, default_file_name):
file_path = self.get_cmd_option(cmd_prop)
if file_path is not None:
return resolve_path(file_path)
else:
return self.get_default_file_path(default_file_name)
###########################################################################
def get_default_file_path(self, file_name):
return self.get_server_home() + os.path.sep + file_name
###########################################################################
def get_address(self):
"""
@return: the address set in the server descriptor manually.
If no address is given, NOTE THAT this function returns None.
"""
address = self.get_property("address")
if address is not None:
if address.find(":") > 0:
return address
else:
return "%s:%s" % (address, self.get_port())
else:
return None
###########################################################################
def get_address_display(self):
"""
@return: A synthesized usable address for this server.
Always call this function if you want to connect to a server.
"""
display = self.get_address()
if display is None:
display = self.get_local_address()
return display
###########################################################################
def get_host_address(self):
if self.get_address() is not None:
return self.get_address().split(":")[0]
else:
return None
###########################################################################
def get_connection_host_address(self):
return self.get_connection_address().split(":")[0]
###########################################################################
def set_address(self, address):
self.set_property("address", address)
###########################################################################
def get_local_address(self):
return "localhost:%s" % self.get_port()
def is_local_address(self):
address = self.get_address()
if address is None or "localhost" in address:
return True
return False
###########################################################################
def get_port(self):
port = self.get_cmd_option("port")
if port is None:
port = DEFAULT_PORT
return port
###########################################################################
def set_port(self, port):
self.set_cmd_option("port", port)
###########################################################################
def is_fork(self):
"""
@return: true if the server process is running in background as a deamon
"""
fork = self.get_cmd_option("fork")
return fork or fork is None
###########################################################################
def get_mongo_version(self):
"""
Gets mongo version of the server if it is running. Otherwise return
version configured in mongoVersion property
"""
if self.__mongo_version__:
return self.__mongo_version__
if self.is_online():
mongo_version = self.get_db_connection().server_info()['version']
else:
mongo_version = self.get_property("mongoVersion")
self.__mongo_version__ = mongo_version
return self.__mongo_version__
###########################################################################
def get_mongo_version_obj(self):
version_str = self.get_mongo_version()
if version_str is not None:
return version_obj(version_str)
else:
return None
###########################################################################
def get_cmd_option(self, option_name):
cmd_options = self.get_cmd_options()
if cmd_options and cmd_options.has_key(option_name):
return cmd_options[option_name]
else:
return None
###########################################################################
def set_cmd_option(self, option_name, option_value):
cmd_options = self.get_cmd_options()
if cmd_options:
cmd_options[option_name] = option_value
###########################################################################
def get_cmd_options(self):
return self.get_property('cmdOptions')
###########################################################################
def set_cmd_options(self, cmd_options):
return self.set_property('cmdOptions', cmd_options)
###########################################################################
def export_cmd_options(self, options_override=None):
cmd_options = self.get_cmd_options().copy()
# reset some props to exporting vals
cmd_options['pidfilepath'] = self.get_pid_file_path()
# apply the options override
if options_override is not None:
for (option_name, option_val) in options_override.items():
cmd_options[option_name] = option_val
# set the logpath if forking..
if (self.is_fork() or (options_override is not None and
options_override.get("fork"))):
cmd_options['fork'] = True
if "logpath" not in cmd_options:
cmd_options["logpath"] = self.get_log_file_path()
# Specify the keyFile arg if needed
if self.needs_repl_key() and "keyFile" not in cmd_options:
key_file_path = (self.get_key_file() or
self.get_default_key_file_path())
cmd_options["keyFile"] = key_file_path
return cmd_options
###########################################################################
def get_seed_users(self):
if self.__seed_users__ is None:
seed_users = self.get_property('seedUsers')
## This hidden for internal user and should not be documented
if not seed_users:
seed_users = get_default_users()
self.__seed_users__ = seed_users
return self.__seed_users__
###########################################################################
def get_login_user(self, dbname):
login_user = self.__login_users__.get(dbname)
# if no login user found then check global login
if not login_user:
login_user = users.get_global_login_user(self, dbname)
# if dbname is local and we cant find anything yet
# THEN assume that local credentials == admin credentials
if not login_user and dbname == "local":
login_user = self.get_login_user("admin")
return login_user
###########################################################################
def lookup_password(self, dbname, username):
# look in seed users
db_seed_users = self.get_db_seed_users(dbname)
if db_seed_users:
user = filter(lambda user: user['username'] == username,
db_seed_users)
if user and "password" in user[0]:
return user[0]["password"]
###########################################################################
def set_login_user(self, dbname, username, password):
self.__login_users__[dbname] = {
"username": username,
"password": password
}
###########################################################################
def get_admin_users(self):
return self.get_db_seed_users("admin")
###########################################################################
def get_db_seed_users(self, dbname):
return self.get_seed_users().get(dbname)
###########################################################################
def get_cluster(self):
return repository.lookup_cluster_by_server(self)
###########################################################################
def get_validate_cluster(self):
cluster = repository.lookup_cluster_by_server(self)
if not cluster:
raise MongoctlException("No cluster found for server '%s'" %
self.id)
repository.validate_cluster(cluster)
return cluster
###########################################################################
def is_cluster_member(self):
return self.get_cluster() is not None
###########################################################################
# DB Methods
###########################################################################
def disconnecting_db_command(self, cmd, dbname):
try:
result = self.db_command(cmd, dbname)
return result
except AutoReconnect, e:
log_verbose("This is an expected exception that happens after "
"disconnecting db commands: %s" % e)
finally:
self.__db_connection__ = None
###########################################################################
def timeout_maybe_db_command(self, cmd, dbname):
try:
result = self.db_command(cmd, dbname)
return result
except Exception, e:
log_exception(e)
if "timed out" in str(e):
log_warning("Command %s is taking a while to complete. "
"This is not necessarily bad. " %
document_pretty_string(cmd))
else:
raise
finally:
self.__db_connection__ = None
###########################################################################
def db_command(self, cmd, dbname, **kwargs):
need_auth = self.command_needs_auth(dbname, cmd)
db = self.get_db(dbname, no_auth=not need_auth)
if cmd.has_key("addShard"):
shard_given_name = kwargs.get("name", "")
return db.command("addShard", cmd.get('addShard'), name=shard_given_name)
return db.command(cmd, **kwargs)
###########################################################################
def command_needs_auth(self, dbname, cmd):
return self.needs_to_auth(dbname)
###########################################################################
def get_db(self, dbname, no_auth=False, username=None, password=None,
retry=True, never_auth_with_admin=False):
conn = self.get_db_connection()
db = conn[dbname]
# If the DB doesn't need to be authenticated to (or at least yet)
# then don't authenticate. this piece of code is important for the case
# where you are connecting to the DB on local host where --auth is on
# but there are no admin users yet
if no_auth:
return db
if (not username and
(not self.needs_to_auth(dbname))):
return db
if username:
self.set_login_user(dbname, username, password)
login_user = self.get_login_user(dbname)
# if there is no login user for this database then use admin db unless
# it was specified not to
if (not never_auth_with_admin and
not login_user and
dbname not in ["admin", "local"]):
# if this passes then we are authed!
admin_db = self.get_db("admin", retry=retry)
return admin_db.connection[dbname]
auth_success = self.authenticate_db(db, dbname, retry=retry)
# If auth failed then give it a try by auth into admin db unless it
# was specified not to
if (not never_auth_with_admin and
not auth_success
and dbname != "admin"):
admin_db = self.get_db("admin", retry=retry)
return admin_db.connection[dbname]
if auth_success:
return db
else:
raise MongoctlException("Failed to authenticate to %s db" % dbname)
###########################################################################
def authenticate_db(self, db, dbname, retry=True):
"""
Returns True if we manage to auth to the given db, else False.
"""
login_user = self.get_login_user(dbname)
username = None
password = None
auth_success = False
if login_user:
username = login_user["username"]
if "password" in login_user:
password = login_user["password"]
# have three attempts to authenticate
no_tries = 0
while not auth_success and no_tries < 3:
if not username:
username = read_username(dbname)
if not password:
password = self.lookup_password(dbname, username)
if not password:
password = read_password("Enter password for user '%s\%s'" %
(dbname, username))
# if auth success then exit loop and memoize login
auth_success = db.authenticate(username, password)
if auth_success or not retry:
break
else:
log_error("Invalid login!")
username = None
password = None
no_tries += 1
if auth_success:
self.set_login_user(dbname, username, password)
return auth_success
###########################################################################
def get_working_login(self, database, username=None, password=None):
"""
authenticate to the specified database starting with specified
username/password (if present), try to return a successful login
within 3 attempts
"""
# this will authenticate and update login user
self.get_db(database, username=username, password=password,
never_auth_with_admin=True)
login_user = self.get_login_user(database)
if login_user:
username = login_user["username"]
password = (login_user["password"] if "password" in login_user
else None)
return username, password
###########################################################################
def is_online(self):
try:
self.new_db_connection()
return True
except Exception, e:
log_exception(e)
return False
###########################################################################
def can_function(self):
status = self.get_status()
if status['connection']:
if 'error' not in status:
return True
else:
log_verbose("Error while connecting to server '%s': %s " %
(self.id, status['error']))
###########################################################################
def is_online_locally(self):
return self.is_use_local() and self.is_online()
###########################################################################
def is_use_local(self):
return (self.get_address() is None or
is_assumed_local_server(self.id)
or self.is_local())
###########################################################################
def is_local(self):
try:
server_host = self.get_host_address()
return server_host is None or is_host_local(server_host)
except Exception, e:
log_exception(e)
log_error("Unable to resolve address '%s' for server '%s'."
" Cause: %s" %
(self.get_host_address(), self.id, e))
return False
###########################################################################
def needs_to_auth(self, dbname):
"""
Determines if the server needs to authenticate to the database.
NOTE: we stopped depending on is_auth() since its only a configuration
and may not be accurate
"""
log_debug("Checking if server '%s' needs to auth on db '%s'...." %
(self.id, dbname))
try:
conn = self.new_db_connection()
db = conn[dbname]
db.collection_names()
result = False
except (RuntimeError, Exception), e:
log_exception(e)
result = "authorized" in str(e)
log_debug("needs_to_auth check for server '%s' on db '%s' : %s" %
(self.id, dbname, result))
return result
###########################################################################
def get_status(self, admin=False):
status = {}
## check if the server is online
try:
self.get_db_connection()
status['connection'] = True
# grab status summary if it was specified + if i am not an arbiter
if admin:
server_summary = self.get_server_status_summary()
status["serverStatusSummary"] = server_summary
except (RuntimeError, Exception), e:
log_exception(e)
self.sever_db_connection() # better luck next time!
status['connection'] = False
status['error'] = "%s" % e
if "timed out" in status['error']:
status['timedOut'] = True
return status
###########################################################################
def get_server_status_summary(self):
server_status = self.db_command(SON([('serverStatus', 1)]), "admin")
server_summary = {
"host": server_status['host'],
"connections": server_status['connections'],
"version": server_status['version']
}
return server_summary
###########################################################################
def get_db_connection(self):
if self.__db_connection__ is None:
self.__db_connection__ = self.new_db_connection()
return self.__db_connection__
###########################################################################
def sever_db_connection(self):
if self.__db_connection__ is not None:
self.__db_connection__.close()
self.__db_connection__ = None
###########################################################################
def new_db_connection(self):
return make_db_connection(self.get_connection_address())
###########################################################################
def get_connection_address(self):
if self._connection_address:
return self._connection_address
# try to get the first working connection address
if (self.is_use_local() and
self.has_connectivity_on(self.get_local_address())):
self._connection_address = self.get_local_address()
elif self.has_connectivity_on(self.get_address()):
self._connection_address = self.get_address()
# use old logic
if not self._connection_address:
if self.is_use_local():
self._connection_address = self.get_local_address()
else:
self._connection_address = self.get_address()
return self._connection_address
###########################################################################
###########################################################################
def has_connectivity_on(self, address):
try:
log_verbose("Checking if server '%s' is accessible on "
"address '%s'" % (self.id, address))
make_db_connection(address)
return True
except Exception, e:
log_exception(e)
log_verbose("Check failed for server '%s' is accessible on "
"address '%s': %s" % (self.id, address, e))
return False
###########################################################################
def get_rs_config(self):
try:
return self.get_db('local')['system.replset'].find_one()
except (Exception, RuntimeError), e:
log_exception(e)
if type(e) == MongoctlException:
raise e
else:
log_verbose("Cannot get rs config from server '%s'. "
"cause: %s" % (self.id, e))
return None
###########################################################################
def validate_local_op(self, op):
# If the server has been assumed to be local then skip validation
if is_assumed_local_server(self.id):
log_verbose("Skipping validation of server's '%s' address '%s' to be"
" local because --assume-local is on" %
(self.id, self.get_host_address()))
return
log_verbose("Validating server address: "
"Ensuring that server '%s' address '%s' is local on this "
"machine" % (self.id, self.get_host_address()))
if not self.is_local():
log_verbose("Server address validation failed.")
raise MongoctlException("Cannot %s server '%s' on this machine "
"because server's address '%s' does not appear "
"to be local to this machine. Pass the "
"--assume-local option if you are sure that "
"this server should be running on this "
"machine." % (op,
self.id,
self.get_host_address()))
else:
log_verbose("Server address validation passed. "
"Server '%s' address '%s' is local on this "
"machine !" % (self.id, self.get_host_address()))
###########################################################################
def log_server_activity(self, activity):
if is_logging_activity():
log_record = {"op": activity,
"ts": datetime.datetime.utcnow(),
"serverDoc": self.get_document(),
"server": self.id,
"serverDisplayName": self.get_description()}
log_verbose("Logging server activity \n%s" %
document_pretty_string(log_record))
repository.get_activity_collection().insert(log_record)
###########################################################################
def needs_repl_key(self):
"""
We need a repl key if you are auth + a cluster member +
version is None or >= 2.0.0
"""
cluster = self.get_cluster()
return (self.supports_repl_key() and
cluster is not None and cluster.get_repl_key() is not None)
###########################################################################
def supports_repl_key(self):
"""
We need a repl key if you are auth + a cluster member +
version is None or >= 2.0.0
"""
version = self.get_mongo_version_obj()
return (version is None or
version >= version_obj(REPL_KEY_SUPPORTED_VERSION))
###########################################################################
def get_pid(self):
pid_file_path = self.get_pid_file_path()
if os.path.exists(pid_file_path):
pid_file = open(pid_file_path, 'r')
pid = pid_file.readline().strip('\n')
if pid and pid.isdigit():
return int(pid)
else:
log_warning("Unable to determine pid for server '%s'. "
"Not a valid number in '%s"'' %
(self.id, pid_file_path))
else:
log_warning("Unable to determine pid for server '%s'. "
"pid file '%s' does not exist" %
(self.id, pid_file_path))
return None
def make_db_connection(address):
try:
return Connection(address,
socketTimeoutMS=CONN_TIMEOUT,
connectTimeoutMS=CONN_TIMEOUT)
except Exception, e:
log_exception(e)
error_msg = "Cannot connect to '%s'. Cause: %s" % \
(address, e)
raise MongoctlException(error_msg, cause=e)
###############################################################################
def is_logging_activity():
    """Whether server activity should be recorded in the db repository."""
    consulting = repository.consulting_db_repository()
    return consulting and config.get_mongoctl_config_val("logServerActivity",
                                                         False)
###############################################################################
# ids of servers the user asserted (via --assume-local) to be local
__assumed_local_servers__ = []


def assume_local_server(server_id):
    """Register server_id as assumed-local. Idempotent."""
    global __assumed_local_servers__
    if server_id in __assumed_local_servers__:
        return
    __assumed_local_servers__.append(server_id)
###############################################################################
def is_assumed_local_server(server_id):
    """True when server_id was registered through assume_local_server()."""
    return server_id in __assumed_local_servers__
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/objects/server.py",
"copies": "1",
"size": "29298",
"license": "mit",
"hash": 7119833183326902000,
"line_mean": 37.0988296489,
"line_max": 85,
"alpha_frac": 0.4465833845,
"autogenerated": false,
"ratio": 5.093532684283727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6040116068783727,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import os
import mongoctl.repository as repository
from base import DocumentWrapper
from mongoctl.utils import resolve_path, document_pretty_string, is_host_local, timedelta_total_seconds, is_pid_alive
import pymongo
from pymongo.errors import AutoReconnect, OperationFailure, ConnectionFailure
from mongoctl.mongoctl_logging import (
log_verbose, log_error, log_warning, log_exception, log_debug
)
from mongoctl.config import get_default_users
from mongoctl.errors import MongoctlException, is_auth_error
from mongoctl.prompt import read_username, read_password
from bson.son import SON
import datetime
from mongoctl import config
from mongoctl import users
from mongoctl.mongodb_version import MongoDBEdition, make_version_info
import ssl
from mongoctl import mongo_utils
###############################################################################
# CONSTANTS
###############################################################################
# default pid file name
PID_FILE_NAME = "pid.txt"
LOG_FILE_NAME = "mongodb.log"
KEY_FILE_NAME = "keyFile"
# This is mongodb's default port
DEFAULT_PORT = 27017
REPL_KEY_SUPPORTED_VERSION = '2.0.0'
# CLIENT_SSL_MODE global flag to for turning off ssl
# TODO this is temporary and should be deleted
CLIENT_SSL_MODE = None
# A global config that is set through --use-alt-address option that will use
# a different "address" property of when making connections to servers
USE_ALT_ADDRESS = None
###############################################################################
VERSION_2_6 = make_version_info("2.6.0")
VERSION_3_0 = make_version_info("3.0.0")
VERSION_3_2 = make_version_info("3.2.0")
###############################################################################
class ClientSslMode(object):
    """Enumeration of client-side SSL connection modes."""
    DISABLED = "disabled"   # never use SSL
    ALLOW = "allow"         # plain by default, SSL permitted
    REQUIRE = "require"     # always use SSL
    PREFER = "prefer"       # use SSL when the server supports it
###############################################################################
# Server Class
###############################################################################
class Server(DocumentWrapper):
###########################################################################
# Constructor
###########################################################################
    def __init__(self, server_doc):
        """Wrap a server configuration document and init cached state."""
        DocumentWrapper.__init__(self, server_doc)
        # lazily-populated caches
        self._mongo_client = None
        self._seed_users = None
        self._mongo_version = None
        self._mongodb_edition = None
        self._cluster = None
        self._connection_address = None
        # default connection timeout
        self._connection_timeout_ms = None
###########################################################################
# Properties
###########################################################################
###########################################################################
def get_description(self):
return self.get_ignore_str_property("description")
###########################################################################
def set_description(self, desc):
return self.set_property("description", desc)
###########################################################################
def get_server_home(self):
home_dir = self.get_property("serverHome")
if home_dir:
return resolve_path(home_dir)
else:
return None
###########################################################################
def set_server_home(self, val):
self.set_property("serverHome", val)
###########################################################################
def get_pid_file_path(self):
return self.get_server_file_path("pidfilepath", PID_FILE_NAME)
###########################################################################
def get_log_file_path(self):
return self.get_server_file_path("logpath", LOG_FILE_NAME)
###########################################################################
def get_key_file(self):
kf = self.get_cmd_option("keyFile")
if kf:
return resolve_path(kf)
###########################################################################
def get_client_ssl_mode(self):
mode = CLIENT_SSL_MODE
ssl_option = self.get_cmd_option("sslMode")
if not mode and ssl_option:
if ssl_option == "requireSSL":
mode = ClientSslMode.REQUIRE
elif ssl_option in ["preferSSL", "allowSSL"]:
mode = ClientSslMode.PREFER
elif ssl_option == "disabled":
mode = ClientSslMode.DISABLED
return mode
###########################################################################
def use_ssl_client(self):
#TODO XXX Temporarily disable ssl for local
return not self.is_use_local() and (self.get_client_ssl_mode() == ClientSslMode.REQUIRE or
self.prefer_use_ssl())
###########################################################################
    def prefer_use_ssl(self):
        # Only meaningful when the client ssl mode is PREFER: probe the
        # server with a test SSL connection to see if SSL actually works.
        if self.get_client_ssl_mode() != ClientSslMode.PREFER:
            return False

        log_debug("prefer_use_ssl() Checking if we prefer ssl for '%s'" %
                  self.id)
        try:
            self.new_ssl_test_mongo_client()
            return True
        except (OperationFailure, AutoReconnect), ofe:
            # the server responded over SSL (even though the operation
            # failed), so SSL itself is usable
            log_exception(ofe)
            return True
        except ConnectionFailure, ce:
            if "SSL handshake failed" in str(ce):
                return False
            # NOTE(review): any other ConnectionFailure falls through and
            # this method implicitly returns None -- confirm that callers
            # treat None as "no preference"
###########################################################################
def get_default_key_file_path(self):
return self.get_server_file_path("keyFile", KEY_FILE_NAME)
###########################################################################
def get_server_file_path(self, cmd_prop, default_file_name):
file_path = self.get_cmd_option(cmd_prop)
if file_path is not None:
return resolve_path(file_path)
else:
return self.get_default_file_path(default_file_name)
###########################################################################
def get_default_file_path(self, file_name):
return self.get_server_home() + os.path.sep + file_name
###########################################################################
def get_address(self):
address = self.get_property("address")
if USE_ALT_ADDRESS:
address = self.get_property(USE_ALT_ADDRESS)
if not address:
raise MongoctlException(
"No alternative address '%s' found in server '%s'" %
(USE_ALT_ADDRESS, self.id))
if address is not None:
if address.find(":") > 0:
return address
else:
return "%s:%s" % (address, self.get_port())
else:
return None
###########################################################################
def get_address_display(self):
display = self.get_address()
if display is None:
display = self.get_local_address()
return display
###########################################################################
def get_host_address(self):
if self.get_address() is not None:
return self.get_address().split(":")[0]
else:
return None
###########################################################################
    def get_connection_host_address(self):
        """Host portion of the address actually used for connections."""
        return self.get_connection_address().split(":")[0]
###########################################################################
    def set_address(self, address):
        """Set the server's 'address' property."""
        self.set_property("address", address)
###########################################################################
    def get_local_address(self):
        """Return the loopback address ``localhost:<port>`` for this server."""
        return "localhost:%s" % self.get_port()
###########################################################################
def get_port(self):
port = self.get_cmd_option("port")
if port is None:
port = DEFAULT_PORT
return port
###########################################################################
    def set_port(self, port):
        """Set the 'port' cmd option."""
        self.set_cmd_option("port", port)
###########################################################################
    def is_fork(self):
        """Return truthy if the server should fork; defaults to True when the
        'fork' cmd option is unset (None)."""
        fork = self.get_cmd_option("fork")
        return fork or fork is None
###########################################################################
def is_auth(self):
if self.get_cmd_option("auth") or self.get_cmd_option("keyFile"):
return True
else:
cluster = self.get_cluster()
if cluster:
return cluster.get_repl_key() is not None
###########################################################################
def get_mongo_version(self):
"""
Gets mongo version of the server if it is running. Otherwise return
version configured in mongoVersion property
"""
if self._mongo_version:
return self._mongo_version
mongo_version = self.read_current_mongo_version()
if not mongo_version:
mongo_version = self.get_configured_mongo_version()
self._mongo_version = mongo_version
return self._mongo_version
###########################################################################
    def get_configured_mongo_version(self):
        """Return the statically configured 'mongoVersion' property."""
        return self.get_property("mongoVersion")
###########################################################################
def read_current_mongo_version(self):
if self.is_online():
try:
return self.get_mongo_client().server_info()['version']
except Exception, e:
log_exception(e)
return None
###########################################################################
    def get_server_build_info(self):
        """
        issues a buildinfo command

        Returns the server_info dict, or None when the server is offline or
        the command fails for a reason other than the pymongo 3.6.1
        unauthenticated-client regression.
        """
        if self.is_online():
            try:
                return self.get_mongo_client().server_info()
            except OperationFailure, ofe:
                log_exception(ofe)
                if "there are no users authenticated" in str(ofe):
                    # this is a pymongo 3.6.1 regression where the buildinfo command fails on non authenticated client
                    # fall-back to an authenticated client
                    admin_db = self.get_db("admin", no_auth=False)
                    return admin_db.command("buildinfo")
            except Exception, e:
                log_exception(e)
                return None
###########################################################################
    def get_mongodb_edition(self):
        """Determine the MongoDB edition (enterprise / community-ssl /
        community) from the running server's build info, falling back to the
        configured 'mongoEdition' property when offline. Memoized."""
        if self._mongodb_edition:
            return self._mongodb_edition
        server_info = self.get_server_build_info()
        if server_info:
            # enterprise builds tag gitVersion with subscription/enterprise
            if ("gitVersion" in server_info and
                    ("subscription" in server_info["gitVersion"] or
                     "enterprise" in server_info["gitVersion"])):
                edition = MongoDBEdition.ENTERPRISE
            # ssl-enabled community builds expose OpenSSL build info
            elif (("OpenSSLVersion" in server_info and
                   server_info["OpenSSLVersion"]) or
                  (server_info.get("openssl") and server_info["openssl"]["running"] != "disabled")):
                edition = MongoDBEdition.COMMUNITY_SSL
            else:
                edition = MongoDBEdition.COMMUNITY
        else:
            edition = self.get_property("mongoEdition")
        self._mongodb_edition = edition
        return self._mongodb_edition
###########################################################################
def get_mongo_version_info(self):
version_number = self.get_mongo_version()
if version_number is not None:
return make_version_info(version_number,
edition=self.get_mongodb_edition())
else:
return None
###########################################################################
def get_cmd_option(self, option_name):
cmd_options = self.get_cmd_options()
if cmd_options and cmd_options.has_key(option_name):
return cmd_options[option_name]
else:
return None
###########################################################################
    def set_cmd_option(self, option_name, option_value):
        """Set ``option_name`` in cmdOptions; silently no-op when the server
        has no cmdOptions dict configured."""
        cmd_options = self.get_cmd_options()
        if cmd_options:
            cmd_options[option_name] = option_value
###########################################################################
    def get_cmd_options(self):
        """Return the 'cmdOptions' property dict (may be None)."""
        return self.get_property('cmdOptions')
###########################################################################
    def set_cmd_options(self, cmd_options):
        """Replace the whole 'cmdOptions' property dict."""
        return self.set_property('cmdOptions' , cmd_options)
###########################################################################
def get_environment_variables(self):
env_vars = self.get_property('environmentVariables') or {}
allowed = self.get_allowed_environment_variables()
for v in env_vars.keys():
if v not in allowed:
raise MongoctlException("Unknown environment variable '%s'" % v)
return env_vars
###########################################################################
    def get_allowed_environment_variables(self):
        """
        :return: whitelist of allowed env vars (base implementation: none;
                 subclasses override)
        """
        return []
###########################################################################
def apply_cmd_options_overrides(self, options_overrides):
if options_overrides:
for option, val in options_overrides.items():
self.set_cmd_option(option, val)
# if port was in the overrides then make sure we clear out the cached
# connection address
if "port" in options_overrides:
self._connection_address = None
###########################################################################
def export_cmd_options(self, options_override=None):
cmd_options = self.get_cmd_options().copy()
# reset some props to exporting vals
cmd_options['pidfilepath'] = self.get_pid_file_path()
# apply the options override
if options_override is not None:
for (option_name, option_val) in options_override.items():
cmd_options[option_name] = option_val
# set the logpath if forking..
if (self.is_fork() or (options_override is not None and
options_override.get("fork"))):
cmd_options['fork'] = True
if "logpath" not in cmd_options:
cmd_options["logpath"] = self.get_log_file_path()
# Specify the keyFile arg if needed
if self.needs_repl_key() and "keyFile" not in cmd_options:
key_file_path = (self.get_key_file() or
self.get_default_key_file_path())
cmd_options["keyFile"] = key_file_path
return cmd_options
###########################################################################
    def get_seed_users(self):
        """Return the 'seedUsers' property, falling back to the global
        default users when unset. Memoized."""
        if self._seed_users is None:
            seed_users = self.get_property('seedUsers')
            # NOTE: the fallback to global default users is intentionally
            # undocumented (internal use only)
            if not seed_users:
                seed_users = get_default_users()
            self._seed_users = seed_users
        return self._seed_users
###########################################################################
    def get_login_user(self, dbname):
        """Return the login-user dict for ``dbname`` (server-level first,
        then global; 'local' falls back to 'admin' credentials), resolving a
        missing password from seed users."""
        login_user = users.get_server_login_user(self, dbname)
        # if no login user found then check global login
        if not login_user:
            login_user = users.get_global_login_user(self, dbname)
        # if dbname is local and we cant find anything yet
        # THEN assume that local credentials == admin credentials
        if not login_user and dbname == "local":
            login_user = self.get_login_user("admin")
        if login_user and not login_user.get("password"):
            login_user["password"] = self.lookup_password(dbname, login_user["username"])
        return login_user
###########################################################################
def lookup_password(self, dbname, username):
# look in seed users
db_seed_users = self.get_db_seed_users(dbname)
if db_seed_users:
user = filter(lambda user: user['username'] == username,
db_seed_users)
if user and "password" in user[0]:
return user[0]["password"]
###########################################################################
    def set_login_user(self, dbname, username, password):
        """Memoize login credentials for ``dbname`` on this server."""
        users.set_server_login_user(self, dbname, username, password)
###########################################################################
    def get_admin_users(self):
        """Seed users configured for the 'admin' database."""
        return self.get_db_seed_users("admin")
###########################################################################
    def get_db_seed_users(self, dbname):
        """Seed users configured for ``dbname`` (None when absent)."""
        return self.get_seed_users().get(dbname)
###########################################################################
def get_cluster(self):
if self._cluster is None:
self._cluster = repository.lookup_cluster_by_server(self)
return self._cluster
###########################################################################
def get_validate_cluster(self):
cluster = self.get_cluster()
if not cluster:
raise MongoctlException("No cluster found for server '%s'" %
self.id)
repository.validate_cluster(cluster)
return cluster
###########################################################################
    def is_cluster_member(self):
        """True when this server belongs to some cluster."""
        return self.get_cluster() is not None
###########################################################################
    def is_cluster_connection_member(self):
        """
        Override!
        :return: true if the server should be included in a cluster connection
        """
        # base implementation intentionally returns None; subclasses decide
        pass
###########################################################################
# DB Methods
###########################################################################
    def disconnecting_db_command(self, cmd, dbname):
        """Run a db command that is expected to drop the connection
        (e.g. shutdown); AutoReconnect is swallowed as expected."""
        try:
            result = self.db_command(cmd, dbname)
            return result
        except AutoReconnect,e:
            log_verbose("This is an expected exception that happens after "
                        "disconnecting db commands: %s" % e)
###########################################################################
    def timeout_maybe_db_command(self, cmd, dbname):
        """Run a db command, tolerating (only) timeout errors with a warning;
        any other error is re-raised."""
        try:
            result = self.db_command(cmd, dbname)
            return result
        except Exception, e:
            log_exception(e)
            if "timed out" in str(e):
                log_warning("Command %s is taking a while to complete. "
                            "This is not necessarily bad. " %
                            document_pretty_string(cmd))
            else:
                raise
###########################################################################
    def db_command(self, cmd, dbname):
        """Run ``cmd`` against ``dbname``, preferring an unauthenticated
        attempt on servers that allow retry-on-auth-failure (>= 3.0), and
        retrying authenticated when an auth error occurs."""
        # try without auth first if server allows it (i.e. version >= 3.0.0)
        if self.try_on_auth_failures():
            need_auth = False
        else:
            need_auth = self.command_needs_auth(dbname, cmd)
        log_verbose("Server '%s': DB Command requested on db %s, need auth ? %s, command: %s" %
                    (self.id, dbname, need_auth, document_pretty_string(cmd)))
        db = self.get_db(dbname, no_auth=not need_auth)
        try:
            return db.command(cmd)
        except (RuntimeError,Exception), e:
            # on an auth error, retry once with an authenticated db handle
            if is_auth_error(e) and self.try_on_auth_failures():
                db = self.get_db(dbname, no_auth=False)
                return db.command(cmd)
            else:
                raise
###########################################################################
    def command_needs_auth(self, dbname, cmd):
        """Whether running ``cmd`` on ``dbname`` requires authentication;
        base implementation ignores the command itself."""
        return self.needs_to_auth(dbname)
###########################################################################
    def get_db(self, dbname, no_auth=False, username=None, password=None,
               retry=True, never_auth_with_admin=False):
        """Return a (possibly authenticated) pymongo database handle.

        no_auth skips authentication entirely; explicit username/password are
        memoized first. May recursively authenticate via the 'local' or
        'admin' databases depending on available credentials and server
        version. Raises MongoctlException when authentication ultimately
        fails.
        """
        mongo_client = self.get_mongo_client()
        db = mongo_client.get_database(dbname)
        # If the DB doesn't need to be authenticated to (or at least yet)
        # then don't authenticate. this piece of code is important for the case
        # where you are connecting to the DB on local host where --auth is on
        # but there are no admin users yet
        if no_auth:
            return db
        if (not username and
                (not self.needs_to_auth(dbname))):
            return db
        if username:
            self.set_login_user(dbname, username, password)
        login_user = self.get_login_user(dbname)
        # auth with local ?
        local_user = self.get_login_user("local")
        # if we have the system user then always auth with it
        if local_user and users.is_system_user(local_user["username"]) and dbname != "local":
            local_db = self.get_db("local", retry=retry)
            return local_db.client.get_database(dbname)
        is_system_user = (login_user and
                          users.is_system_user(login_user.get("username")))
        # if there is no login user for this database then use admin db unless
        # it was specified not to
        # ALSO use admin if this is 'local' db for mongodb >= 2.6.0
        if ((not never_auth_with_admin and
             not login_user and
             dbname != "admin")
            or
            (dbname == "local" and
             not is_system_user and
             not self.supports_local_users())):
            # if this passes then we are authed!
            admin_db = self.get_db("admin", retry=retry)
            return admin_db.client.get_database(dbname)
        # no retries on local db, so if we fail to auth to local we always
        # attempt to use admin
        retry = retry and dbname != "local"
        auth_success = self.authenticate_db(db, dbname, retry=retry)
        # If auth failed then give it a try by auth into admin db unless it
        # was specified not to
        if (not never_auth_with_admin and
                not auth_success
                and dbname != "admin"):
            admin_db = self.get_db("admin", retry=retry)
            return admin_db.client.get_database(dbname)
        if auth_success:
            return db
        else:
            raise MongoctlException("Failed to authenticate to %s db" % dbname)
###########################################################################
    def authenticate_db(self, db, dbname, retry=True):
        """
        Returns True if we manage to auth to the given db, else False.

        Starts from memoized/seed credentials, prompting interactively when
        missing; allows up to 3 attempts when ``retry`` is True, and
        memoizes working credentials on success.
        """
        log_verbose("Server '%s' attempting to authenticate to db '%s'" % (self.id, dbname))
        login_user = self.get_login_user(dbname)
        username = None
        password = None
        auth_success = False
        if login_user:
            username = login_user["username"]
            if "password" in login_user:
                password = login_user["password"]
        # have three attempts to authenticate
        no_tries = 0
        while not auth_success and no_tries < 3:
            if not username:
                username = read_username(dbname)
            if not password:
                password = self.lookup_password(dbname, username)
                if not password:
                    password = read_password("Enter password for user '%s\%s'"%
                                             (dbname, username))
            # if auth success then exit loop and memoize login
            try:
                auth_success = db.authenticate(username, password)
                log_verbose("Authentication attempt #%s to db '%s' result: %s" % (no_tries, dbname, auth_success))
            except OperationFailure, ofe:
                if "auth fails" in str(ofe):
                    auth_success = False
            if auth_success or not retry:
                break
            else:
                log_error("Invalid login!")
                # reset so the next attempt prompts again
                username = None
                password = None
            no_tries += 1
        if auth_success:
            self.set_login_user(dbname, username, password)
            log_verbose("Authentication Succeeded!")
        else:
            log_verbose("Authentication failed")
        return auth_success
###########################################################################
    def get_working_login(self, database, username=None, password=None):
        """
        authenticate to the specified database starting with specified
        username/password (if present), try to return a successful login
        within 3 attempts

        :return: (username, password) tuple; both None when no login user
                 was memoized.
        """
        # NOTE(review): this initial assignment is dead -- login_user is
        # unconditionally reassigned below
        login_user = None
        # this will authenticate and update login user
        self.get_db(database, username=username, password=password,
                    never_auth_with_admin=True)
        login_user = self.get_login_user(database)
        if login_user:
            username = login_user["username"]
            password = (login_user["password"] if "password" in login_user
                        else None)
        return username, password
###########################################################################
def is_online(self):
log_debug("(BEGIN) is_online() for %s" % self.id)
start_date = datetime.datetime.now()
result = False
try:
self.new_default_mongo_client()
result = True
except (OperationFailure, AutoReconnect), ofe:
log_exception(ofe)
result = "refused" not in str(ofe)
except ConnectionFailure, cfe:
log_exception(cfe)
result = "connection closed" in str(cfe)
duration = timedelta_total_seconds(datetime.datetime.now() - start_date)
log_debug("(BEGIN) is_online() for %s finished in %s seconds" % (self.id, duration))
return result
###########################################################################
    def can_function(self):
        """True when the server is connectable and reported no error;
        returns None (falsy) otherwise."""
        status = self.get_status()
        if status['connection']:
            if 'error' not in status:
                return True
            else:
                log_verbose("Error while connecting to server '%s': %s " %
                            (self.id, status['error']))
###########################################################################
    def is_online_locally(self):
        """True when the server is addressed locally AND is online."""
        return self.is_use_local() and self.is_online()
###########################################################################
def is_use_local(self):
return (self.get_address() is None or
is_assumed_local_server(self.id)
or self.is_local())
###########################################################################
def is_local(self):
try:
server_host = self.get_host_address()
return server_host is None or is_host_local(server_host)
except Exception, e:
log_exception(e)
log_error("Unable to resolve address '%s' for server '%s'."
" Cause: %s" %
(self.get_host_address(), self.id, e))
return False
###########################################################################
    def needs_to_auth(self, dbname):
        """
        Determines if the server needs to authenticate to the database.
        NOTE: we stopped depending on is_auth() since its only a configuration
        and may not be accurate

        Probes by listing collections unauthenticated and inspecting the
        failure text.
        """
        log_debug("Checking if server '%s' needs to auth on db '%s'...." %
                  (self.id, dbname))
        try:
            client = self.get_mongo_client()
            db = client.get_database(dbname)
            db.collection_names()
            result = False
        except (RuntimeError,Exception), e:
            log_exception(e)
            # updated for to handle auth failures from mongodb 3.6
            result = "authorized" in str(e) or "there are no users authenticated" in str(e)
        log_debug("needs_to_auth check for server '%s' on db '%s' : %s" %
                  (self.id, dbname, result))
        return result
###########################################################################
    def get_status(self, admin=False):
        """Return a status dict: 'connection' bool, optional 'error' /
        'timedOut' on failure, and 'serverStatusSummary' when ``admin``."""
        status = {}
        ## check if the server is online
        try:
            self.new_default_mongo_client()
            status['connection'] = True
            # grab status summary if it was specified + if i am not an arbiter
            if admin:
                server_summary = self.get_server_status_summary()
                status["serverStatusSummary"] = server_summary
        except (RuntimeError, Exception), e:
            log_exception(e)
            status['connection'] = False
            status['error'] = "%s" % e
            if "timed out" in status['error'].lower():
                status['timedOut'] = True
        return status
###########################################################################
def get_server_status_summary(self):
server_status = self.server_status()
connections = server_status['connections']
# remove totalCreated if it exists
if "totalCreated" in connections:
del(connections["totalCreated"])
server_summary = {
"host": server_status['host'],
"connections": connections,
"version": server_status['version']
}
return server_summary
###########################################################################
    def server_status(self):
        """Run the serverStatus admin command and return its result."""
        return self.db_command(SON([('serverStatus', 1)]), "admin")
###########################################################################
def get_uptime(self):
server_status = self._server_status_command()
if server_status:
return server_status.get("uptime")
###########################################################################
    def _server_status_command(self):
        """serverStatus result when online, else None (implicit)."""
        if self.is_online():
            return self.db_command(SON([('serverStatus', 1)]), "admin")
###########################################################################
def get_mongo_client(self):
if self._mongo_client is None:
self._mongo_client = self.new_default_mongo_client()
return self._mongo_client
###########################################################################
    def new_default_mongo_client(self):
        """Create a new client using this server's default client params
        (currently just the SSL settings)."""
        client_params = self.get_client_params()
        return self.new_mongo_client(**client_params)
###########################################################################
def new_mongo_client(self, **kwargs):
address = self.get_connection_address()
kwargs = kwargs or {}
if self.connection_timeout_ms:
kwargs["connectTimeoutMS"] = self.connection_timeout_ms
return mongo_utils.mongo_client(address, **kwargs)
###########################################################################
    def new_ssl_test_mongo_client(self):
        """Create a client that forces SSL (used to probe SSL support)."""
        options = {"ssl": True}
        return self.new_mongo_client(**options)
###########################################################################
def get_client_params(self):
params = {}
params.update(self.get_client_ssl_params())
return params
###########################################################################
    def get_client_ssl_params(self):
        """Decide SSL client params from the configured ssl mode.

        DISABLED/None -> no ssl; REQUIRE -> ssl; ALLOW -> ssl only when a
        plain connection fails; PREFER -> ssl only when an ssl connection
        succeeds. Returns {} or {"ssl": True}.
        """
        use_ssl = False
        client_ssl_mode = self.get_client_ssl_mode()
        if client_ssl_mode in [None, ClientSslMode.DISABLED]:
            use_ssl = False
        elif client_ssl_mode == ClientSslMode.REQUIRE:
            use_ssl = True
        elif client_ssl_mode == ClientSslMode.ALLOW:
            try:
                # attempt a plain connection
                self.new_mongo_client()
                use_ssl = False
            except Exception, e:
                use_ssl = True
        else:
            ## PREFER
            try:
                # attempt an ssl connection
                self.new_ssl_test_mongo_client()
                use_ssl = True
            except Exception, e:
                use_ssl = False
        ssl_params = {}
        if use_ssl:
            ssl_params["ssl"] = True
        return ssl_params
###########################################################################
    @property
    def connection_timeout_ms(self):
        # connect timeout (ms) applied to new clients; None leaves the
        # driver default
        return self._connection_timeout_ms
    @connection_timeout_ms.setter
    def connection_timeout_ms(self, val):
        self._connection_timeout_ms = val
###########################################################################
    def get_connection_address(self):
        """Pick (and cache) the address used for client connections,
        preferring whichever of local/remote address actually accepts
        connections, with a config-based fallback."""
        if self._connection_address:
            return self._connection_address
        # try to get the first working connection address
        # only use this technique if the server is not assumed locally
        if not is_assumed_local_server(self.id):
            if (self.is_use_local() and
                    self.has_connectivity_on(self.get_local_address())):
                self._connection_address = self.get_local_address()
            elif self.has_connectivity_on(self.get_address()):
                self._connection_address = self.get_address()
        # use old logic
        if not self._connection_address:
            if self.is_use_local():
                self._connection_address = self.get_local_address()
            else:
                self._connection_address = self.get_address()
        log_verbose("Using connection address '%s' for server '%s'" % (self._connection_address, self.id))
        return self._connection_address
###########################################################################
    def has_connectivity_on(self, address):
        """True when a MongoClient can be constructed against ``address``;
        any failure is logged and treated as no connectivity."""
        try:
            log_verbose("Checking if server '%s' is accessible on "
                        "address '%s'" % (self.id, address))
            mongo_utils.mongo_client(address)
            return True
        except Exception, e:
            log_exception(e)
            log_verbose("Check failed for server '%s' is accessible on "
                        "address '%s': %s" % (self.id, address, e))
            return False
###########################################################################
def get_rs_config(self):
rs_conf = None
try:
if self.version_greater_than_3_0():
rs_conf = self.db_command(SON([('replSetGetConfig', 1)]), "admin")["config"]
else:
rs_conf = self.get_db('local')['system.replset'].find_one()
except (Exception,RuntimeError), e:
log_debug("Error whille trying to read rs config from "
"server '%s': %s" % (self.id, e))
log_exception(e)
if type(e) == MongoctlException:
raise e
else:
log_verbose("Cannot get rs config from server '%s'. "
"cause: %s" % (self.id, e))
log_verbose("get_rs_config() for server '%s': Returning: %s" % (self.id, document_pretty_string(rs_conf)))
return rs_conf
###########################################################################
    def validate_local_op(self, op):
        """Ensure operation ``op`` targets a server local to this machine;
        raises MongoctlException otherwise. Skipped under --assume-local."""
        # If the server has been assumed to be local then skip validation
        if is_assumed_local_server(self.id):
            log_verbose("Skipping validation of server's '%s' address '%s' to be"
                        " local because --assume-local is on" %
                        (self.id, self.get_host_address()))
            return
        log_verbose("Validating server address: "
                    "Ensuring that server '%s' address '%s' is local on this "
                    "machine" % (self.id, self.get_host_address()))
        if not self.is_local():
            log_verbose("Server address validation failed.")
            raise MongoctlException("Cannot %s server '%s' on this machine "
                                    "because server's address '%s' does not appear "
                                    "to be local to this machine. Pass the "
                                    "--assume-local option if you are sure that "
                                    "this server should be running on this "
                                    "machine." % (op,
                                                  self.id,
                                                  self.get_host_address()))
        else:
            log_verbose("Server address validation passed. "
                        "Server '%s' address '%s' is local on this "
                        "machine !" % (self.id, self.get_host_address()))
###########################################################################
    def log_server_activity(self, activity):
        """Record an activity entry for this server in the activity
        collection, when activity logging is enabled."""
        if is_logging_activity():
            log_record = {"op": activity,
                          "ts": datetime.datetime.utcnow(),
                          "serverDoc": self.get_document(),
                          "server": self.id,
                          "serverDisplayName": self.get_description()}
            log_verbose("Logging server activity \n%s" %
                        document_pretty_string(log_record))
            repository.get_activity_collection().insert(log_record)
###########################################################################
    def needs_repl_key(self):
        """
        True when this server's version supports key files AND it belongs to
        a cluster that has a repl key configured.
        """
        cluster = self.get_cluster()
        return (self.supports_repl_key() and
                cluster is not None and cluster.get_repl_key() is not None)
###########################################################################
    def supports_repl_key(self):
        """
        True when the server version supports repl key files: version is
        unknown (None) or >= REPL_KEY_SUPPORTED_VERSION.
        """
        version = self.get_mongo_version_info()
        return (version is None or
                version >= make_version_info(REPL_KEY_SUPPORTED_VERSION))
###########################################################################
def get_pid(self):
pid_file_path = self.get_pid_file_path()
if os.path.exists(pid_file_path):
pid_file = open(pid_file_path, 'r')
pid = pid_file.readline().strip('\n')
if pid and pid.isdigit():
return int(pid)
else:
log_warning("Unable to determine pid for server '%s'. "
"Not a valid number in '%s"'' %
(self.id, pid_file_path))
else:
log_warning("Unable to determine pid for server '%s'. "
"pid file '%s' does not exist" %
(self.id, pid_file_path))
return None
###########################################################################
    def is_server_pid_alive(self):
        """Truthy when the pid file yields a pid of a live process."""
        pid = self.get_pid()
        return pid and is_pid_alive(pid)
###############################################################################
    def supports_local_users(self):
        """Truthy for known pre-2.6 servers, which allow users defined on
        the 'local' database."""
        version = self.get_mongo_version_info()
        return version and version < VERSION_2_6
###############################################################################
    def try_on_auth_failures(self):
        """Whether to attempt commands unauthenticated first and retry on
        auth failure (servers >= 3.0)."""
        return self.version_greater_than_3_0()
###############################################################################
    def version_greater_than_3_0(self):
        """Truthy when the known server version is >= 3.0."""
        version = self.get_mongo_version_info()
        return version and version >= VERSION_3_0
###############################################################################
def is_logging_activity():
    """True when a db repository is in use and 'logServerActivity' is
    enabled in mongoctl config."""
    return (repository.consulting_db_repository() and
            config.get_mongoctl_config_val("logServerActivity" , False))
###############################################################################
# server ids the user asserted (via --assume-local) to be on this machine
__assumed_local_servers__ = []

def assume_local_server(server_id):
    """Mark ``server_id`` as assumed-local so address validation is skipped."""
    global __assumed_local_servers__
    if server_id in __assumed_local_servers__:
        return
    log_verbose("Assuming server '%s' to be local" % server_id)
    __assumed_local_servers__.append(server_id)
###############################################################################
def is_assumed_local_server(server_id):
    """True when ``server_id`` was marked assumed-local."""
    global __assumed_local_servers__
    return server_id in __assumed_local_servers__
###############################################################################
def set_client_ssl_mode(mode):
    """Set the global client SSL mode; raises MongoctlException for an
    unrecognized mode."""
    allowed_modes = [ClientSslMode.DISABLED,
                     ClientSslMode.ALLOW,
                     ClientSslMode.REQUIRE,
                     ClientSslMode.PREFER]
    if mode not in allowed_modes:
        # fixed typo in error message ("Mush" -> "Must")
        raise MongoctlException("Invalid ssl mode '%s'. Must choose from %s" %
                                (mode, allowed_modes))
    global CLIENT_SSL_MODE
    CLIENT_SSL_MODE = mode
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/objects/server.py",
"copies": "1",
"size": "41911",
"license": "mit",
"hash": 1707077582523177700,
"line_mean": 37.4504587156,
"line_max": 118,
"alpha_frac": 0.4588055642,
"autogenerated": false,
"ratio": 4.971059186336141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5929864750536141,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import platform
import urllib
import shutil
from subprocess import CalledProcessError
import mongoctl.config as config
from mongoctl.prompt import prompt_execute_task
from mongoctl.mongodb_version import MongoDBEdition
from mongoctl.mongoctl_logging import *
from mongoctl.errors import MongoctlException
from mongoctl.utils import (
download_url, extract_archive, call_command, which, ensure_dir,
validate_openssl, execute_command, list_dir_files, is_exe
)
from mongoctl.mongodb_version import make_version_info, is_valid_version_info
from mongoctl.commands.command_utils import (
find__all_mongo_installations, get_mongo_installation
)
from mongoctl.binary_repo import download_mongodb_binary, get_template_args
###############################################################################
# CONSTS
###############################################################################
LATEST_VERSION_FILE_URL = "https://raw.github.com/mongolab/mongoctl/master/" \
"mongo_latest_stable_version.txt"
###############################################################################
# install command
###############################################################################
def install_command(parsed_options):
    """Handle the 'install' CLI command: install the requested MongoDB
    version/edition, optionally building from source."""
    install_mongodb(parsed_options.version,
                    mongodb_edition=parsed_options.edition,
                    from_source=parsed_options.fromSource,
                    build_threads=parsed_options.buildThreads,
                    build_tmp_dir=parsed_options.buildTmpDir,
                    include_only=parsed_options.includeOnly)
###############################################################################
# uninstall command
###############################################################################
def uninstall_command(parsed_options):
    """Handle the 'uninstall' CLI command."""
    uninstall_mongodb(parsed_options.version, edition=parsed_options.edition)
###############################################################################
# list-versions command
###############################################################################
def list_versions_command(parsed_options):
mongo_installations = find__all_mongo_installations()
bar = "-" * 80
print bar
formatter = "%-20s %-20s %s"
print formatter % ("VERSION", "EDITION", "LOCATION")
print bar
for install_dir,version in mongo_installations:
print formatter % (version.version_number,
version.edition, install_dir)
print "\n"
###############################################################################
# install_mongodb
###############################################################################
def install_mongodb(mongodb_version=None, mongodb_edition=None, from_source=False,
build_threads=1,
build_tmp_dir=None,
include_only=None):
if mongodb_version is None:
mongodb_version = fetch_latest_stable_version()
log_info("Installing latest stable MongoDB version '%s'..." %
mongodb_version)
version_info = make_version_info(mongodb_version, mongodb_edition)
mongo_installation = get_mongo_installation(version_info)
mongodb_edition = version_info.edition
if mongo_installation is not None: # no-op
log_info("You already have MongoDB %s installed ('%s'). "
"Nothing to do." % (version_info, mongo_installation))
return mongo_installation
target_dir = get_install_target_dir(mongodb_version, mongodb_edition)
if os.path.exists(target_dir):
raise MongoctlException("Target directory '%s' already exists" %
target_dir)
if mongodb_edition not in MongoDBEdition.ALL:
raise MongoctlException("Unknown edition '%s'. Please select from %s" %
(mongodb_edition, MongoDBEdition.ALL))
if from_source:
install_from_source(mongodb_version, mongodb_edition,
build_threads=build_threads,
build_tmp_dir=build_tmp_dir)
return
bits = platform.architecture()[0].replace("bit", "")
os_name = platform.system().lower()
if os_name == 'darwin' and platform.mac_ver():
os_name = "osx"
mongodb_installs_dir = config.get_mongodb_installs_dir()
if not mongodb_installs_dir:
raise MongoctlException("No mongoDBInstallationsDirectory configured"
" in mongoctl.config")
# ensure the mongo installs dir
ensure_dir(mongodb_installs_dir)
platform_spec = get_validate_platform_spec(os_name, bits)
log_verbose("INSTALL_MONGODB: OS='%s' , BITS='%s' , VERSION='%s', "
"PLATFORM_SPEC='%s'" % (os_name, bits, version_info,
platform_spec))
# XXX LOOK OUT! Two processes installing same version simultaneously => BAD.
# TODO: mutex to protect the following
try:
## download the url
archive_path = download_mongodb_binary(mongodb_version,
mongodb_edition)
archive_name = os.path.basename(archive_path)
mongo_dir_name = extract_archive(archive_name)
# apply include_only if specified
if include_only:
apply_include_only(mongo_dir_name, include_only)
log_info("Deleting archive %s" % archive_name)
os.remove(archive_name)
target_dir_name = os.path.basename(target_dir)
os.rename(mongo_dir_name, target_dir_name)
# move target to mongodb install dir (Unless target is already there!
# i.e current working dir == mongodb_installs_dir
if os.getcwd() != mongodb_installs_dir:
log_info("Moving extracted folder to %s" % mongodb_installs_dir)
shutil.move(target_dir_name, mongodb_installs_dir)
install_dir = os.path.join(mongodb_installs_dir, mongo_dir_name)
# install validation
validate_mongodb_install(target_dir)
log_info("MongoDB %s installed successfully!" % version_info)
return install_dir
except Exception, e:
log_exception(e)
msg = "Failed to install MongoDB '%s'. Cause: %s" % (version_info, e)
raise MongoctlException(msg)
###############################################################################
# install from source
###############################################################################
def install_from_source(mongodb_version, mongodb_edition, build_threads=None,
                        build_tmp_dir=None):
    """
    Download the MongoDB source tarball from github for the given version,
    build it with scons, and install it into the configured installs dir.

    :param mongodb_version: MongoDB version string to build (github tag r<version>)
    :param mongodb_edition: edition to build; only COMMUNITY and COMMUNITY_SSL allowed
    :param build_threads: optional number of parallel scons jobs (-j)
    :param build_tmp_dir: optional working directory for download/build
    :raises MongoctlException: on disallowed edition or missing github release
    """
    version_info = make_version_info(mongodb_version, mongodb_edition)
    if build_tmp_dir:
        ensure_dir(build_tmp_dir)
        os.chdir(build_tmp_dir)
    # source builds are only supported for community editions
    allowed_build_editions = [MongoDBEdition.COMMUNITY,
                              MongoDBEdition.COMMUNITY_SSL]
    if mongodb_edition not in allowed_build_editions:
        raise MongoctlException("build is only allowed for %s editions" %
                                allowed_build_editions)
    log_info("Installing MongoDB '%s %s' from source" % (mongodb_version,
                                                         mongodb_edition))
    source_archive_name = "r%s.tar.gz" % mongodb_version
    target_dir = get_install_target_dir(mongodb_version, mongodb_edition)
    source_url = ("https://github.com/mongodb/mongo/archive/%s" %
                  source_archive_name)
    # probe the url first so we can fail with a friendly message
    response = urllib.urlopen(source_url)
    if response.getcode() != 200:
        msg = ("Unable to find a mongodb release for version '%s' in MongoDB"
               " github repo. See https://github.com/mongodb/mongo/releases "
               "for possible releases (response code '%s'). " %
               (mongodb_version, response.getcode()))
        raise MongoctlException(msg)
    log_info("Downloading MongoDB '%s' source from github '%s' ..." %
             (mongodb_version, source_url))
    download_url(source_url)
    log_info("Extract source archive ...")
    source_dir = extract_archive(source_archive_name)
    log_info("Building with scons!")
    scons_exe = which("scons")
    if not scons_exe:
        raise MongoctlException("scons command not found in your path")
    scons_cmd = [scons_exe, "core", "tools", "install"]
    if build_threads:
        scons_cmd.extend(["-j", str(build_threads)])
    scons_cmd.append("--prefix=%s" % target_dir)
    if mongodb_edition == MongoDBEdition.COMMUNITY_SSL:
        validate_openssl()
        scons_cmd.append("--ssl")
    log_info("Running scons command: %s" % " ".join(scons_cmd))
    call_command(scons_cmd, cwd=source_dir)
    # cleanup: remove the downloaded tarball and extracted source tree;
    # failures here are logged but do not abort the install
    log_info("Cleanup")
    try:
        os.remove(source_archive_name)
        shutil.rmtree(source_dir)
    except Exception, e:
        log_error(str(e))
        log_exception(e)
    # install validation
    validate_mongodb_install(target_dir)
    log_info("MongoDB %s installed successfully!" % version_info)
###############################################################################
def validate_mongodb_install(install_dir):
    """
    Sanity-check an installation by running `mongod --version` out of its
    bin directory; raise MongoctlException when the command fails.
    """
    log_info("Verifying mongodb installation %s" % install_dir)
    version_cmd = [os.path.join(install_dir, "bin", "mongod"), "--version"]
    try:
        execute_command(version_cmd)
    except CalledProcessError, cpe:
        log_exception(cpe)
        raise MongoctlException("MongoDB installation failed. Validation command %s failed with error: %s" %
                                (" ".join(version_cmd), cpe.output))
    log_info("Validation passed!")
###############################################################################
# uninstall_mongodb
###############################################################################
def uninstall_mongodb(version_number, edition=None):
    """
    Remove an installed MongoDB version after confirming with the user.

    :param version_number: MongoDB version string to uninstall
    :param edition: optional edition qualifier
    :raises MongoctlException: when the version string is invalid
    """
    version_info = make_version_info(version_number, edition=edition)
    # validate version string
    if not is_valid_version_info(version_info):
        raise MongoctlException("Invalid version '%s'. Please provide a"
                                " valid MongoDB version." % version_info)
    mongo_installation = get_mongo_installation(version_info)
    if mongo_installation is None: # no-op
        msg = ("Cannot find a MongoDB installation for version '%s'. Please"
               " use list-versions to see all possible versions " %
               version_info)
        log_info(msg)
        return
    log_info("Found MongoDB '%s' in '%s'" % (version_info, mongo_installation))
    # deletion is deferred into a closure so it only runs if the user confirms
    def rm_mongodb():
        # make sure that the mongo installation to be removed does not have
        # any running processes
        ensure_mongo_home_not_used(mongo_installation)
        log_info("Deleting '%s'" % mongo_installation)
        shutil.rmtree(mongo_installation)
        log_info("MongoDB '%s' Uninstalled successfully!" % version_info)
    prompt_execute_task("Proceed uninstall?" , rm_mongodb)
###############################################################################
def fetch_latest_stable_version():
    """
    Fetch and return the latest stable MongoDB version string from the
    configured version file URL; raise MongoctlException on a non-200 response.
    """
    response = urllib.urlopen(LATEST_VERSION_FILE_URL)
    status_code = response.getcode()
    if status_code != 200:
        raise MongoctlException("Unable to fetch MongoDB latest stable version"
                                " from '%s' (Response code %s)" %
                                (LATEST_VERSION_FILE_URL, status_code))
    return response.read().strip()
###############################################################################
def get_install_target_dir(mongodb_version, mongodb_edition):
    """
    Compute the full installation directory path for the given
    version/edition pair under the configured mongodb installs dir.
    """
    name_template = "mongodb-{platform_spec}-{mongodb_edition}-{mongodb_version}"
    template_args = get_template_args(mongodb_version, mongodb_edition)
    return os.path.join(config.get_mongodb_installs_dir(),
                        name_template.format(**template_args))
###############################################################################
def get_validate_platform_spec(os_name, bits):
    """
    Return the MongoDB download platform spec for the given OS name and
    bitness ("64"/"32"); raise MongoctlException for an unsupported OS.
    """
    if os_name not in ["linux", "osx", "win32", "sunos5"]:
        raise MongoctlException("Unsupported OS %s" % os_name)
    # 64-bit archives share one naming scheme across all supported OSes
    if bits == "64":
        return "%s-x86_64" % os_name
    # 32-bit specs differ per OS
    if os_name == "linux":
        return "linux-i686"
    if os_name in ["osx", "win32"]:
        return "%s-i386" % os_name
    if os_name == "sunos5":
        return "i86pc"
###############################################################################
def ensure_mongo_home_not_used(mongo_installation):
    """
    Raise MongoctlException when any currently running process (per `ps -eaf`)
    references the given installation directory.
    """
    ps_output = execute_command([
        "ps",
        "-eaf"
    ])
    if mongo_installation not in ps_output:
        return
    msg = ("ERROR: Cannot uninstall '%s' because its currently being used. "
           "Please terminate all running processes then try again." %
           mongo_installation)
    raise MongoctlException(msg)
###############################################################################
def apply_include_only(mongo_dir_name, include_only):
    """
    Delete executables from <mongo_dir_name>/bin that are not listed in
    include_only. "mongod" is always kept because it is used to determine
    the installation's version.

    :param mongo_dir_name: extracted mongodb directory
    :param include_only: list of executable names to keep
    """
    log_info("Keep include-only files (%s) from new mongo installation..." %
             include_only)
    bin_dir = os.path.join(mongo_dir_name, "bin")
    for exe_name in list_dir_files(bin_dir):
        # we always keep mongod because it used to determine mongo
        # installation version
        if exe_name == "mongod":
            continue
        exe_path = os.path.join(bin_dir, exe_name)
        if is_exe(exe_path) and exe_name not in include_only:
            log_info("Removing %s" % exe_name)
            os.remove(exe_path)
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/misc/install.py",
"copies": "1",
"size": "13808",
"license": "mit",
"hash": 7503289784046250000,
"line_mean": 35.3368421053,
"line_max": 108,
"alpha_frac": 0.5523609502,
"autogenerated": false,
"ratio": 4.297541238717709,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5349902188917709,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import pymongo
import pymongo.uri_parser
import pymongo.errors
import mongoctl_logging
from pymo import mongo_client as _mongo_client
###############################################################################
# db connection timeout, 10 seconds
CONN_TIMEOUT_MS = 10000
###############################################################################
def mongo_client(*args, **kwargs):
    """
    Wrapper around the pymo mongo client factory that enforces connection
    defaults: socket/connect timeouts and a single-connection pool.

    :param args: positional args forwarded to the client
    :param kwargs: keyword args forwarded to the client (timeouts/pool size
                   are overridden here)
    :return: a mongo client instance
    """
    kwargs = kwargs or {}
    timeout_ms = kwargs.get("connectTimeoutMS") or CONN_TIMEOUT_MS
    kwargs["socketTimeoutMS"] = timeout_ms
    kwargs["connectTimeoutMS"] = timeout_ms
    kwargs["maxPoolSize"] = 1
    # under pymongo 3.2, explicitly request an eager connect and bound the
    # server selection timeout (unless the caller set one)
    if pymongo.get_version_string().startswith("3.2"):
        if kwargs and kwargs.get("serverSelectionTimeoutMS") is None:
            kwargs["connect"] = True
            kwargs["serverSelectionTimeoutMS"] = timeout_ms
    return _mongo_client(*args, **kwargs)
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/mongo_utils.py",
"copies": "1",
"size": "1051",
"license": "mit",
"hash": 3581037885767684000,
"line_mean": 24.6341463415,
"line_max": 79,
"alpha_frac": 0.5613701237,
"autogenerated": false,
"ratio": 4.530172413793103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02508710801393728,
"num_lines": 41
} |
__author__ = 'abdul'
import repository
from mongoctl_logging import log_info, log_verbose, log_warning, log_exception, log_error
from pymongo.errors import OperationFailure, AutoReconnect
from errors import MongoctlException, is_auth_error
from prompt import read_password
###############################################################################
# Credentials supplied on the command line (-u/-p), applied globally.
# serverId of None means the credentials apply to all servers.
__global_login_user__ = {
    "serverId": None,
    "database": "admin",
    "username": None,
    "password": None
}
###############################################################################
def parse_global_login_user_arg(username, password, server_id):
    """Record the -u/-p/server command line credentials in the global login user."""
    # nothing to record unless a username was supplied
    if not username:
        return
    global __global_login_user__
    __global_login_user__["username"] = username
    __global_login_user__["password"] = password
    __global_login_user__["serverId"] = server_id
    # the __system user authenticates against the 'local' database
    if username == "__system":
        __global_login_user__["database"] = "local"
###############################################################################
def get_global_login_user(server, dbname):
    """
    Return the globally supplied (-u/-p) login user when it applies to the
    given server and database, otherwise None.
    """
    global __global_login_user__
    # all server or exact server + db match
    if ((not __global_login_user__["serverId"] or
         __global_login_user__["serverId"] == server.id) and
        __global_login_user__["username"] and
        __global_login_user__["database"] == dbname):
        return __global_login_user__
    # same cluster members and DB is not 'local'?
    if (__global_login_user__["serverId"] and
        __global_login_user__["database"] == dbname and
        dbname != "local"):
        global_login_server = repository.lookup_server(__global_login_user__["serverId"])
        global_login_cluster = global_login_server.get_replicaset_cluster()
        cluster = server.get_replicaset_cluster()
        # the global user also applies to other members of the same replica set
        if (global_login_cluster and cluster and
            global_login_cluster.id == cluster.id):
            return __global_login_user__
###############################################################################
def setup_server_users(server):
    """
    Seed the server's configured users (from get_seed_users()) into their
    databases, adding only users that do not exist yet. Admin users are
    seeded first, and are skipped entirely on slave servers (the primary
    handles them). Returns nothing; logs a summary of how many were added.
    """
    # NOTE: the string literal below is disabled code kept for reference
    """if not should_seed_users(server):
        log_verbose("Not seeding users for server '%s'" % server.id)
        return"""
    log_info("Checking if there are any users that need to be added for "
             "server '%s'..." % server.id)
    seed_users = server.get_seed_users()
    count_new_users = 0
    # Note: If server member of a replica then don't setup admin
    # users because primary server will do that at replinit
    # Now create admin ones
    if not server.is_slave():
        count_new_users += setup_server_admin_users(server)
    for dbname, db_seed_users in seed_users.items():
        # admin/local dbs are handled above (or by the primary), skip here
        if dbname in ["admin", "local"]:
            continue
        count_new_users += setup_server_db_users(server, dbname, db_seed_users)
    if count_new_users > 0:
        log_info("Added %s users." % count_new_users)
    else:
        log_verbose("Did not add any new users.")
###############################################################################
def setup_cluster_users(cluster, primary_server):
    """Seed the cluster's users by running user setup against its primary server."""
    log_verbose("Setting up cluster '%s' users using primary server '%s'" %
                (cluster.id, primary_server.id))
    return setup_server_users(primary_server)
###############################################################################
def should_seed_db_users(server, dbname):
    """Return True when database dbname has no users yet (i.e. safe to seed)."""
    log_verbose("See if we should seed users for database '%s'" % dbname)
    existing_users = list_database_users(server, server.get_db(dbname))
    return len(existing_users) == 0
###############################################################################
def setup_db_users(server, db, db_users):
    """
    Add each seed user to db, prompting for a password when the seed entry
    has none. Also adopts the first user as the db's login user if no login
    user is set yet.

    :return: number of users added
    """
    added = 0
    for user in db_users:
        username = user['username']
        log_verbose("adding user '%s' to db '%s'" % (username, db.name))
        password = user.get('password') or read_seed_password(db.name, username)
        _mongo_add_user(server, db, username, password)
        # if there is no login user for this db yet, adopt this new one
        if not server.get_login_user(db.name):
            server.set_login_user(db.name, username, password)
        added += 1
    return added
###############################################################################
def _mongo_add_user(server, db, username, password, read_only=False,
                    num_tries=1):
    """
    Add a user to db via db.add_user, retrying up to 3 times on
    AutoReconnect errors.

    :param server: the server being seeded (used for config-server check)
    :param db: database to add the user to
    :param username: new user's name
    :param password: new user's cleartext password
    :param read_only: passed through to db.add_user
    :param num_tries: internal retry counter; callers use the default
    """
    try:
        kwargs = {}
        if server.is_config_server():
            # majority is the only valid write concern when writing to config server replica sets
            kwargs["writeConcern"] = {"w": "majority"}
        # TODO this method is deprecated and will be removed in 4.0. So we should change this when we upgrade to 4.0
        db.add_user(username, password, read_only, **kwargs)
    except OperationFailure, ofe:
        # This is a workaround for PYTHON-407. i.e. catching a harmless
        # error that is raised after adding the first user
        if "login" in str(ofe):
            pass
        else:
            raise
    except AutoReconnect, ar:
        log_exception(ar)
        if num_tries < 3:
            log_warning("_mongo_add_user: Caught a AutoReconnect error. %s " %
                        ar)
            # check if the user/pass was saved successfully despite the error
            if db.authenticate(username, password):
                log_info("_mongo_add_user: user was added successfully. "
                         "no need to retry")
            else:
                log_warning("_mongo_add_user: re-trying ...")
                _mongo_add_user(server, db, username, password,
                                read_only=read_only, num_tries=num_tries+1)
        else:
            raise
###############################################################################
def list_database_users(server, db):
    """
    Return the list of user documents for db, reading from the db's own
    system.users collection when the server supports local users, otherwise
    from admin.system.users filtered by db name.
    """
    if server.supports_local_users():
        cursor = db['system.users'].find()
    else:
        cursor = server.get_db("admin")["system.users"].find({
            "db": db.name
        })
    return list(cursor)
###############################################################################
def setup_server_db_users(server, dbname, db_users):
    """
    Seed db_users into database dbname on server, only when the db has no
    users yet.

    :return: number of users added (0 when already seeded)
    :raises MongoctlException: when user setup fails
    """
    log_verbose("Checking if there are any users that needs to be added for "
                "database '%s'..." % dbname)
    if not should_seed_db_users(server, dbname):
        log_verbose("Not seeding users for database '%s'" % dbname)
        return 0
    db = server.get_db(dbname)
    try:
        any_new_user_added = setup_db_users(server, db, db_users)
        if not any_new_user_added:
            log_verbose("No new users added for database '%s'" % dbname)
        return any_new_user_added
    except Exception, e:
        log_exception(e)
        raise MongoctlException(
            "Error while setting up users for '%s'" \
            " database on server '%s'."
            "\n Cause: %s" % (dbname, server.id, e))
###############################################################################
def prepend_global_admin_user(other_users, server):
    """
    When making lists of administrative users -- e.g., seeding a new server --
    put the credentials supplied on the command line (if complete) at the
    head of the list; otherwise return the list unchanged.
    """
    cred0 = get_global_login_user(server, "admin")
    if not (cred0 and cred0["username"] and cred0["password"]):
        return other_users
    log_verbose("Seeding : CRED0 to the front of the line!")
    return [cred0] + other_users if other_users else [cred0]
###############################################################################
def setup_server_admin_users(server):
    """
    Seed the configured admin users on server: first ensure the root admin
    user exists (so subsequent operations can authenticate), then add the
    remaining configured admin users.

    :return: number of admin users added
    :raises MongoctlException: when adding the non-root admin users fails
    """
    admin_users = server.get_admin_users()
    if server.is_auth():
        # put command-line supplied credentials at the head of the list
        admin_users = prepend_global_admin_user(admin_users, server)
    if admin_users is None or len(admin_users) < 1:
        log_verbose("No users configured for admin DB...")
        return 0
    root_user_added = setup_root_admin_user(server, admin_users)
    if not root_user_added:
        log_verbose("Not seeding users for database 'admin'")
        return 0
    log_verbose("Checking setup for other admin users...")
    # the root user just added counts as one new user
    count_new_users = 1
    try:
        admin_db = server.get_db("admin")
        # the first admin user was handled by setup_root_admin_user
        count_new_users += setup_db_users(server, admin_db, admin_users[1:])
        return count_new_users
    except Exception, e:
        log_exception(e)
        raise MongoctlException(
            "Error while setting up admin users on server '%s'."
            "\n Cause: %s" % (server.id, e))
###############################################################################
def setup_root_admin_user(server, admin_users):
    """
    Ensure the root admin user exists on server. Tries to authenticate with
    the known admin login user (or the first configured admin user) and only
    adds the user when authentication fails.

    :return: True if the user was added; None when it already existed or no
             admin users were supplied; False when adding failed with an
             auth error.
    :raises MongoctlException: when adding fails with a non-auth error
    """
    log_info("Setting up root admin user...")
    if not admin_users:
        log_info("No admin users passed/configured. NOOP")
        return
    admin_user = server.get_login_user("admin") or admin_users[0]
    # connect without authentication so the very first user can be added
    admin_db = server.get_db("admin", no_auth=True)
    # try to authenticate with the admin user to see if it is already setup
    try:
        success = admin_db.authenticate(admin_user["username"], admin_user["password"])
    except OperationFailure, of:
        success = False
    if success:
        log_info("root admin user already added. NOOP")
        return
    try:
        log_info("Adding root admin user")
        _mongo_add_user(server, admin_db, admin_user["username"], admin_user["password"])
        # if there is no login user for this db then set it to this new one
        db_login_user = server.get_login_user("admin")
        if not db_login_user:
            server.set_login_user("admin", admin_user["username"], admin_user["password"])
        return True
    except Exception, ex:
        log_exception(ex)
        if is_auth_error(ex):
            log_error("Failed to add root admin user: %s" % ex)
            # attempt to recover by prompting for proper admin credz
            server.get_db("admin")
            return False
        else:
            raise MongoctlException("Failed to add root admin user: %s" % ex)
###############################################################################
def setup_server_local_users(server):
    """
    Seed configured users for the 'local' database when it has none yet.

    :return: number of users added (0 when already seeded or check failed)
    :raises MongoctlException: when user setup fails
    """
    seed_local_users = False
    try:
        local_db = server.get_db("local", retry=False)
        if not local_db['system.users'].find_one():
            seed_local_users = True
    except Exception, e:
        # best-effort check: on any error we simply skip seeding
        log_exception(e)
        pass
    if not seed_local_users:
        log_verbose("Not seeding users for database 'local'")
        return 0
    try:
        local_users = server.get_db_seed_users("local")
        if server.is_auth():
            # put command-line supplied credentials at the head of the list
            local_users = prepend_global_admin_user(local_users, server)
        if local_users:
            return setup_db_users(server, local_db, local_users)
        else:
            return 0
    except Exception, e:
        log_exception(e)
        raise MongoctlException(
            "Error while setting up local users on server '%s'."
            "\n Cause: %s" % (server.id, e))
###############################################################################
def read_seed_password(dbname, username):
    """Prompt the operator to create a password for a seed user."""
    prompt = ("Please create a password for user '%s' in DB '%s'" %
              (username, dbname))
    return read_password(prompt)
###############################################################################
# Login users
###############################################################################
# Global variable to hold logins for servers/clusters.
# Maps cluster id (for replica-set members) or server id to a dict of
# {dbname: {"username": ..., "password": ...}} -- see _get_server_login_record.
LOGIN_USERS = {}
###############################################################################
def set_server_login_user(server, dbname, username, password):
    """Remember the login credentials to use for dbname on this server."""
    record = _get_server_login_record(server)
    record[dbname] = {
        "username": username,
        "password": password
    }
###############################################################################
def get_server_login_user(server, dbname):
    """Return the remembered login user for dbname on this server, or None."""
    record = _get_server_login_record(server)
    return record.get(dbname) if record else None
###############################################################################
def _get_server_login_record(server, create_new=True):
    """
    Fetch (optionally creating) the login-record dict for a server.
    Replica-set members share a single record keyed by their cluster id;
    standalone servers are keyed by server id.
    """
    cluster = server.get_cluster()
    key = cluster.id if cluster is not None else server.id
    record = LOGIN_USERS.get(key)
    if not record and create_new:
        record = {}
        LOGIN_USERS[key] = record
    return record
###############################################################################
def is_system_user(username):
    """Return True when username is MongoDB's internal '__system' user."""
    return username == "__system"
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/users.py",
"copies": "1",
"size": "12981",
"license": "mit",
"hash": -4402335543446741500,
"line_mean": 35.776203966,
"line_max": 116,
"alpha_frac": 0.5311609275,
"autogenerated": false,
"ratio": 4.318363273453094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016850710628821524,
"num_lines": 353
} |
__author__ = 'abdul'
import repository
from mongoctl_logging import log_info, log_verbose, log_warning, log_exception
from pymongo.errors import OperationFailure, AutoReconnect
from errors import MongoctlException
from prompt import read_password
import pymongo.auth
import mongo_version
###############################################################################
# Credentials supplied on the command line (-u/-p), applied globally.
# serverId of None means the credentials apply to all servers.
__global_login_user__ = {
    "serverId": None,
    "database": "admin",
    "username": None,
    "password": None
}
###############################################################################
def parse_global_login_user_arg(username, password, server_id):
    """Record the -u/-p/server command line credentials in the global login user."""
    # nothing to record unless a username was supplied
    if not username:
        return
    global __global_login_user__
    __global_login_user__["username"] = username
    __global_login_user__["password"] = password
    __global_login_user__["serverId"] = server_id
###############################################################################
def get_global_login_user(server, dbname):
    """
    Return the globally supplied (-u/-p) login user when it applies to the
    given server and database, otherwise None.
    """
    global __global_login_user__
    # all server or exact server + db match
    if ((not __global_login_user__["serverId"] or
         __global_login_user__["serverId"] == server.id) and
        __global_login_user__["username"] and
        __global_login_user__["database"] == dbname):
        return __global_login_user__
    # same cluster members and DB is not 'local'?
    if (__global_login_user__["serverId"] and
        __global_login_user__["database"] == dbname and
        dbname != "local"):
        global_login_server = repository.lookup_server(__global_login_user__["serverId"])
        global_login_cluster = global_login_server.get_replicaset_cluster()
        cluster = server.get_replicaset_cluster()
        # the global user also applies to other members of the same replica set
        if (global_login_cluster and cluster and
            global_login_cluster.id == cluster.id):
            return __global_login_user__
###############################################################################
def setup_server_users(server):
    """
    Seed the server's configured users (from get_seed_users()) into their
    databases, adding only users that do not exist yet. Non-admin databases
    are seeded first; admin users are seeded last and are skipped on slave
    servers (the primary handles them). Logs a summary of how many were added.
    """
    # NOTE: the string literal below is disabled code kept for reference
    """if not should_seed_users(server):
        log_verbose("Not seeding users for server '%s'" % server.id)
        return"""
    log_info("Checking if there are any users that need to be added for "
             "server '%s'..." % server.id)
    seed_users = server.get_seed_users()
    count_new_users = 0
    for dbname, db_seed_users in seed_users.items():
        # create the admin ones last so we won't have an auth issue
        if (dbname == "admin"):
            continue
        count_new_users += setup_server_db_users(server, dbname, db_seed_users)
    # Note: If server member of a replica then don't setup admin
    # users because primary server will do that at replinit
    # Now create admin ones
    if not server.is_slave():
        count_new_users += setup_server_admin_users(server)
    if count_new_users > 0:
        log_info("Added %s users." % count_new_users)
    else:
        log_verbose("Did not add any new users.")
###############################################################################
def setup_cluster_users(cluster, primary_server):
    """Seed the cluster's users by running user setup against its primary server."""
    log_verbose("Setting up cluster '%s' users using primary server '%s'" %
                (cluster.id, primary_server.id))
    return setup_server_users(primary_server)
###############################################################################
def should_seed_users(server):
    """
    Return True when no database on server has any users yet (safe to seed);
    False when any db has users or the check fails.
    """
    log_verbose("See if we should seed users for server '%s'" %
                server.id)
    try:
        connection = server.get_db_connection()
        dbnames = connection.database_names()
        for dbname in dbnames:
            if connection[dbname]['system.users'].find_one():
                return False
        return True
    except Exception, e:
        # best-effort: any failure means "don't seed"
        log_exception(e)
        return False
###############################################################################
def should_seed_db_users(server, dbname):
log_verbose("See if we should seed users for database '%s'" % dbname)
try:
connection = server.get_db_connection()
if connection[dbname]['system.users'].find_one():
return False
else:
return True
except Exception, e:
log_exception(e)
return False
###############################################################################
def setup_db_users(server, db, db_users):
    """
    Add each seed user to db, prompting for a password when the seed entry
    has none. Also adopts the first user as the db's login user if no login
    user is set yet.

    :return: number of users added
    """
    added = 0
    for user in db_users:
        username = user['username']
        log_verbose("adding user '%s' to db '%s'" % (username, db.name))
        password = user.get('password') or read_seed_password(db.name, username)
        _mongo_add_user(server, db, username, password)
        # if there is no login user for this db yet, adopt this new one
        if not server.get_login_user(db.name):
            server.set_login_user(db.name, username, password)
        added += 1
    return added
###############################################################################
DEV_2_6_VERSION = mongo_version.version_obj("2.5.3")
###############################################################################
def _mongo_add_user(server, db, username, password, read_only=False,
                    num_tries=1):
    """
    Add a user to db via db.add_user, retrying up to 3 times on
    AutoReconnect errors. Servers at/above the 2.6-dev threshold get the
    extra kwargs built by _make_2_6_dev_add_user_kwargs.

    :param num_tries: internal retry counter; callers use the default
    """
    try:
        kwargs = {}
        version = server.get_mongo_version_obj()
        if version and version >= DEV_2_6_VERSION:
            kwargs = _make_2_6_dev_add_user_kwargs(db, username, password)
        db.add_user(username, password, read_only, **kwargs)
    except OperationFailure, ofe:
        # This is a workaround for PYTHON-407. i.e. catching a harmless
        # error that is raised after adding the first user
        if "login" in str(ofe):
            pass
        else:
            raise
    except AutoReconnect, ar:
        log_exception(ar)
        if num_tries < 3:
            log_warning("_mongo_add_user: Caught a AutoReconnect error. %s " %
                        ar)
            # check if the user/pass was saved successfully despite the error
            if db.authenticate(username, password):
                log_info("_mongo_add_user: user was added successfully. "
                         "no need to retry")
            else:
                log_warning("_mongo_add_user: re-trying ...")
                _mongo_add_user(server, db, username, password,
                                read_only=read_only, num_tries=num_tries+1)
        else:
            raise
###############################################################################
def _make_2_6_dev_add_user_kwargs(db, username, password):
    """
    Build the extra add_user kwargs used for 2.6-dev and later servers:
    a root role on the db plus MONGODB-CR credentials.
    """
    digest = pymongo.auth._password_digest(username, password)
    root_role = {
        "role": "root",
        "db": db.name,
        "hasRole": True,
        "canDelegate": False
    }
    return {
        "db": db.name,
        "roles": [root_role],
        "credentials": {"MONGODB-CR": digest}
    }
###############################################################################
def setup_server_db_users(server, dbname, db_users):
    """
    Seed db_users into database dbname on server, only when the db has no
    users yet.

    :return: number of users added (0 when already seeded)
    :raises MongoctlException: when user setup fails
    """
    log_verbose("Checking if there are any users that needs to be added for "
                "database '%s'..." % dbname)
    if not should_seed_db_users(server, dbname):
        log_verbose("Not seeding users for database '%s'" % dbname)
        return 0
    db = server.get_db(dbname)
    try:
        any_new_user_added = setup_db_users(server, db, db_users)
        if not any_new_user_added:
            log_verbose("No new users added for database '%s'" % dbname)
        return any_new_user_added
    except Exception, e:
        log_exception(e)
        raise MongoctlException(
            "Error while setting up users for '%s'" \
            " database on server '%s'."
            "\n Cause: %s" % (dbname, server.id, e))
###############################################################################
def prepend_global_admin_user(other_users, server):
    """
    When making lists of administrative users -- e.g., seeding a new server --
    put the credentials supplied on the command line (if complete) at the
    head of the list; otherwise return the list unchanged.
    """
    cred0 = get_global_login_user(server, "admin")
    if not (cred0 and cred0["username"] and cred0["password"]):
        return other_users
    log_verbose("Seeding : CRED0 to the front of the line!")
    return [cred0] + other_users if other_users else [cred0]
###############################################################################
def setup_server_admin_users(server):
    """
    Seed the configured admin users into the 'admin' database when it has
    no users yet.

    :return: number of admin users added
    :raises MongoctlException: when user setup fails
    """
    if not should_seed_db_users(server, "admin"):
        log_verbose("Not seeding users for database 'admin'")
        return 0
    admin_users = server.get_admin_users()
    if server.is_auth():
        # put command-line supplied credentials at the head of the list
        admin_users = prepend_global_admin_user(admin_users, server)
    if (admin_users is None or len(admin_users) < 1):
        log_verbose("No users configured for admin DB...")
        return 0
    log_verbose("Checking setup for admin users...")
    count_new_users = 0
    try:
        admin_db = server.get_db("admin")
        # potentially create the 1st admin user
        count_new_users += setup_db_users(server, admin_db, admin_users[0:1])
        # the 1st-time init case:
        # BEFORE adding 1st admin user, auth. is not possible --
        # only localhost cxn gets a magic pass.
        # AFTER adding 1st admin user, authentication is required;
        # so, to be sure we now have authenticated cxn, re-pull admin db:
        admin_db = server.get_db("admin")
        # create the rest of the users
        count_new_users += setup_db_users(server, admin_db, admin_users[1:])
        return count_new_users
    except Exception, e:
        log_exception(e)
        raise MongoctlException(
            "Error while setting up admin users on server '%s'."
            "\n Cause: %s" % (server.id, e))
###############################################################################
def setup_server_local_users(server):
    """
    Seed configured users for the 'local' database when it has none yet.

    :return: number of users added (0 when already seeded or check failed)
    :raises MongoctlException: when user setup fails
    """
    seed_local_users = False
    try:
        local_db = server.get_db("local", retry=False)
        if not local_db['system.users'].find_one():
            seed_local_users = True
    except Exception, e:
        # best-effort check: on any error we simply skip seeding
        log_exception(e)
        pass
    if not seed_local_users:
        log_verbose("Not seeding users for database 'local'")
        return 0
    try:
        local_users = server.get_db_seed_users("local")
        if server.is_auth():
            # put command-line supplied credentials at the head of the list
            local_users = prepend_global_admin_user(local_users, server)
        if local_users:
            return setup_db_users(server, local_db, local_users)
        else:
            return 0
    except Exception, e:
        log_exception(e)
        raise MongoctlException(
            "Error while setting up local users on server '%s'."
            "\n Cause: %s" % (server.id, e))
###############################################################################
def read_seed_password(dbname, username):
    """Prompt the operator to create a password for a seed user."""
    prompt = ("Please create a password for user '%s' in DB '%s'" %
              (username, dbname))
    return read_password(prompt)
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/users.py",
"copies": "2",
"size": "11344",
"license": "mit",
"hash": 680903846841352800,
"line_mean": 35.5935483871,
"line_max": 89,
"alpha_frac": 0.5269746121,
"autogenerated": false,
"ratio": 4.280754716981132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013819074120918796,
"num_lines": 310
} |
__author__ = 'abdul'
import sys
import getpass
from errors import MongoctlException
###############################################################################
# Global flags and their functions
###############################################################################
# Global flag: may mongoctl prompt the operator interactively?
__interactive_mode__ = True
def set_interactive_mode(value):
    """Globally enable/disable interactive prompting."""
    global __interactive_mode__
    __interactive_mode__ = value
###############################################################################
def is_interactive_mode():
    """Return True when interactive prompting is allowed."""
    global __interactive_mode__
    return __interactive_mode__
###############################################################################
# Global flags backing automatic yes/no answers to confirmation prompts
__say_yes_to_everything__ = False
__say_no_to_everything__ = False
###############################################################################
def say_yes_to_everything():
    """Make every confirmation prompt auto-answer 'yes'."""
    global __say_yes_to_everything__
    __say_yes_to_everything__ = True
###############################################################################
def is_say_yes_to_everything():
    """Return True when prompts should auto-answer 'yes'."""
    global __say_yes_to_everything__
    return __say_yes_to_everything__
###############################################################################
def say_no_to_everything():
    """Make every confirmation prompt auto-answer 'no'."""
    global __say_no_to_everything__
    __say_no_to_everything__ = True
###############################################################################
def is_say_no_to_everything():
    """Return True when prompts should auto-answer 'no'."""
    global __say_no_to_everything__
    return __say_no_to_everything__
###############################################################################
# NOTE(review): duplicate definition -- is_interactive_mode is already
# defined earlier in this module; this redefinition is redundant and
# could safely be removed.
def is_interactive_mode():
    """Return True when interactive prompting is allowed."""
    global __interactive_mode__
    return __interactive_mode__
###############################################################################
def read_input(message):
    """
    Print message to stderr and return a line read from stdin.

    :raises MongoctlException: in --noninteractive mode (prompting not allowed)
    """
    # If we are running in a noninteractive mode then fail
    if not is_interactive_mode():
        msg = ("Error while trying to prompt you for '%s'. Prompting is not "
               "allowed when running with --noninteractive mode. Please pass"
               " enough arguments to bypass prompting or run without "
               "--noninteractive" % message)
        raise MongoctlException(msg)
    print >> sys.stderr, message,
    return raw_input()
###############################################################################
def read_username(dbname):
    """
    Prompt the operator for a username for dbname.

    :raises MongoctlException: in --noninteractive mode (prompting not allowed)
    """
    if not is_interactive_mode():
        raise MongoctlException(
            "mongoctl needs username in order to proceed. Please pass the"
            " username using the -u option or run without --noninteractive")
    prompt = "Enter username for database '%s': " % dbname
    return read_input(prompt)
###############################################################################
def read_password(message=''):
    """
    Print *message* to stderr, then read a password without echoing it.

    Raises MongoctlException in --noninteractive mode, where prompting is
    not allowed.
    """
    if not is_interactive_mode():
        msg = ("mongoctl needs password in order to proceed. Please pass the"
               " password using the -p option or run without --noninteractive")
        raise MongoctlException(msg)
    print >> sys.stderr, message
    # getpass reads from the tty with echo disabled
    return getpass.getpass()
###############################################################################
def prompt_execute_task(message, task_function):
    """
    Ask the user to confirm *message*; run task_function only on a 'yes'.

    Returns a (ran, result) tuple where result is None when the task was
    skipped.
    """
    if prompt_confirm(message):
        return (True, task_function())
    return (False, None)
###############################################################################
def prompt_confirm(message):
    """
    Ask the user a yes/no question on stderr and return their answer as a
    bool. Honors --noninteractive / --no (always False) and --yes (always
    True) without prompting. Re-prompts until a valid answer is given.
    """
    # return False if noninteractive or --no was specified
    if (not is_interactive_mode() or
        is_say_no_to_everything()):
        return False
    # return True if --yes was specified
    if is_say_yes_to_everything():
        return True
    # accepted spellings and their boolean meaning
    valid_choices = {"yes":True,
                     "y":True,
                     "ye":True,
                     "no":False,
                     "n":False}
    while True:
        # trailing comma keeps the cursor on the question line (Python 2)
        print >> sys.stderr, message + " [y/n] ",
        sys.stderr.flush()
        choice = raw_input().lower()
        if not valid_choices.has_key(choice):
            print >> sys.stderr, ("Please respond with 'yes' or 'no' "
                                  "(or 'y' or 'n').\n")
        elif valid_choices[choice]:
            return True
        else:
            return False
| {
"repo_name": "richardxx/mongoctl-service",
"path": "mongoctl/prompt.py",
"copies": "3",
"size": "4256",
"license": "mit",
"hash": -6148542557409588000,
"line_mean": 34.173553719,
"line_max": 79,
"alpha_frac": 0.4494830827,
"autogenerated": false,
"ratio": 5.097005988023952,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7046489070723951,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import sys
import os
import traceback
import logging
import utils
import mongoctl_globals
from logging.handlers import TimedRotatingFileHandler
###############################################################################
# subdirectory (under the mongoctl conf root) that holds log files
LOG_DIR = "logs"
# lazily-created singleton; see get_logger()
logger = None
# logger settings
_log_to_stdout = True
_logging_level = logging.INFO
# custom level sitting between DEBUG (10) and INFO (20)
VERBOSE = 15
logging.addLevelName(VERBOSE, "VERBOSE")
###############################################################################
def get_logger():
    """
    Return the module's singleton logger, creating and wiring it on first
    call.

    Handlers are attached to the ROOT logger (a midnight-rotating file
    handler at DEBUG, plus an optional stdout handler at _logging_level);
    messages emitted on the returned "MongoctlLogger" child reach them via
    propagation.
    """
    global logger, _logging_level
    # singleton: only configure handlers once per process
    if logger:
        return logger
    logger = logging.getLogger("MongoctlLogger")
    log_file_name="mongoctl.log"
    conf_dir = mongoctl_globals.DEFAULT_CONF_ROOT
    log_dir = utils.resolve_path(os.path.join(conf_dir, LOG_DIR))
    utils.ensure_dir(log_dir)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(levelname)8s | %(asctime)s | %(message)s")
    logfile = os.path.join(log_dir, log_file_name)
    # rotate at midnight, keeping 50 old files
    fh = TimedRotatingFileHandler(logfile, backupCount=50, when="midnight")
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)
    # add the handler to the root logger
    logging.getLogger().addHandler(fh)
    global _log_to_stdout
    if _log_to_stdout:
        # bare-message console output, filtered by the configured level
        sh = logging.StreamHandler(sys.stdout)
        std_formatter = logging.Formatter("%(message)s")
        sh.setFormatter(std_formatter)
        sh.setLevel(_logging_level)
        logging.getLogger().addHandler(sh)
    return logger
###############################################################################
def setup_logging(log_level=logging.INFO, log_to_stdout=True):
    """
    Record logging preferences for the (not-yet-created) singleton logger.

    Must be called before the first get_logger() call to take effect;
    log_level only affects the stdout handler — the file handler always
    records at DEBUG.
    """
    global _logging_level, _log_to_stdout
    _logging_level = log_level
    _log_to_stdout = log_to_stdout
###############################################################################
def turn_logging_verbose_on():
    """Lower the stdout logging threshold to the custom VERBOSE level."""
    global _logging_level
    _logging_level = VERBOSE
###############################################################################
# Thin convenience wrappers around the singleton logger; each one forwards
# a single message at a fixed level.
def log_info(msg):
    get_logger().info(msg)
###############################################################################
def log_error(msg):
    get_logger().error(msg)
###############################################################################
def log_warning(msg):
    get_logger().warning(msg)
###############################################################################
def log_verbose(msg):
    # logs at the custom VERBOSE level (15), between DEBUG and INFO
    get_logger().log(VERBOSE, msg)
###############################################################################
def log_debug(msg):
    get_logger().debug(msg)
###############################################################################
def log_exception(exception):
    # records the exception and its full traceback at DEBUG level only
    log_debug("EXCEPTION: %s" % exception)
    log_debug(traceback.format_exc())
###############################################################################
def stdout_log(msg):
    # bypasses the logging machinery entirely and prints to stdout
    print msg
###############################################################################
def log_db_command(cmd):
    log_info( "Executing db command %s" % utils.document_pretty_string(cmd))
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/mongoctl_logging.py",
"copies": "1",
"size": "3054",
"license": "mit",
"hash": 3163634021946838000,
"line_mean": 28.3653846154,
"line_max": 79,
"alpha_frac": 0.4711853307,
"autogenerated": false,
"ratio": 4.684049079754601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5655234410454602,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import sys
import types
import inspect
from time import sleep, time as now
################################################################################
# Dealing with explosions - the Context Manager way
################################################################################
class robust_execution_context(object):
    """
    Context manager that runs registered rollback steps when the body
    raises, and registered cleanup steps when it exits normally.

    On error: handle_it() is invoked, then the rollback hook runs (LIFO),
    and the original exception propagates (__exit__ returns False).
    """
    def __init__(self, handle_it=None, abort_phrase="aborting", logger=None):
        # default handler is a harmless no-op
        self._handle_it = (lambda: 42) if handle_it is None else handle_it
        self._logger = _logger_for_sure(logger)
        self._abort_phrase = abort_phrase
        self._abort_banner = _banner("WHOOPS!", abort_phrase, "& rolling back!")
    def __enter__(self):
        def handle_it_n_rollback():
            self._handle_it()
            self.rollback()
        #set things up
        # rollback runs steps in reverse registration order; cleanup in order
        self._rollback_hook = Hook(lifo=True)
        self._cleanup_hook = Hook()
        (self._begin_to, self._abort) = make_abort_handler(handle_it_n_rollback,
                                                           self._abort_banner,
                                                           self._logger,
                                                           swallow=True)
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        #tear things down
        if exc_type is None:
            self.cleanup()
        else:
            self.abort(exc_type, exc_value, exc_traceback, swallow=True)
        return False # i.e., will propagate error/exception
    def push_rollback_step(self, f):
        # runs only on abortive exit
        self._rollback_hook.add(f)
    def clear_rollback_steps(self):
        self._rollback_hook.clear()
    def push_cleanup_step(self, f):
        # runs only on normal exit
        self._cleanup_hook.add(f)
    def push_unwind_step(self, f):
        """Pushes f so as to execute during either normal OR abortive exit."""
        self.push_rollback_step(f)
        self.push_cleanup_step(f)
    def begin_to(self, phase, morespew=""):
        # records the phase for use in abort-time log messages
        self._begin_to(phase, morespew)
    def abort(self, exc_type, exc_value, exc_traceback, swallow=False):
        self._abort(exc_value, swallow=swallow)
    def rollback(self):
        # rollback steps run at most once: the hook is cleared afterwards
        self._rollback_hook.run()
        self.clear_rollback_steps()
    def cleanup(self):
        self._cleanup_hook.run()
def _banner(*msg_parts):
banner_bracket = " *** !!! *** !!! *** !!! *** "
return banner_bracket + " ".join(msg_parts) + banner_bracket
################################################################################
# Dealing with explosions - the Functional way
################################################################################
def reraise(new_exc):
    """
    Raise *new_exc* while preserving the traceback of the exception
    currently being handled (Python 2 three-argument raise form).
    """
    (et, ev, etb) = sys.exc_info()
    raise new_exc, None, etb
def make_abort_handler(handle_it, abort_phrase="aborting", logger=None,
                       swallow=False):
    """
    Return a (begin_to, abort) pair of closures sharing a current-phase
    slot.

    begin_to(phase, morespew) records what is being attempted (for later
    log messages). abort(err, swallow) logs the failure, runs handle_it()
    between loud banners, then re-raises the pending exception unless
    swallow is True.
    """
    # mutable dict instead of nonlocal (Python 2 has no nonlocal statement)
    _phase = { 'ima' : "get it done" } # so you call this a closure...
    logger = _logger_for_sure(logger)
    def begin_to(phase, morespew=""):
        _phase['ima'] = phase
        logger.info("Setting about to %s %s..." % (phase, morespew))
    def abort(err=None, swallow=swallow):
        # capture the in-flight exception before handle_it() can clobber it
        (et, ev, etb) = sys.exc_info()
        whats_happening = ("%s after failing to %s" %
                           (abort_phrase, _phase['ima']))
        logger.error("%s : %s" % (whats_happening, err or "Ow."))
        logger.info(_banner(" handler execution commencing "))
        try:
            handle_it()
        except Exception, e:
            # the handler itself blew up: log and propagate with its traceback
            logger.error("Exception while %s : %s" % (whats_happening, e))
            reraise(e)
        except:
            logger.error("Unspecified error while %s - gosh!" % whats_happening)
            raise
        finally:
            logger.info(_banner(" handler execution concluded "))
        # Successfully handled original issue; now resume aborting
        if err is None:
            if not swallow:
                raise
        else:
            if not swallow:
                raise err, None, etb
    return (begin_to, abort)
class Hook(object):
    """
    A Hook collects functions to be executed later, in order.

    Registration order is preserved; construct with lifo=True to run them
    in reverse registration order instead.
    """
    def __init__(self, lifo=False):
        self.hook_functions = []
        self.lifo = lifo
    def clear(self):
        """Drop every registered function."""
        self.hook_functions = []
    def add(self, hook_fcn):
        """Register hook_fcn; falsy values (e.g. None) are ignored."""
        if hook_fcn:
            self.hook_functions.append(hook_fcn)
    def run(self):
        """Invoke all registered functions; return the list of results."""
        ordered = list(self.hook_functions)
        if self.lifo:
            ordered.reverse()
        return [fn() for fn in ordered]
def _logger_for_sure(a_logger_maybe):
if a_logger_maybe is not None:
almbrs = dict(inspect.getmembers(a_logger_maybe))
if all(map(lambda n: almbrs.has_key(n) and inspect.isroutine(almbrs[n]),
['error', 'warn', 'info', 'debug'])):
return a_logger_maybe
return NoOpLogger()
class NoOpLogger(object):
    """Logger stand-in whose methods accept anything and do nothing."""
    def error(self, *args, **kwargs):
        pass
    def warn(self, *args, **kwargs):
        pass
    def info(self, *args, **kwargs):
        pass
    def debug(self, *args, **kwargs):
        pass
################################################################################
# Living with properties (are they attributes? items? "pythonic"?)
################################################################################
def getprop(x, prop_name, val_if_missing=None):
    """
    Returns x[prop_name] when x is a non-empty dict-like object holding
    that property; otherwise val_if_missing.
    """
    # The original duck-type test relied solely on x.has_key(), which was
    # removed in Python 3; also accepting plain dicts (and using `in` for
    # the lookup) is equivalent under Python 2 and future-proof.
    x_has_prop = (bool(x)
                  and (isinstance(x, dict) or hasattr(x, "has_key"))
                  and prop_name in x)
    return x[prop_name] if x_has_prop else val_if_missing
def safe_getprop(x, prop_name, val_if_missing=None, val_if_error=None):
    """
    Like getprop, but any exception during the lookup yields val_if_error
    (falling back to val_if_missing when val_if_error is None).
    """
    try:
        return getprop(x, prop_name, val_if_missing)
    except Exception:
        return val_if_error if val_if_error is not None else val_if_missing
def get_dict_prop(x, dict_name, prop_name_within_dict, val_if_missing=None):
    """
    Look up prop_name_within_dict inside the dict stored at x[dict_name].
    Returns val_if_missing when either level is absent.
    """
    inner = getprop(x, dict_name)
    if inner is None or prop_name_within_dict not in inner:
        return val_if_missing
    return inner[prop_name_within_dict]
def getprop_chain(orig_x, *prop_seq, **kwargs):
    """
    Follow the given chain of properties, starting from orig_x.
    Example:
    >>> x = { 'a' : { 'b': { 'c' : 'surprise!' } } }
    >>> getprop_chain(x, 'a', 'b')
    {'c': 'surprise!'}
    Optional keyword arguments:
      val_if_missing : value to use when a property is absent
      safe           : swallow exceptions raised during lookup
      short_circuit  : return val_if_missing immediately at the first
                       missing property instead of continuing the walk
    """
    return getprop_sequence(orig_x, prop_seq, **kwargs)
def getprop_sequence(orig_x, prop_seq,
                     val_if_missing=None, safe=False, short_circuit=True):
    """
    Exactly like getprop_chain, but property chain is a single list argument.
    Example:
    >>> getprop_sequence(x, ['a', 'b'])
    {'c': 'surprise!'}
    """
    x = orig_x
    der_proppengetten = safe_getprop if safe else getprop
    __SENTINEL__ = object() # guaranteed unique!
    for prop in prop_seq:
        x = der_proppengetten(x, prop, val_if_missing=__SENTINEL__)
        # Compare by identity, not equality: `==` would invoke an
        # intermediate value's custom __eq__ (and can misfire or blow up,
        # e.g. on numpy arrays). Identity is the whole point of a sentinel.
        if x is __SENTINEL__:
            x = val_if_missing
            if short_circuit:
                break
    return x
def getprop_star(orig_x, prop_path,
                 val_if_missing=None, safe=False, short_circuit=True):
    """
    Follow the dot-separated "property path" down from orig_x.
    Example:
    >>> x = { 'a' : { 'b': { 'c' : 'surprise!' } } }
    >>> getprop_star(x, 'a.b')
    {'c': 'surprise!'}
    """
    path_components = prop_path.split('.')
    return getprop_sequence(orig_x, path_components,
                            val_if_missing=val_if_missing,
                            safe=safe, short_circuit=short_circuit)
def setprop_star(orig_x, prop_path, new_val):
    """
    Set the value at the dotted *prop_path* inside orig_x, creating
    intermediate dicts along the way as needed. Returns orig_x.
    """
    parts = prop_path.split('.')
    node = orig_x
    for part in parts[:-1]:
        child = getprop(node, part)
        if child is None:
            child = {}
            node[part] = child
        node = child
    node[parts[-1]] = new_val
    return orig_x
def make_property_overlayer(props_n_vals, propval_callback=None, ignore_nones=False):
    """
    Return a function that overlays the dotted-path -> value mapping
    props_n_vals onto a given object (via setprop_star), invoking
    propval_callback(path, old_val, new_val) for each applied change and
    skipping None values when ignore_nones is True.
    """
    def overlay_properties(substrate_obj):
        for k,v in props_n_vals.iteritems():
            old_val = getprop_star(substrate_obj, k)
            if v is not None or not ignore_nones:
                do_if_doable(propval_callback, k, old_val, v)
                setprop_star(substrate_obj, k, v)
        return substrate_obj
    return overlay_properties
# default parent walk: any object's parent is found under its 'parent' key
DEFAULT_HERITAGE_PATH = {'object': 'parent'}
def get_inheritable_prop(obj, prop_name, heritage_path=None, no_value=None):
    """
    Returns O[prop_name], where O == the 'obj' argument or its nearest ancestor.
    "Nearest ancestor" is defined as a walk up the object graph along some set
    of object properties, which set is specified by 'heritage_path'.
    By default, each object points to its parent via the property 'parent'.
    """
    heritage_path = heritage_path or DEFAULT_HERITAGE_PATH
    if obj is None:
        raise Exception("Cannot get properties from None object!")
    elif obj.has_key(prop_name):
        return obj[prop_name]
    else:
        # no_value is a predicate telling us which parent values count as
        # "absent"; default: only None
        no_value = ((lambda v: v is None) if no_value is None else no_value)
        # walk obj's type MRO looking for a heritage_path entry that tells
        # us how to reach the parent
        for iama in obj.__class__.__mro__:
            iama_type_fqn = (("" if iama.__module__ == '__builtin__' else
                              (iama.__module__ + "."))
                             +
                             iama.__name__)
            # HERITAGE_PATH keys may be <type 'type'> objects or fully qualified class names
            if heritage_path.has_key(iama):
                inherit_via = heritage_path[iama]
            elif heritage_path.has_key(iama_type_fqn):
                inherit_via = heritage_path[iama_type_fqn]
            else:
                continue
            # HERITAGE_PATH values may be functions to apply to obj, or attribute names within obj
            if inspect.isroutine(inherit_via):
                parent_maybe = inherit_via(obj)
            elif isinstance(inherit_via, basestring):
                parent_maybe = obj[inherit_via] if obj.has_key(inherit_via) else None
            else:
                raise Exception("Don't understand how to inherit from a %s via %s" %
                                (iama_type_fqn, inherit_via))
            if parent_maybe is None or no_value(parent_maybe):
                continue
            else:
                # recurse up the ancestry from the located parent
                return get_inheritable_prop(parent_maybe, prop_name, heritage_path)
    return None # live to recurse another day.
# yoinked from :
# http://stackoverflow.com/questions/3012421/python-lazy-property-decorator
def lazyprop(fn):
    """
    Decorator: a read-only property whose value is computed on first
    access, then cached on the instance under '_lazy_<name>'.
    """
    cache_attr = '_lazy_' + fn.__name__
    @property
    def _lazyprop(self):
        # compute once; later accesses hit the cached slot
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, fn(self))
        return getattr(self, cache_attr)
    return _lazyprop
################################################################################
# A dash of higher-order functional programming
################################################################################
def make_getprop(prop_name, val_if_missing=None):
    """
    Returns a unary function mapping x to getprop(x, prop_name,
    val_if_missing).
    """
    def _get(x):
        return getprop(x, prop_name, val_if_missing)
    return _get
def prop_equals(prop_name, val):
    """
    Returns a predicate that is True iff its argument holds prop_name and
    the stored value equals val.
    """
    def _pred(x):
        return prop_name in x and x[prop_name] == val
    return _pred
def is_equal(y):
    """Returns a predicate testing its argument for equality with y."""
    def _eq(candidate):
        return candidate == y
    return _eq
def find(f, seq, exc_msg=None):
    """
    Return first item in sequence for which f(item) tests True, else None.
    The item is boxed in a one-element list by the or_map predicate so
    that falsy-but-matching items (0, "", etc.) are still found.
    """
    boxed = or_map(lambda item: f(item) and [item], seq, exc_msg=exc_msg)
    return boxed[0] if boxed else None
def or_map(f, seq, exc_msg=None):
    """
    Apply f to each item, returning the first truthy result.
    When none is truthy: raise Exception(exc_msg) if exc_msg is given,
    otherwise return the last result (None for an empty sequence).
    """
    last_result = None
    for element in seq:
        last_result = f(element)
        if last_result:
            return last_result
    if exc_msg:
        raise Exception(exc_msg)
    return last_result
def partition(has_it, items):
    """
    Returns a 2-tuple containing the list of items satisfying
    and the list of items not satisfying the predicate has_it.
    """
    matched, unmatched = [], []
    for item in items:
        (matched if has_it(item) else unmatched).append(item)
    return (matched, unmatched)
#################### dict vs. tuple #####################################
##
## Transform simple dict objects into something hashable, and back again.
##
## Not indicated for use with dicts having complex (unhashable) values.
##
def dict_to_tuple(sd):
    """
    Turn a simple dict into a canonical (key-sorted), hashable tuple of
    pairs.
    >>> dict_to_tuple({'a':1, 'b':2})
    (('a', 1), ('b', 2))
    >>> dict_to_tuple({'a':1, 'b':2}) == dict_to_tuple({'b':2, 'a':1})
    True
    """
    return tuple(sorted(sd.items()))
def tuple_to_dict(tp):
    """
    Inverse of dict_to_tuple: rebuild a dict from a tuple of pairs.
    >>> x = tuple_to_dict((('a', 1), ('b', 2)))
    >>> x['b']
    2
    """
    return {k: v for k, v in tp}
#################### compose_dicts ######################################
def compose_dicts(*dict_list):
    """Merge the given dicts left-to-right; later dicts win on key clashes."""
    merged = {}
    for d in dict_list:
        merged.update(d)
    return merged
#################### extract_map ########################################
def extract_map( things,
                 key_extractor=None,
                 keys_extractor=None,
                 value_extractor=None,
                 values_extractor=None,
                 value_accumulator=None,
                 thing_summarizer=str,
                 knil=None,
                 exclude_none_key=False,
                 result=None ):
    """
    Fold *things* into a dict (or into *result* if given).

    Each thing yields one or more keys (key_extractor / keys_extractor; by
    default the thing itself) and one or more values (value_extractor /
    values_extractor). When a key repeats, values are combined with
    value_accumulator (default: +), optionally seeding the first value
    with knil. None keys are skipped when exclude_none_key is set.
    """
    result = {} if result is None else result
    thing2vals = _make_thing2stuffs( value_extractor, values_extractor )
    thing2keys = _make_thing2stuffs( key_extractor, keys_extractor )
    accumulate = ( value_accumulator if value_accumulator is not None else
                   lambda v_old, v_new: v_old + v_new )
    for thing in things:
        try:
            vals = thing2vals( thing )
            for k in thing2keys( thing ) :
                if k is None and exclude_none_key:
                    continue
                for v in vals :
                    if k in result :
                        # existing key: fold the new value in
                        result[k] = accumulate( result[k], v )
                    elif knil is not None:
                        # first value for this key, seeded with knil
                        result[k] = accumulate( knil, v )
                    else :
                        result[k] = v
        except Exception, e:
            # report which thing failed, then re-raise with its traceback
            (et, ev, etb) = sys.exc_info()
            print "Problem with %s : %s" % (thing_summarizer(thing), str(e))
            raise e, None, etb
    return result
def _make_thing2stuffs( stuff_ex, stuffs_ex ):
if stuffs_ex is not None:
return stuffs_ex
elif stuff_ex is not None:
return lambda thng: listify( stuff_ex( thng ) )
else:
return listify
# this is less clear than the previous?
def _make_thing2stuffzes( stuff_ex, stuffs_ex ):
return ( stuffs_ex if stuffs_ex is not None else
listify if stuff_ex is None else
lambda thng: [ stuff_ex( thng ) ] )
################################################################################
def listify( thing ):
return [ thing ]
################################################################################
# Robustification: functions & decorators to auto-retry failure-prone operations
################################################################################
from functools import wraps
def robustify(**retry_kwargs):
    """
    This decorator factory produces a decorator which wraps its decorated
    function with retry_till_done() (q.v.), invoked according to the given
    optional keyword arguments.
    >>> y = 3
    >>> #noinspection PyUnresolvedReferences
    >>> @robustify(max_attempts=3, failure_val="drat")
    ... def foo(x):
    ...     print "i think i can"
    ...     global y
    ...     y += x
    ...     if y < 10:
    ...         raise Exception("not there yet")
    ...     return y
    >>> foo(1)
    i think i can
    i think i can
    i think i can
    'drat'
    >>> foo(3)
    i think i can
    i think i can
    12
    Robustification ==> the Exception was never permitted to escape.
    """
    def robustificator(f):
        # @wraps preserves f's name/docstring on the wrapper
        @wraps(f)
        def f_robustified(*args, **kwargs):
            # every call re-runs f under the retry policy
            return retry_till_done(lambda: f(*args, **kwargs),
                                   **retry_kwargs)
        return f_robustified
    return robustificator
################################################################################
def robustify_methods(**retry_kwargs):
    """
    Class-decorator factory: wraps every method of the decorated class
    with retry_till_done() under the given retry policy.

    NOTE(review): dir(c) includes inherited and dunder methods, so those
    get robustified too when hasmethod() matches — confirm that is the
    intent before reuse.
    """
    # name -> original method; shared dict sidesteps Python 2's lack of
    # nonlocal for the per-method closures below
    original_methods = {}
    def method_robustificator(methname):
        orig_f = original_methods[methname]
        def f_robustified(*args, **kwargs):
            do_the_stuff = lambda: orig_f(*args, **kwargs)
            return retry_till_done(do_the_stuff, **retry_kwargs)
        return f_robustified
    def class_robustificator(c):
        for method_name in filter(lambda n: hasmethod(c, n), dir(c)):
            oldmeth = getattr(c, method_name)
            original_methods[method_name] = oldmeth # pyclosure ftl
            newmeth = method_robustificator(method_name)
            newmeth.__name__ = method_name + "_robustified"
            newmeth.__doc__ = (None if oldmeth.__doc__ is None else
                               oldmeth.__doc__ + "\n (Robustified)")
            setattr(c, method_name, newmeth)
        return c
    return class_robustificator
################################################################################
def retry_till_done(do_it, is_done=None,
                    max_wait_in_secs=None, max_attempts=None,
                    do_on_exception=True, do_on_error=None,
                    success_val=None, failure_val=False,
                    do_on_failure=None,
                    do_between_attempts=None, retry_interval=5):
    """
    Repeatedly calls do_it() until it succeeds or bails.
    Succeeds ==> does not raise an Error or Exception, and (if is_done is given)
                 is_done() is true.
             ==> Returns success_val if given (non-None), or else
                 [default] returns the value from the last do_it() call.
    Bails ==> an Error or Exception is not handled, or (if they are given)
              one of max_attempts or max_wait_in_secs has been exceeded.
          ==> Runs do_on_failure() if it is given, and then
          ==> Returns failure_val if given, or else the last do_it() return
          ==> Returns the last do_it() return if failure_val is None, or else
              [default] returns failure_val itself -- by default: False.
    Waits retry_interval seconds between do_it() attempts, after having
    run do_between_attempts() if that function is given..
    Errors and Exceptions are caught and handled according to
    do_on_error and do_on_exception, respectively. If the value is:
        True ==> keep on truckin' (silently mask)
        None or False ==> re-raise
        a function FOO ==> run FOO() and continue
    The default behavior is to re-raise Errors, and silently mask Exceptions.
    """
    num_attempts = 0
    start_time = now()
    succeeded = False
    how_was_it = None
    def had_enough():
        # budget check: attempt count and wall-clock limits, when given
        return ((max_attempts is not None and num_attempts >= max_attempts) or
                (max_wait_in_secs is not None and now() - start_time >= max_wait_in_secs))
    def handle_via(do_handling, *args):
        # called from inside an except block, so a bare `raise` re-raises
        # the exception currently being handled
        if isinstance(do_handling, bool) and do_handling:
            pass # True ==> persevere through all that
        elif not do_handling:
            raise # None/False ==> let that propagate back to caller
        else:
            do_if_doable(do_handling, *args)
    while not had_enough():
        try:
            if num_attempts > 0:
                # between-attempt callback runs before the sleep
                do_if_doable(do_between_attempts)
                sleep(retry_interval)
            num_attempts += 1
            how_was_it = do_it()
            succeeded = (is_done is None or is_done())
            if succeeded:
                break
        except Exception, e:
            handle_via(do_on_exception, e)
        except:
            # non-Exception raises (old-style/system errors)
            handle_via(do_on_error)
    if succeeded :
        return how_was_it if success_val is None else success_val
    else :
        do_if_doable(do_on_failure)
        return how_was_it if failure_val is None else failure_val
def do_if_doable(doable_or_none, *args):
    """Call doable_or_none(*args) when it is a function/method; else None."""
    if not is_applicable(doable_or_none):
        return None
    return doable_or_none(*args)
def is_applicable(doable_or_none):
    """True when the argument is a plain or builtin function/method."""
    callable_types = (types.FunctionType, types.MethodType,
                      types.BuiltinFunctionType,
                      types.BuiltinMethodType)
    return type(doable_or_none) in callable_types
################################################################################
# pyflection
################################################################################
# credit:
# http://stackoverflow.com/questions/1091259/how-to-test-if-a-class-attribute-is-an-instance-method
#
def hasmethod(kls, name):
    """True when *name* resolves on kls to a method object."""
    if not hasattr(kls, name):
        return False
    return type(getattr(kls, name)) == types.MethodType
| {
"repo_name": "gregbanks/robustify",
"path": "robustify/__init__.py",
"copies": "1",
"size": "21700",
"license": "mit",
"hash": -703810962950583400,
"line_mean": 33.1732283465,
"line_max": 101,
"alpha_frac": 0.5387557604,
"autogenerated": false,
"ratio": 3.890980814057737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4929736574457737,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.mongo_uri_tools import is_mongo_uri, parse_mongo_uri
from mongoctl.utils import resolve_path, ensure_dir
from mongoctl.mongoctl_logging import log_info , log_warning
from mongoctl.commands.command_utils import (
is_db_address, is_dbpath, extract_mongo_exe_options, get_mongo_executable,
options_to_command_args, VersionPreference
)
from mongoctl.errors import MongoctlException
from mongoctl.utils import call_command
from mongoctl.objects.server import Server
from mongoctl.mongodb_version import MongoDBVersionInfo
###############################################################################
# CONSTS
###############################################################################
# mongodump command-line flags that mongoctl will pass through when the
# user supplies them (see extract_mongo_dump_options)
SUPPORTED_MONGO_DUMP_OPTIONS = [
    "directoryperdb",
    "journal",
    "collection",
    "out",
    "query",
    "oplog",
    "repair",
    "forceTableScan",
    "ipv6",
    "verbose",
    "authenticationDatabase",
    "dumpDbUsersAndRoles"
]
###############################################################################
# dump command
###############################################################################
def dump_command(parsed_options):
    """
    Entry point for the 'dump' command: validates the dump target and
    dispatches to an address-based or dbpath-based mongodump.
    """
    target = parsed_options.target
    use_best_secondary = parsed_options.useBestSecondary
    looks_like_address = is_db_address(target)
    looks_like_path = is_dbpath(target)
    # the target must match exactly one interpretation
    if looks_like_address and looks_like_path:
        msg = ("Ambiguous target value '%s'. Your target matches both a dbpath"
               " and a db address. Use prefix 'file://', 'cluster://' or"
               " 'server://' to make it more specific" % target)
        raise MongoctlException(msg)
    if not looks_like_address and not looks_like_path:
        raise MongoctlException("Invalid target value '%s'. Target has to be"
                                " a valid db address or dbpath." % target)
    dump_options = extract_mongo_dump_options(parsed_options)
    if looks_like_address:
        mongo_dump_db_address(target,
                              username=parsed_options.username,
                              password=parsed_options.password,
                              use_best_secondary=use_best_secondary,
                              max_repl_lag=None,
                              dump_options=dump_options)
    else:
        mongo_dump_db_path(resolve_path(target), dump_options=dump_options)
###############################################################################
# mongo_dump
###############################################################################
def mongo_dump_db_address(db_address,
                          username=None,
                          password=None,
                          use_best_secondary=False,
                          max_repl_lag=None,
                          dump_options=None):
    """
    Dump from a db address: either a mongodb URI, or a
    "<server-or-cluster-id>[/<database>]" string resolved against the
    mongoctl repository.

    Raises MongoctlException when the id matches neither a server nor a
    cluster.
    """
    if is_mongo_uri(db_address):
        mongo_dump_uri(uri=db_address, username=username, password=password,
                       use_best_secondary=use_best_secondary,
                       dump_options=dump_options)
        return
    # db_address is an id string
    id_path = db_address.split("/")
    id = id_path[0]
    database = id_path[1] if len(id_path) == 2 else None
    # try servers first, then clusters
    server = repository.lookup_server(id)
    if server:
        mongo_dump_server(server, database=database, username=username,
                          password=password, dump_options=dump_options)
        return
    else:
        cluster = repository.lookup_cluster(id)
        if cluster:
            mongo_dump_cluster(cluster, database=database, username=username,
                               password=password,
                               use_best_secondary=use_best_secondary,
                               max_repl_lag=max_repl_lag,
                               dump_options=dump_options)
            return
    # Unknown destination
    raise MongoctlException("Unknown db address '%s'" % db_address)
###############################################################################
def mongo_dump_db_path(dbpath, dump_options=None):
    """Run mongodump directly against a local dbpath (no host/port)."""
    do_mongo_dump(dbpath=dbpath, dump_options=dump_options)
###############################################################################
def mongo_dump_uri(uri,
                   username=None,
                   password=None,
                   use_best_secondary=False,
                   dump_options=None):
    """
    Dump from a mongodb URI. Explicitly supplied credentials take
    precedence over any embedded in the URI.
    """
    uri_wrapper = parse_mongo_uri(uri)
    database = uri_wrapper.database
    username = username if username else uri_wrapper.username
    password = password if password else uri_wrapper.password
    # the URI may resolve to a single server or to a cluster
    server_or_cluster = repository.build_server_or_cluster_from_uri(uri)
    if isinstance(server_or_cluster, Server):
        mongo_dump_server(server_or_cluster,
                          database=database,
                          username=username,
                          password=password,
                          dump_options=dump_options)
    else:
        mongo_dump_cluster(server_or_cluster,
                           database=database,
                           username=username,
                           password=password,
                           use_best_secondary=use_best_secondary,
                           dump_options=dump_options)
###############################################################################
def mongo_dump_server(server,
                      database=None,
                      username=None,
                      password=None,
                      dump_options=None):
    """
    Dump a single server.

    When a username is supplied without a password, the password is
    auto-completed from the server's configured credentials: the target
    database is tried first, then "admin".
    """
    repository.validate_server(server)
    # (removed: an unused local `auth_db = database or "admin"` computed here)
    # auto complete password if possible
    if username:
        if not password and database:
            password = server.lookup_password(database, username)
        if not password:
            password = server.lookup_password("admin", username)
    do_mongo_dump(host=server.get_connection_host_address(),
                  port=server.get_port(),
                  database=database,
                  username=username,
                  password=password,
                  version_info=server.get_mongo_version_info(),
                  dump_options=dump_options,
                  ssl=server.use_ssl_client())
###############################################################################
def mongo_dump_cluster(cluster,
                       database=None,
                       username=None,
                       password=None,
                       use_best_secondary=False,
                       max_repl_lag=False,
                       dump_options=None):
    """
    Dump a cluster, either from its primary (default) or from its best
    secondary when use_best_secondary is set.
    """
    repository.validate_cluster(cluster)
    if not use_best_secondary:
        mongo_dump_cluster_primary(cluster=cluster,
                                   database=database,
                                   username=username,
                                   password=password,
                                   dump_options=dump_options)
        return
    mongo_dump_cluster_best_secondary(cluster=cluster,
                                      max_repl_lag=max_repl_lag,
                                      database=database,
                                      username=username,
                                      password=password,
                                      dump_options=dump_options)
###############################################################################
def mongo_dump_cluster_primary(cluster,
                               database=None,
                               username=None,
                               password=None,
                               dump_options=None):
    """Dump the cluster's default server; raise when none can be found."""
    log_info("Locating default server for cluster '%s'..." % cluster.id)
    default_server = cluster.get_default_server()
    if not default_server:
        raise MongoctlException("No default server found for cluster '%s'" %
                                cluster.id)
    log_info("Dumping default server '%s'..." % default_server.id)
    mongo_dump_server(default_server,
                      database=database,
                      username=username,
                      password=password,
                      dump_options=dump_options)
###############################################################################
def mongo_dump_cluster_best_secondary(cluster,
                                      max_repl_lag=None,
                                      database=None,
                                      username=None,
                                      password=None,
                                      dump_options=None):
    """
    Dump from the cluster secondary whose replication lag is within
    max_repl_lag; raise MongoctlException when no such secondary exists.
    """
    #max_repl_lag = max_repl_lag or 3600
    log_info("Finding best secondary server for cluster '%s' with replication"
             " lag less than max (%s seconds)..." %
             (cluster.id, max_repl_lag))
    best_secondary = cluster.get_dump_best_secondary(max_repl_lag=max_repl_lag)
    if best_secondary:
        server = best_secondary.get_server()
        log_info("Found secondary server '%s'. Dumping..." % server.id)
        mongo_dump_server(server, database=database, username=username,
                          password=password, dump_options=dump_options)
    else:
        raise MongoctlException("No secondary server found for cluster '%s'" %
                                cluster.id)
###############################################################################
def do_mongo_dump(host=None,
                  port=None,
                  dbpath=None,
                  database=None,
                  username=None,
                  password=None,
                  version_info=None,
                  dump_options=None,
                  ssl=False):
    """
    Assemble and execute the mongodump command line.

    Options not supported by the target mongodump version are silently
    dropped; username/password are masked in the logged command.
    """
    # create dump command with host and port
    dump_cmd = [get_mongo_dump_executable(version_info)]
    # ssl options
    if ssl:
        dump_cmd.append("--ssl")
    if host:
        dump_cmd.extend(["--host", host])
    if port:
        dump_cmd.extend(["--port", str(port)])
    # dbpath
    if dbpath:
        dump_cmd.extend(["--dbpath", dbpath])
    # database
    if database:
        dump_cmd.extend(["-d", database])
    # username and password
    # NOTE: a password given without a username is never added to the command
    if username:
        dump_cmd.extend(["-u", username, "-p"])
        if password:
            dump_cmd.append(password)
    # ignore authenticationDatabase option if version_info is less than 2.4.0
    if (dump_options and "authenticationDatabase" in dump_options and
            version_info and version_info < MongoDBVersionInfo("2.4.0")):
        dump_options.pop("authenticationDatabase", None)
    # ignore dumpDbUsersAndRoles option if version_info is less than 2.6.0
    if (dump_options and "dumpDbUsersAndRoles" in dump_options and
            version_info and version_info < MongoDBVersionInfo("2.6.0")):
        dump_options.pop("dumpDbUsersAndRoles", None)
    # append shell options
    if dump_options:
        dump_cmd.extend(options_to_command_args(dump_options))
    # ensure destination dir if specified
    if dump_options and "out" in dump_options:
        ensure_dir(dump_options["out"])
    cmd_display = dump_cmd[:]
    # mask user/password (the token after "-u" and after "-p") in the log
    if username:
        cmd_display[cmd_display.index("-u") + 1] = "****"
    if password:
        cmd_display[cmd_display.index("-p") + 1] = "****"
    log_info("Executing command: \n%s" % " ".join(cmd_display))
    call_command(dump_cmd, bubble_exit_code=True)
###############################################################################
def extract_mongo_dump_options(parsed_args):
    """Pull the supported mongodump flags out of the parsed arguments."""
    return extract_mongo_exe_options(parsed_args, SUPPORTED_MONGO_DUMP_OPTIONS)
###############################################################################
def get_mongo_dump_executable(version_info):
    """Locate a mongodump binary for ``version_info``.

    A minor-version match is accepted, but a warning is logged when the
    binary's version is not an exact match.
    """
    exe = get_mongo_executable(version_info,
                               'mongodump',
                               version_check_pref=
                               VersionPreference.LATEST_MINOR)
    if version_info and version_info != exe.version:
        log_warning("Using mongodump '%s' that does not exactly match "
                    "server version '%s'" % (exe.version, version_info))
    return exe.path
| {
"repo_name": "mongolab/mongoctl",
"path": "mongoctl/commands/common/dump.py",
"copies": "1",
"size": "12412",
"license": "mit",
"hash": -7310268598031091000,
"line_mean": 36.9571865443,
"line_max": 79,
"alpha_frac": 0.4948436996,
"autogenerated": false,
"ratio": 4.863636363636363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5858480063236364,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abdul'
import mongoctl.repository as repository
from mongoctl.mongo_uri_tools import is_mongo_uri, parse_mongo_uri
from mongoctl.utils import resolve_path
from mongoctl.mongoctl_logging import log_info , log_warning
from mongoctl.commands.command_utils import (
is_db_address, is_dbpath, extract_mongo_exe_options, get_mongo_executable,
options_to_command_args,
VERSION_PREF_EXACT_OR_MINOR
)
from mongoctl.errors import MongoctlException
from mongoctl.utils import call_command
from mongoctl.objects.server import Server
from mongoctl.mongo_version import version_obj, MongoctlNormalizedVersion
###############################################################################
# CONSTS
###############################################################################
# Command-line options that mongoctl forwards to the ``mongodump``
# executable; any other option on the parsed command line is ignored.
SUPPORTED_MONGO_DUMP_OPTIONS = [
    "directoryperdb",
    "journal",
    "collection",
    "out",
    "query",
    "oplog",
    "repair",
    "forceTableScan",
    "ipv6",
    "verbose",
    "authenticationDatabase"
]
###############################################################################
# dump command
###############################################################################
def dump_command(parsed_options):
    """Entry point for the ``dump`` command.

    The target may be a db address (mongo URI, server id, or cluster id)
    or a local dbpath; an ambiguous or invalid target is rejected.
    """
    target = parsed_options.target
    use_best_secondary = parsed_options.useBestSecondary

    looks_like_address = is_db_address(target)
    looks_like_path = is_dbpath(target)

    if looks_like_address and looks_like_path:
        raise MongoctlException(
            "Ambiguous target value '%s'. Your target matches both a dbpath"
            " and a db address. Use prefix 'file://', 'cluster://' or"
            " 'server://' to make it more specific" % target)
    if not (looks_like_address or looks_like_path):
        raise MongoctlException("Invalid target value '%s'. Target has to be"
                                " a valid db address or dbpath." % target)

    dump_options = extract_mongo_dump_options(parsed_options)

    if looks_like_address:
        mongo_dump_db_address(target,
                              username=parsed_options.username,
                              password=parsed_options.password,
                              use_best_secondary=use_best_secondary,
                              max_repl_lag=None,
                              dump_options=dump_options)
    else:
        mongo_dump_db_path(resolve_path(target), dump_options=dump_options)
###############################################################################
# mongo_dump
###############################################################################
def mongo_dump_db_address(db_address,
                          username=None,
                          password=None,
                          use_best_secondary=False,
                          max_repl_lag=None,
                          dump_options=None):
    """Dump a database identified by a mongo URI or a "<id>[/<database>]"
    string naming a configured server or cluster."""
    if is_mongo_uri(db_address):
        mongo_dump_uri(uri=db_address, username=username, password=password,
                       use_best_secondary=use_best_secondary,
                       dump_options=dump_options)
        return

    # Otherwise the address is "<id>" or "<id>/<database>".
    parts = db_address.split("/")
    target_id = parts[0]
    database = parts[1] if len(parts) == 2 else None

    server = repository.lookup_server(target_id)
    if server:
        mongo_dump_server(server, database=database, username=username,
                          password=password, dump_options=dump_options)
        return

    cluster = repository.lookup_cluster(target_id)
    if cluster:
        mongo_dump_cluster(cluster, database=database, username=username,
                           password=password,
                           use_best_secondary=use_best_secondary,
                           max_repl_lag=max_repl_lag,
                           dump_options=dump_options)
        return

    # Neither a URI, a known server, nor a known cluster.
    raise MongoctlException("Unknown db address '%s'" % db_address)
###############################################################################
def mongo_dump_db_path(dbpath, dump_options=None):
    """Dump straight from a local dbpath (offline dump)."""
    do_mongo_dump(dbpath=dbpath, dump_options=dump_options)
###############################################################################
def mongo_dump_uri(uri,
                   username=None,
                   password=None,
                   use_best_secondary=False,
                   dump_options=None):
    """Dump the database addressed by a mongodb URI.

    Explicitly supplied credentials take precedence over credentials
    embedded in the URI.
    """
    uri_wrapper = parse_mongo_uri(uri)
    database = uri_wrapper.database
    username = username or uri_wrapper.username
    password = password or uri_wrapper.password

    target = repository.build_server_or_cluster_from_uri(uri)
    common_kwargs = dict(database=database, username=username,
                         password=password, dump_options=dump_options)

    if isinstance(target, Server):
        mongo_dump_server(target, **common_kwargs)
    else:
        mongo_dump_cluster(target, use_best_secondary=use_best_secondary,
                           **common_kwargs)
###############################################################################
def mongo_dump_server(server,
                      database=None,
                      username=None,
                      password=None,
                      dump_options=None):
    """Run mongodump against a single managed server."""
    repository.validate_server(server)

    # Auto-complete a missing password from the server configuration:
    # first try the target database, then fall back to "admin".
    if username and not password:
        if database:
            password = server.lookup_password(database, username)
        if not password:
            password = server.lookup_password("admin", username)

    do_mongo_dump(host=server.get_connection_host_address(),
                  port=server.get_port(),
                  database=database,
                  username=username,
                  password=password,
                  server_version=server.get_mongo_version(),
                  dump_options=dump_options)
###############################################################################
def mongo_dump_cluster(cluster,
                       database=None,
                       username=None,
                       password=None,
                       use_best_secondary=False,
                       max_repl_lag=False,
                       dump_options=None):
    """Dump a cluster, either from its primary or from the least-lagged
    secondary.

    NOTE(review): max_repl_lag defaults to False here but to None in
    mongo_dump_cluster_best_secondary; both are falsy — confirm intent.
    """
    repository.validate_cluster(cluster)

    common_kwargs = dict(database=database,
                         username=username,
                         password=password,
                         dump_options=dump_options)
    if use_best_secondary:
        mongo_dump_cluster_best_secondary(cluster=cluster,
                                          max_repl_lag=max_repl_lag,
                                          **common_kwargs)
    else:
        mongo_dump_cluster_primary(cluster=cluster, **common_kwargs)
###############################################################################
def mongo_dump_cluster_primary(cluster,
                               database=None,
                               username=None,
                               password=None,
                               dump_options=None):
    """Dump the cluster's configured default server, or fail loudly."""
    log_info("Locating default server for cluster '%s'..." % cluster.id)
    default_server = cluster.get_default_server()
    if not default_server:
        raise MongoctlException("No default server found for cluster '%s'" %
                                cluster.id)
    log_info("Dumping default server '%s'..." % default_server.id)
    mongo_dump_server(default_server,
                      database=database,
                      username=username,
                      password=password,
                      dump_options=dump_options)
###############################################################################
def mongo_dump_cluster_best_secondary(cluster,
                                      max_repl_lag=None,
                                      database=None,
                                      username=None,
                                      password=None,
                                      dump_options=None):
    """Dump from the cluster's least-lagged secondary, or fail loudly."""
    log_info("Finding best secondary server for cluster '%s' with replication"
             " lag less than max (%s seconds)..." %
             (cluster.id, max_repl_lag))
    best_secondary = cluster.get_dump_best_secondary(max_repl_lag=max_repl_lag)
    if not best_secondary:
        raise MongoctlException("No secondary server found for cluster '%s'" %
                                cluster.id)
    server = best_secondary.get_server()
    log_info("Found secondary server '%s'. Dumping..." % server.id)
    mongo_dump_server(server, database=database, username=username,
                      password=password, dump_options=dump_options)
###############################################################################
def do_mongo_dump(host=None,
                  port=None,
                  dbpath=None,
                  database=None,
                  username=None,
                  password=None,
                  server_version=None,
                  dump_options=None):
    """Assemble a ``mongodump`` command line and execute it.

    The logged copy of the command has credentials replaced by ``****``.
    """
    cmd = [get_mongo_dump_executable(server_version)]

    # Connection / target flags
    if host:
        cmd += ["--host", host]
    if port:
        cmd += ["--port", str(port)]
    if dbpath:
        cmd += ["--dbpath", dbpath]
    if database:
        cmd += ["-d", database]

    # Credentials: the password follows the bare "-p" flag when present.
    if username:
        cmd += ["-u", username, "-p"]
        if password:
            cmd.append(password)

    # mongodump < 2.4.0 does not understand --authenticationDatabase.
    auth_db_opt = "authenticationDatabase"
    if dump_options and auth_db_opt in dump_options and server_version:
        if version_obj(server_version) < MongoctlNormalizedVersion("2.4.0"):
            dump_options.pop(auth_db_opt, None)

    if dump_options:
        cmd.extend(options_to_command_args(dump_options))

    # Log a masked copy so credentials never land in the log file.
    masked = cmd[:]
    if username:
        masked[masked.index("-u") + 1] = "****"
    if password:
        masked[masked.index("-p") + 1] = "****"
    log_info("Executing command: \n%s" % " ".join(masked))

    call_command(cmd, bubble_exit_code=True)
###############################################################################
def extract_mongo_dump_options(parsed_args):
    """Return only the mongodump-supported options from parsed CLI args."""
    supported = SUPPORTED_MONGO_DUMP_OPTIONS
    return extract_mongo_exe_options(parsed_args, supported)
###############################################################################
def get_mongo_dump_executable(server_version):
    """Locate a mongodump binary compatible with ``server_version``.

    A minor-version match is accepted, but a warning is logged when the
    binary's version is not an exact match.
    """
    exe = get_mongo_executable(server_version,
                               'mongodump',
                               version_check_pref=
                               VERSION_PREF_EXACT_OR_MINOR)
    if server_version and version_obj(server_version) != exe.version:
        log_warning("Using mongodump '%s' that does not exactly match "
                    "server version '%s'" % (exe.version, server_version))
    return exe.path
| {
"repo_name": "richardxx/mongoctl-service",
"path": "build/lib.linux-x86_64-2.7/mongoctl/commands/common/dump.py",
"copies": "2",
"size": "11929",
"license": "mit",
"hash": -2472302452569809400,
"line_mean": 36.9904458599,
"line_max": 82,
"alpha_frac": 0.4893955906,
"autogenerated": false,
"ratio": 4.9151215492377425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001557287562511508,
"num_lines": 314
} |
__author__ = 'abel'
import montemodes.functions.calculate as calc
# Dispatch table mapping a numeric method id to the matching
# single-point-energy backend: 1 -> Tinker, 2 -> Gaussian.
method_function = {
    1: calc.get_energy_from_tinker,
    2: calc.get_energy_from_gaussian
}
class gaussian:
    """Thin wrapper around the Gaussian backend in ``calc``.

    Holds the calculation settings and exposes ``single_point`` (energy)
    and ``vibrations`` (normal modes + energy).
    """
    def __init__(self,
                 methodology='pm3',
                 internal=False,
                 memory=None,
                 processors=None,
                 binary='g09'):
        self._methodology = methodology
        # NOTE(review): _memory is stored but never forwarded to the
        # calculator calls below — confirm whether it should be.
        self._memory = memory
        self._processors = processors
        self._internal = internal
        self._binary = binary
    def single_point(self, molecule):
        """Return the single-point energy of ``molecule``."""
        return calc.get_energy_from_gaussian(molecule,
                                             calculation=self._methodology,
                                             internal=self._internal,
                                             processors=self._processors,
                                             binary=self._binary)
    def vibrations(self, molecule):
        """Return ``(modes, energy)`` from a Gaussian frequency run."""
        modes, energy = calc.get_modes_from_gaussian(molecule,
                                                     calculation=self._methodology,
                                                     binary=self._binary)
        return modes, energy
    @property
    def internal(self):
        # Whether internal (Z-matrix) coordinates are used.
        return self._internal
    @internal.setter
    def internal(self, internal):
        self._internal = internal
class tinker:
    """Thin wrapper around the Tinker backend in ``calc``.

    Holds the force-field settings and exposes ``single_point`` (energy)
    and ``vibrations`` (normal modes; energy is not provided by Tinker
    here and is returned as None).
    """
    def __init__(self,
                 parameter_set='mm3.prm',
                 num_modes=None):
        self._parameter_set = parameter_set
        self._num_modes = num_modes
    def single_point(self, molecule):
        """Return the single-point energy of ``molecule``."""
        return calc.get_energy_from_tinker(molecule, force_field=self._parameter_set)
    def vibrations(self, molecule):
        """Return ``(modes, None)`` from a Tinker normal-mode run."""
        modes = calc.get_modes_from_tinker(molecule,
                                           force_field=self._parameter_set,
                                           num_modes=self._num_modes)
        # Tinker's mode extraction does not report an energy.
        energy = None
        return modes, energy
| {
"repo_name": "abelcarreras/MonteModes",
"path": "montemodes/functions/methods.py",
"copies": "1",
"size": "1986",
"license": "mit",
"hash": 8463471518446853000,
"line_mean": 27.3714285714,
"line_max": 85,
"alpha_frac": 0.5025176234,
"autogenerated": false,
"ratio": 4.915841584158416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017551590539656634,
"num_lines": 70
} |
__author__ = 'abel'
import tempfile
import os
import numpy as np
def int_to_xyz(molecule, no_dummy=True):
    """Convert the molecule's full Z-matrix to Cartesian coordinates.

    The first atom sits at the origin; each later atom is placed from its
    bond length, bond angle and dihedral relative to previously placed
    atoms.  When ``no_dummy`` is True, rows whose element label is
    'X'/'x' (dummy atoms) are removed from the result.
    """
    z_matrix = molecule.get_full_z_matrix()
    coordinates = [[0.0, 0.0, 0.0]]

    for row in z_matrix[1:]:
        bond_idx, bond_length = int(row[0]), row[1]
        angle_idx, angle_deg = int(row[2]), row[3]
        dihedral_idx, dihedral_deg = int(row[4]), row[5]

        bond_vec = np.array(coordinates[angle_idx - 1]) - np.array(coordinates[bond_idx - 1])
        if np.linalg.norm(bond_vec) == 0:
            bond_vec = np.array([1, 0, 0])

        ref_vec = np.array(coordinates[dihedral_idx - 1]) - np.array(coordinates[angle_idx - 1])
        if np.linalg.norm(ref_vec) == 0:
            ref_vec = np.array([0, 1, 0])

        # Start on the bond axis at the requested distance.
        position = bond_vec / np.linalg.norm(bond_vec) * bond_length

        dihedral_axis = bond_vec
        angle_axis = np.cross(bond_vec, ref_vec)
        # Degenerate (linear) reference geometry: pick an arbitrary axis.
        if np.linalg.norm(angle_axis) == 0:
            angle_axis = [0.0, 0.0, 0.1]

        # Apply bond angle, then dihedral, then translate to the bond atom.
        position = np.dot(rotation_matrix(angle_axis, np.deg2rad(angle_deg)), position)
        position = np.dot(rotation_matrix(dihedral_axis, np.deg2rad(dihedral_deg)), position)
        coordinates.append(position + np.array(coordinates[bond_idx - 1]))

    coordinates = np.array(coordinates)

    if no_dummy:
        element_labels = molecule.get_atomic_elements_with_dummy()[:, 0]
        dummy_rows = np.argwhere((element_labels == 'X') |
                                 (element_labels == 'x')).flatten()
        coordinates = np.delete(coordinates, dummy_rows, axis=0)

    return np.array(coordinates, dtype=float)
def rotation_matrix(axis, theta):
    """
    Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians (Euler-Rodrigues formula).

    Args:
        axis: length-3 axis vector; need not be normalized, but must be
            non-zero.
        theta: rotation angle in radians.

    Returns:
        3x3 numpy array R such that R.dot(v) rotates v about ``axis``.

    Raises:
        ValueError: if ``axis`` has zero length.
    """
    axis = np.asarray(axis, dtype=float)
    if np.dot(axis, axis) == 0.0:
        # Previously reported via a Python-2 print statement followed by
        # exit(), which killed the whole process (and is a SyntaxError on
        # Python 3).  Raising lets callers handle the bad input.
        raise ValueError('Warning, reference rotation axis module is 0')
    axis = axis / np.sqrt(np.dot(axis, axis))
    theta = np.asarray(theta)
    a = np.cos(theta / 2)
    b, c, d = -axis * np.sin(theta / 2)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
class Structure:
    """Molecular structure with Cartesian and/or internal coordinates.

    Most quantities are computed lazily and cached in ``_``-prefixed
    attributes; setters invalidate the dependent caches (e.g. setting
    coordinates clears the cached energy).
    """
    def __init__(self,
                 coordinates=None,
                 internal=None,
                 z_matrix=None,
                 int_label=None,
                 atom_types=None,
                 atomic_elements=None,
                 atomic_numbers=None,
                 connectivity=None,
                 file_name=None,
                 charge=0,
                 multiplicity=1,
                 # TODO: find a better place for this (original note in
                 # Catalan: "Buscar un lloc millor")
                 int_weights=None):
        self._coordinates = coordinates
        self._internal = internal
        self._z_matrix = z_matrix
        self._int_label = int_label
        self._atom_types = atom_types
        self._atomic_numbers = atomic_numbers
        self._connectivity = connectivity
        self._atomic_elements = atomic_elements
        self._charge = charge
        self._multiplicity = multiplicity
        self._file_name = file_name
        self._int_weights = int_weights
        # Lazily computed caches (filled on first access).
        self._atomic_masses = None
        self._number_of_atoms = None
        self._number_of_internal = None
        self._energy = None
        self._modes = None
        self._full_z_matrix = None
    def get_coordinates(self):
        """Return a copy of the Cartesian coordinates, deriving them from
        the Z-matrix on first access if necessary."""
        if self._coordinates is None:
            self._coordinates = int_to_xyz(self)
        return self._coordinates.copy()
    def set_coordinates(self, coordinates):
        # New geometry invalidates atom count and cached energy.
        self._coordinates = coordinates
        self._number_of_atoms = None
        self._energy = None
    def get_internal(self):
        """Return a copy of the internal coordinates.

        NOTE(review): aborts the process via exit() when unavailable;
        consider raising an exception instead.
        """
        if self._internal is None:
            print('No internal coordinates available\n Load internal file')
            exit()
        return self._internal.copy()
    def set_internal(self, internal):
        # New internal coordinates invalidate energy, Cartesian
        # coordinates (recomputed immediately) and the full Z-matrix.
        self._internal = internal
        self._energy = None
        self._coordinates = int_to_xyz(self)
        self._full_z_matrix = None
    def get_full_z_matrix(self):
        """Return the numeric Z-matrix: each row holds index/value pairs
        for bond, angle and dihedral.  Symbolic labels in the raw
        Z-matrix are resolved through the internal-coordinate dict."""
        if self._full_z_matrix is None:
            num_z_atoms = self.get_z_matrix().shape[0]
            self._full_z_matrix = np.zeros((num_z_atoms,6))
            # Row 0 stays all-zero (first atom has no references).
            for row, i in enumerate(self.get_z_matrix()[1:]):
                for col, k in enumerate(i[0]):
                    try:
                        self._full_z_matrix[row+1, col] = float(k)
                    except ValueError:
                        # Not a number: look the label up by name.
                        self._full_z_matrix[row+1, col] = self.get_int_dict()[k]
        return self._full_z_matrix
    def get_z_matrix(self):
        # NOTE(review): aborts the process via exit() when unavailable.
        if self._z_matrix is None:
            print('No Z-matrix available\n Load zmatrix file')
            exit()
        return self._z_matrix
    def set_z_matrix(self, z_matrix):
        self._z_matrix = z_matrix
    def get_int_label(self):
        return self._int_label
    def set_int_label(self, int_label):
        self._int_label = int_label
    def get_int_dict(self):
        """Map each internal-coordinate label to its current value."""
        self._internal_dict = {}
        for i, label in enumerate(self.get_int_label()[:,0]):
            self._internal_dict.update({label:self.get_internal()[i, 0]})
        return self._internal_dict
    def get_int_weights(self):
        return self._int_weights
    def set_int_weights(self, int_weights):
        self._int_weights = int_weights
    def get_atomic_elements_with_dummy(self):
        """Return element labels including dummy ('X') atoms."""
        # print([i for i in self._atomic_elements if i != "X"])
        return self._atomic_elements
    @property
    def file_name(self):
        return self._file_name
    @file_name.setter
    def file_name(self, file_name):
        self._file_name = file_name
    @property
    def charge(self):
        return self._charge
    @charge.setter
    def charge(self, charge):
        self._charge = charge
    @property
    def multiplicity(self):
        return self._multiplicity
    @multiplicity.setter
    def multiplicity(self, multiplicity):
        self._multiplicity = multiplicity
    def get_atom_types(self):
        # NOTE(review): aborts the process via exit() when unavailable.
        if self._atom_types is None:
            print('No atom types available')
            exit()
        return self._atom_types
    def set_atom_types(self, atom_types):
        self._atom_types = atom_types
    def get_atomic_numbers(self):
        # Default: sequential numbers 1..N as a column vector.
        if self._atomic_numbers is None:
            self._atomic_numbers = np.array(range(1, self.get_number_of_atoms()+1))[None].T
        return self._atomic_numbers
    def set_atomic_numbers(self, atomic_numbers):
        self._atomic_numbers = atomic_numbers
    def get_atomic_elements(self):
        """Return upper-cased element symbols, excluding dummy atoms."""
        if self._atomic_elements is None:
            # NOTE(review): the double-bracket fancy indexing into
            # atom_data relies on legacy NumPy behavior — verify on
            # modern NumPy versions.
            self._atomic_elements = np.array(atom_data)[[self.get_atomic_numbers()]][:,1:2]
        return np.array([[i[0].upper()] for i in self._atomic_elements if i != "X"], dtype=str)
    def set_atomic_elements(self, atomic_elements):
        self._atomic_elements = atomic_elements
    def get_connectivity(self):
        # NOTE(review): aborts the process via exit() when unavailable.
        if self._connectivity is None:
            print('No atom connectivity available')
            exit()
        return self._connectivity
    def set_connectivity(self, connectivity):
        self._connectivity = connectivity
    # Real methods
    def get_number_of_atoms(self):
        # Derived from the Cartesian coordinate array shape.
        if self._number_of_atoms is None:
            self._number_of_atoms = self.get_coordinates().shape[0]
        return self._number_of_atoms
    def get_number_of_internal(self):
        if self._number_of_internal is None:
            self._number_of_internal = self.get_internal().shape[0]
        return self._number_of_internal
    def get_energy(self, method=None):
        """Return the (cached) energy computed by ``method.single_point``."""
        if self._energy is None:
            self._energy = method.single_point(self)
        return self._energy
    def get_modes(self, method=None):
        """Return the (cached) normal modes from ``method.vibrations``;
        also caches the energy that comes with them."""
        if self._modes is None:
            self._modes, self._energy = method.vibrations(self)
        return self._modes
    def get_atomic_masses(self):
        """Return atomic masses looked up from atom_data by element symbol."""
        if self._atomic_masses is None:
            try:
                masses_string = np.array(atom_data)[:,3:4][[np.where(np.array(atom_data)==element)[0][0] for element in self.get_atomic_elements()]]
                self._atomic_masses = np.array(masses_string, dtype=float)
            except TypeError:
                # NOTE(review): aborts the process via exit() on failure.
                print('Error reading element labels')
                exit()
        return self._atomic_masses
# Periodic-table lookup: [atomic number, symbol, element name, standard
# atomic weight].  Row 0 is the dummy-atom placeholder "X"; a weight of 0
# marks elements with no tabulated standard weight here.
atom_data = [
    [  0, "X", "X", 0], # 0
    [  1, "H", "Hydrogen", 1.00794], # 1
    [  2, "He", "Helium", 4.002602], # 2
    [  3, "Li", "Lithium", 6.941], # 3
    [  4, "Be", "Beryllium", 9.012182], # 4
    [  5, "B", "Boron", 10.811], # 5
    [  6, "C", "Carbon", 12.0107], # 6
    [  7, "N", "Nitrogen", 14.0067], # 7
    [  8, "O", "Oxygen", 15.9994], # 8
    [  9, "F", "Fluorine", 18.9984032], # 9
    [ 10, "Ne", "Neon", 20.1797], # 10
    [ 11, "Na", "Sodium", 22.98976928], # 11
    [ 12, "Mg", "Magnesium", 24.3050], # 12
    [ 13, "Al", "Aluminium", 26.9815386], # 13
    [ 14, "Si", "Silicon", 28.0855], # 14
    [ 15, "P", "Phosphorus", 30.973762], # 15
    [ 16, "S", "Sulfur", 32.065], # 16
    [ 17, "Cl", "Chlorine", 35.453], # 17
    [ 18, "Ar", "Argon", 39.948], # 18
    [ 19, "K", "Potassium", 39.0983], # 19
    [ 20, "Ca", "Calcium", 40.078], # 20
    [ 21, "Sc", "Scandium", 44.955912], # 21
    [ 22, "Ti", "Titanium", 47.867], # 22
    [ 23, "V", "Vanadium", 50.9415], # 23
    [ 24, "Cr", "Chromium", 51.9961], # 24
    [ 25, "Mn", "Manganese", 54.938045], # 25
    [ 26, "Fe", "Iron", 55.845], # 26
    [ 27, "Co", "Cobalt", 58.933195], # 27
    [ 28, "Ni", "Nickel", 58.6934], # 28
    [ 29, "Cu", "Copper", 63.546], # 29
    [ 30, "Zn", "Zinc", 65.38], # 30
    [ 31, "Ga", "Gallium", 69.723], # 31
    [ 32, "Ge", "Germanium", 72.64], # 32
    [ 33, "As", "Arsenic", 74.92160], # 33
    [ 34, "Se", "Selenium", 78.96], # 34
    [ 35, "Br", "Bromine", 79.904], # 35
    [ 36, "Kr", "Krypton", 83.798], # 36
    [ 37, "Rb", "Rubidium", 85.4678], # 37
    [ 38, "Sr", "Strontium", 87.62], # 38
    [ 39, "Y", "Yttrium", 88.90585], # 39
    [ 40, "Zr", "Zirconium", 91.224], # 40
    [ 41, "Nb", "Niobium", 92.90638], # 41
    [ 42, "Mo", "Molybdenum", 95.96], # 42
    [ 43, "Tc", "Technetium", 0], # 43
    [ 44, "Ru", "Ruthenium", 101.07], # 44
    [ 45, "Rh", "Rhodium", 102.90550], # 45
    [ 46, "Pd", "Palladium", 106.42], # 46
    [ 47, "Ag", "Silver", 107.8682], # 47
    [ 48, "Cd", "Cadmium", 112.411], # 48
    [ 49, "In", "Indium", 114.818], # 49
    [ 50, "Sn", "Tin", 118.710], # 50
    [ 51, "Sb", "Antimony", 121.760], # 51
    [ 52, "Te", "Tellurium", 127.60], # 52
    [ 53, "I", "Iodine", 126.90447], # 53
    [ 54, "Xe", "Xenon", 131.293], # 54
    [ 55, "Cs", "Caesium", 132.9054519], # 55
    [ 56, "Ba", "Barium", 137.327], # 56
    [ 57, "La", "Lanthanum", 138.90547], # 57
    [ 58, "Ce", "Cerium", 140.116], # 58
    [ 59, "Pr", "Praseodymium", 140.90765], # 59
    [ 60, "Nd", "Neodymium", 144.242], # 60
    [ 61, "Pm", "Promethium", 0], # 61
    [ 62, "Sm", "Samarium", 150.36], # 62
    [ 63, "Eu", "Europium", 151.964], # 63
    [ 64, "Gd", "Gadolinium", 157.25], # 64
    [ 65, "Tb", "Terbium", 158.92535], # 65
    [ 66, "Dy", "Dysprosium", 162.500], # 66
    [ 67, "Ho", "Holmium", 164.93032], # 67
    [ 68, "Er", "Erbium", 167.259], # 68
    [ 69, "Tm", "Thulium", 168.93421], # 69
    [ 70, "Yb", "Ytterbium", 173.054], # 70
    [ 71, "Lu", "Lutetium", 174.9668], # 71
    [ 72, "Hf", "Hafnium", 178.49], # 72
    [ 73, "Ta", "Tantalum", 180.94788], # 73
    [ 74, "W", "Tungsten", 183.84], # 74
    [ 75, "Re", "Rhenium", 186.207], # 75
    [ 76, "Os", "Osmium", 190.23], # 76
    [ 77, "Ir", "Iridium", 192.217], # 77
    [ 78, "Pt", "Platinum", 195.084], # 78
    [ 79, "Au", "Gold", 196.966569], # 79
    [ 80, "Hg", "Mercury", 200.59], # 80
    [ 81, "Tl", "Thallium", 204.3833], # 81
    [ 82, "Pb", "Lead", 207.2], # 82
    [ 83, "Bi", "Bismuth", 208.98040], # 83
    [ 84, "Po", "Polonium", 0], # 84
    [ 85, "At", "Astatine", 0], # 85
    [ 86, "Rn", "Radon", 0], # 86
    [ 87, "Fr", "Francium", 0], # 87
    [ 88, "Ra", "Radium", 0], # 88
    [ 89, "Ac", "Actinium", 0], # 89
    [ 90, "Th", "Thorium", 232.03806], # 90
    [ 91, "Pa", "Protactinium", 231.03588], # 91
    [ 92, "U", "Uranium", 238.02891], # 92
    [ 93, "Np", "Neptunium", 0], # 93
    [ 94, "Pu", "Plutonium", 0], # 94
    [ 95, "Am", "Americium", 0], # 95
    [ 96, "Cm", "Curium", 0], # 96
    [ 97, "Bk", "Berkelium", 0], # 97
    [ 98, "Cf", "Californium", 0], # 98
    [ 99, "Es", "Einsteinium", 0], # 99
    [100, "Fm", "Fermium", 0], # 100
    [101, "Md", "Mendelevium", 0], # 101
    [102, "No", "Nobelium", 0], # 102
    [103, "Lr", "Lawrencium", 0], # 103
    [104, "Rf", "Rutherfordium", 0], # 104
    [105, "Db", "Dubnium", 0], # 105
    [106, "Sg", "Seaborgium", 0], # 106
    [107, "Bh", "Bohrium", 0], # 107
    [108, "Hs", "Hassium", 0], # 108
    [109, "Mt", "Meitnerium", 0], # 109
    [110, "Ds", "Darmstadtium", 0], # 110
    [111, "Rg", "Roentgenium", 0], # 111
    [112, "Cn", "Copernicium", 0], # 112
    [113, "Uut", "Ununtrium", 0], # 113
    [114, "Uuq", "Ununquadium", 0], # 114
    [115, "Uup", "Ununpentium", 0], # 115
    [116, "Uuh", "Ununhexium", 0], # 116
    [117, "Uus", "Ununseptium", 0], # 117
    [118, "Uuo", "Ununoctium", 0], # 118
    ]
if __name__ == '__main__':
    # Lightweight manual smoke test; requires montemodes and ../test.xyz.
    import montemodes.functions.methods as meth
    import montemodes.functions.reading as io_monte

    tinker_mm3 = meth.tinker(parameter_set='mm3')
    gaussian_pm3 = meth.gaussian()

    molecule = io_monte.reading_from_xyz_file('../test.xyz')

    # BUG FIX: meth.gaussian has no 'function' method; the energy entry
    # point is single_point() (see montemodes/functions/methods.py).
    print(gaussian_pm3.single_point(molecule))
    # print(molecule.get_full_z_matrix())
    print(len(int_to_xyz(molecule)))
"repo_name": "abelcarreras/MonteModes",
"path": "montemodes/classes/structure.py",
"copies": "1",
"size": "13864",
"license": "mit",
"hash": 1503145445730430500,
"line_mean": 32.5714285714,
"line_max": 148,
"alpha_frac": 0.5285631852,
"autogenerated": false,
"ratio": 2.9649272882805815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39934904734805815,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Abe'
import json
from unicodedata import normalize
import re
# Run the command below with the correct IP address to retrieve the updated log file
## scp root@<IP ADDRESS>:~/master/logs/main.log ~/Documents/Coursity/Coursity/testing/log_processor/

# Counters for the known, uninteresting error-page categories; anything
# that matches none of them is written out verbatim for manual review.
calendar_page_count = 0
choose_semester_page_count = 0
different_timetable_page_count = 0
count = 0

# FIX: the original opened both files and never closed them (leaked
# handles; buffered output could be lost).  Context managers guarantee
# the handles are closed and the output is flushed.
with open("main.log", 'r') as f, open("Readable Error Text.txt", 'w') as output:
    for line in f:
        parsed_json = json.loads(line)
        if not parsed_json.get("message"):
            continue
        error_raw_text = parsed_json.get("message").replace("\\n", "\n")
        if "Text" not in error_raw_text:
            continue
        if "Choose a date" in error_raw_text:
            calendar_page_count += 1
        elif re.search(r"(Sep)|(Oct)", error_raw_text):
            calendar_page_count += 1
        elif "Career" in error_raw_text:
            choose_semester_page_count += 1
        elif re.search(r"[(]\d+[)]", error_raw_text):
            different_timetable_page_count += 1
        else:
            # Unrecognized error: dump it between separator bars.
            count += 1
            output.write("=" * 30 + "\n")
            output.write("Error Text " + str(count) + "\n")
            output.write("=" * 30 + "\n")
            output.write(normalize('NFKD', error_raw_text).encode('ASCII', 'ignore'))
            output.write("\n" + "=" * 30 + "\n")

print(calendar_page_count)
print(choose_semester_page_count)
print(different_timetable_page_count)
"repo_name": "sarmadhashmi/Coursity",
"path": "tests/log_processor/log_processor.py",
"copies": "1",
"size": "1674",
"license": "mit",
"hash": 8943245487424378000,
"line_mean": 37.9534883721,
"line_max": 101,
"alpha_frac": 0.5884109916,
"autogenerated": false,
"ratio": 3.679120879120879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4767531870720879,
"avg_score": null,
"num_lines": null
} |
"""Given a function which expects a file object as one of the input
arguments, the decorator in this module will modify the function input
entry point so that it additionally accepts file names without modifying
the implementation of the original function.
"""
__all__ = ('FunctionFilenameWrapper', 'fnfnwrap')
import collections
import functools
import inspect
import io
from .utils import is_valid_filename, validate_open_kwargs
############################
## Defining a decorator ##
############################
def fnfnwrap(original_fn=None, *, filearg=0, **open_kwargs):
    """A function decorator that extends the decorated function's entry
    point so it additionally accepts file names, without modifying the
    implementation of the original function.

    Args:
        original_fn: Main function being wrapped
        filearg: Input argument of the function which accepts file-like
            objects, given either as the index of the positional
            argument (integer) or as the argument's name (string)
        **open_kwargs: Keyword-only arguments passed through to the
            built-in function `open()` whenever a new file is opened;
            see the built-in documentation for their meaning.

    Returns:
        The same function with file-opening mechanics.
    """
    # Bare usage (@fnfnwrap): original_fn is supplied directly.
    # Parameterized usage (@fnfnwrap(filearg=..., mode=...)): called
    # without original_fn first, so return a partial that waits for it.
    if original_fn is None:
        return functools.partial(fnfnwrap, filearg=filearg, **open_kwargs)
    return FunctionFilenameWrapper(original_fn, filearg, open_kwargs)
# Add signature to the above function using signature from open()
# The advertised signature of fnfnwrap becomes its own parameters minus
# the **open_kwargs catch-all ([:-1]) ...
_original_parameters = list(
    inspect.signature(fnfnwrap).parameters.values()
)[:-1]
# ... plus open()'s parameters minus its first ("file") argument ([1:]),
# re-declared as keyword-only so they read as pass-through options.
_additional_parameters = list(
    inspect.Parameter(
        param.name, inspect.Parameter.KEYWORD_ONLY, default=param.default
    )
    for param in inspect.signature(open).parameters.values()
)[1:]
fnfnwrap.__signature__ = inspect.Signature(
    _original_parameters + _additional_parameters
)
##############################
## Wrapper implementation ##
##############################
class FunctionFilenameWrapper(object):
    """Constructs a callable wrapper over a function accepting files as
    arguments.
    Given (1) an original function `original_fn`, (2) the name or the
    index of the file argument `filearg`, and (3) a dictionary of
    keyword-only arguments `open_kwargs` as arguments to be passed
    to built-in function `open()`, this class wraps over the
    `original_fn` and will automatically open file when file name
    strings (provided as `str`, `bytes`, or `os.PathLike`) are given
    as input arguments instead of file objects.
    Attributes:
        __wrapped__: Original function being wrapped
            (set by `functools.update_wrapper`)
        is_generator: Boolean indicating whether `__wrapped__` is a generator
        filearg: Name of function input argument accepting file objects
        pos: Index of positional `filearg` argument (`None` if keyword-only)
        open_kwargs: Dictionary of keyword arguments to built-in function
            `open()`
    """
    def __new__(cls, original_fn, filearg=0, open_kwargs=None):
        # The following line of code already set:
        #     self.__wrapped__ = original_fn
        return functools.update_wrapper(super().__new__(cls), original_fn)
    def __init__(self, original_fn, filearg=0, open_kwargs=None):
        """Validate inputs and resolve `filearg` to a name and position."""
        # Proactively check if original function is callable
        if not callable(original_fn):
            raise TypeError('expected a callable function')
        # Proactively check if open_kwargs is valid
        open_kwargs = open_kwargs or {}
        validate_open_kwargs(open_kwargs)
        # Extract argument specs from original function
        sig = inspect.signature(original_fn).parameters
        args = [
            parameter.name
            for parameter in sig.values()
            if parameter.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        ]
        kwargs = [
            parameter.name
            for parameter in sig.values()
            if parameter.kind == inspect.Parameter.KEYWORD_ONLY
        ]
        # Determine if positional filearg is within bounds
        try:
            filearg = args[filearg]  # obtain name of argument
        except TypeError:
            pass  # re-check later whether it is a valid name
        except IndexError as e:
            raise IndexError("argument list index out of range") from e
        # Check if filearg name string is actually valid
        if filearg in args:
            pos = args.index(filearg)  # always non negative
        elif filearg in kwargs:
            # Keyword-only file argument: no positional slot.
            pos = None
        elif isinstance(filearg, str):
            raise NameError(
                "{name!r} is not a valid argument for the function {fn!r}"
                .format(name=filearg, fn=original_fn.__qualname__)
            )
        else:
            raise TypeError(
                "{name!r} has incorrect type".format(name=filearg)
            )
        # Keep track of data
        self.is_generator = inspect.isgeneratorfunction(original_fn)
        self.filearg = filearg
        self.pos = pos
        self.open_kwargs = open_kwargs
    def __call__(self, *args, **kwargs):
        """Dispatch on where the file argument was supplied (positional,
        keyword, or omitted) and invoke the wrapped function."""
        args = list(args)  # convert from non-mutable sequence
        if self.pos is not None and self.pos < len(args):
            # Open files for positional argument
            return self._invoke(args, kwargs, args, self.pos)
        elif self.filearg in kwargs:
            # Open files for keyword arguments
            return self._invoke(args, kwargs, kwargs, self.filearg)
        else:
            # Open files not necessary (fallback to arg default values)
            return self.__wrapped__(*args, **kwargs)
    def _invoke(self, args, kwargs, store, key):
        """Open files given at `store[key]` before invoking the original
        function.
        Args:
            args: List of given positional arguments
            kwargs: Dictionary of given keyword arguments
            store: A duplicate of either `args` or `kwargs` containing
                file name argument
            key: Lookup key for file name argument in `store`
        Returns:
            Result of the original function being invoked
        """
        file_input = store[key]
        if isinstance(file_input, io.IOBase):
            # Input argument is already a file object: do nothing
            return self.__wrapped__(*args, **kwargs)
        elif is_valid_filename(file_input):
            # Input argument is a filename: need to open
            if self.is_generator:
                # File needs to be opened inside a generator if the original
                # function is also a generator. A wrap is needed to maintain
                # the attributes information of the generator objects.
                @functools.wraps(self.__wrapped__)
                def generator_wrapper():
                    # Keep the file open for the generator's whole lifetime.
                    with open(file_input, **self.open_kwargs) as fileobj:
                        store[key] = fileobj  # replace original arguments
                        return (yield from self.__wrapped__(*args, **kwargs))
                return generator_wrapper()
            else:
                # Open the file normally
                with open(file_input, **self.open_kwargs) as fileobj:
                    store[key] = fileobj  # replace original arguments
                    return self.__wrapped__(*args, **kwargs)
        else:
            raise TypeError(
                '{filearg!r} must have been file name or file-like object'
                .format(filearg=self.filearg)
            )
    def __get__(self, instance, owner):
        # In order to make this callable work with bounded methods inside
        # definition of classes, we make sure that this call is a non-data
        # descriptor. This part is heavily inspired by the documentation of
        # the package `wrapt` at
        # https://wrapt.readthedocs.io/en/latest/wrappers.html#function-wrappers
        get_method = self.__wrapped__.__get__(instance, owner)
        return BoundFunctionFilenameWrapper(get_method)
class BoundFunctionFilenameWrapper(FunctionFilenameWrapper):
    """The bounded method version of the class FunctionFilenameWrapper."""

    def __get__(self, instance, owner):
        # Already bound -- further descriptor access must not re-bind.
        # BUG FIX: the original named this parameter `isinstance`, shadowing
        # the builtin; renamed (the descriptor protocol always calls
        # __get__ positionally, so callers are unaffected).
        return self
| {
"repo_name": "abhabongse/python-filewraps",
"path": "pyfnfn/decorators.py",
"copies": "2",
"size": "8549",
"license": "mit",
"hash": 6924445655987865000,
"line_mean": 40.2995169082,
"line_max": 80,
"alpha_frac": 0.6225289508,
"autogenerated": false,
"ratio": 4.643671917436176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6266200868236176,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abhay'
# Sample data downloaded for testing
# Four sample records from the San Francisco mobile-food-facility permit
# dataset.  Note each record carries coordinates twice: nested under
# "location" and again at the top level (slightly different precision).
sample_food_trucks_data = [
    {
        "location":{
            "needs_recoding":False,
            "longitude":"-122.398658184594",
            "latitude":"37.7901490874965"
        },
        "status":"REQUESTED",
        "expirationdate":"2013-03-15T00:00:00",
        "permit":"13MFF-0068",
        "block":"3708",
        "received":"Mar 14 2013 3:34PM",
        "facilitytype":"Truck",
        "blocklot":"3708055",
        "locationdescription":"01ST ST: STEVENSON ST to JESSIE ST (21 - 56)",
        "cnn":"101000",
        "priorpermit":"0",
        "schedule":"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule¶ms=permit=13MFF-0068&ExportPDF=1&Filename=13MFF-0068_schedule.pdf",
        "address":"50 01ST ST",
        "applicant":"Cupkates Bakery, LLC",
        "lot":"055",
        "fooditems":"Cupcakes",
        "longitude":"-122.398658184604",
        "latitude":"37.7901490737255",
        "objectid":"427856",
        "y":"2115738.283",
        "x":"6013063.33"
    }
    ,
    {
        "location":{
            "needs_recoding":False,
            "longitude":"-122.395881037809",
            "latitude":"37.7891192216837"
        },
        "status":"APPROVED",
        "expirationdate":"2014-03-15T00:00:00",
        "permit":"13MFF-0060",
        "block":"3720",
        "received":"Mar 14 2013 12:50PM",
        "facilitytype":"Truck",
        "blocklot":"3720008",
        "locationdescription":"01ST ST: NATOMA ST to HOWARD ST (165 - 199)",
        "cnn":"106000",
        "priorpermit":"0",
        "approved":"2013-06-06T17:05:20",
        "schedule":"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule¶ms=permit=13MFF-0060&ExportPDF=1&Filename=13MFF-0060_schedule.pdf",
        "address":"400 HOWARD ST",
        "applicant":"Cheese Gone Wild",
        "lot":"008",
        "fooditems":"Grilled Cheese Sandwiches",
        "longitude":"-122.395881037818",
        "latitude":"37.7891192079118",
        "objectid":"427795",
        "y":"2115347.095",
        "x":"6013858.06"
    }
    ,
    {
        "location":{
            "needs_recoding":False,
            "longitude":"-122.394594036195",
            "latitude":"37.7879001115912"
        },
        "status":"APPROVED",
        "expirationdate":"2014-03-15T00:00:00",
        "permit":"13MFF-0063",
        "block":"3737",
        "received":"Mar 14 2013 2:35PM",
        "facilitytype":"Truck",
        "blocklot":"3737012",
        "locationdescription":"01ST ST: CLEMENTINA ST to FOLSOM ST (245 - 299)",
        "cnn":"110000",
        "priorpermit":"1",
        "approved":"2013-03-19T14:01:19",
        "schedule":"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule¶ms=permit=13MFF-0063&ExportPDF=1&Filename=13MFF-0063_schedule.pdf",
        "address":"245 01ST ST",
        "applicant":"Mini Mobile Food Catering",
        "lot":"012",
        "fooditems":"Cold Truck: Corn Dogs: Noodle Soups: Candy: Pre-packaged Snacks: Sandwiches: Chips: Coffee: Tea: Various Beverages",
        "longitude":"-122.394594036205",
        "latitude":"37.7879000978181",
        "objectid":"427837",
        "y":"2114895.75",
        "x":"6014220.898"
    }
    ,
    {
        "location":{
            "needs_recoding":False,
            "longitude":"-122.39347293179",
            "latitude":"37.7860914772"
        },
        "status":"APPROVED",
        "expirationdate":"2014-03-15T00:00:00",
        "permit":"13MFF-0104",
        "block":"3749",
        "received":"Apr 18 2013 9:15AM",
        "facilitytype":"Truck",
        "blocklot":"3749058",
        "locationdescription":"01ST ST: LANSING ST to HARRISON ST \\ I-80 E ON RAMP (362 - 399)",
        "cnn":"113000",
        "priorpermit":"1",
        "approved":"2013-06-11T13:06:30",
        "schedule":"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule¶ms=permit=13MFF-0104&ExportPDF=1&Filename=13MFF-0104_schedule.pdf",
        "address":"390 01ST ST",
        "applicant":"Steve's Mobile Deli",
        "lot":"058",
        "fooditems":"Cold Truck: Pre-packaged sandwiches: Burgers: Hot Dogs: Muffin Sandwiches: Enchiladas: Bagels: Burritos: Salads: Snacks: Beverages",
        "longitude":"-122.3934729318",
        "latitude":"37.7860914634251",
        "objectid":"438423",
        "y":"2114230.765",
        "x":"6014531.475"
    }
]
# Inverted index over the lowercased `fooditems`/`applicant` words:
# each token maps to the set of indices into sample_food_trucks_data whose
# record contains it.  Tokens keep their trailing ':' from naive splitting.
sample_query_index = {'mini': set([2]), 'cheese': set([1]), 'muffin': set([3]), 'llc': set([0]), 'cold': set([2, 3]),
                      'gone': set([1]), 'cupcakes': set([0]), 'cupkates': set([0]), 'truck:': set([2, 3]),
                      'deli': set([3]), 'hot': set([3]), 'various': set([2]), 'sandwiches': set([1]),
                      'grilled': set([1]), 'pre-packaged': set([2, 3]), 'candy:': set([2]), 'tea:': set([2]),
                      'beverages': set([2, 3]), 'food': set([2]), 'corn': set([2]), 'chips:': set([2]),
                      'sandwiches:': set([2, 3]), 'catering': set([2]), 'burgers:': set([3]), 'wild': set([1]),
                      'dogs:': set([2, 3]), 'bagels:': set([3]), 'noodle': set([2]), 'enchiladas:': set([3]),
                      'mobile': set([2, 3]), 'coffee:': set([2]), 'salads:': set([3]), 'bakery,': set([0]),
                      'soups:': set([2]), "steve's": set([3]), 'burritos:': set([3]), 'snacks:': set([2, 3])}
# Sorted coordinate lists taken from the records' top-level lat/long fields;
# sorted ascending -- presumably for binary-search lookups (TODO confirm).
sample_latitude_index = [37.7860914634251, 37.7879000978181, 37.7891192079118, 37.7901490737255]
sample_longitude_index = [-122.398658184604, -122.395881037818, -122.394594036205, -122.3934729318]
| {
"repo_name": "abhayv/foodtrucks-backend",
"path": "test_data.py",
"copies": "1",
"size": "5239",
"license": "mit",
"hash": -7003187338951262000,
"line_mean": 38.6893939394,
"line_max": 177,
"alpha_frac": 0.5936247375,
"autogenerated": false,
"ratio": 2.751575630252101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3845200367752101,
"avg_score": null,
"num_lines": null
} |
import math
def res(x, y):
    """Return log10(x ** y): a comparable magnitude proxy for x ** y.

    We use the identity log10(x ** y) = y * log10(x) so that astronomically
    large powers can be compared without ever being computed.

    BUG FIX: the original returned the *raw* values 0 (for x == 0) and
    1 (for y == 0) instead of their logarithms, so comparisons against
    genuine log values elsewhere were wrong (e.g. 3**0 and 10**1 compared
    equal, since log10(10**1) == 1).  All returns are now on the log scale.

    Raises:
        ValueError: if x is negative (math.log10 domain error), as before.
    """
    if x == 0:
        # 0 ** 0 is taken as 1 (log10 == 0, the usual convention);
        # 0 ** y for y > 0 is 0, whose log10 is -infinity, so it always
        # compares smaller than any positive power.
        return 0 if y == 0 else float("-inf")
    if y == 0:
        return 0  # x ** 0 == 1, and log10(1) == 0
    return y * math.log10(x)
if __name__ == "__main__":  # Script entry point
    # Each input line carries a base and an exponent, comma separated.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # Compare the two powers on a logarithmic scale rather than computing
    # the (possibly enormous) powers themselves.
    res1, res2 = res(x1, y1), res(x2, y2)

    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| {
"repo_name": "TheAlgorithms/Python",
"path": "maths/largest_of_very_large_numbers.py",
"copies": "1",
"size": "1029",
"license": "mit",
"hash": -8758023757187157000,
"line_mean": 28.4,
"line_max": 79,
"alpha_frac": 0.5665694849,
"autogenerated": false,
"ratio": 3.2563291139240507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9322898598824051,
"avg_score": 0,
"num_lines": 35
} |
class Gpa(object):
    # data attributes
    # NOTE(review): these are CLASS-level attributes shared by all instances
    # until shadowed by assignment through `self` -- a known pitfall; the
    # code relies on per-instance shadowing and manual re-zeroing in cgpa().
    "Interactive calculator for semester GPA and cumulative CGPA (Python 2)."
    arg1 = None            # number of courses registered (int from input())
    arg2 = None            # subject value set by getSubject(); otherwise unused
    subData = None         # last grade letter entered (string)
    Scale = None           # grading scale: 5 or 10
    credits = None         # credits of the subject last entered
    initCourse = 0         # loop counter over courses
    initgetCredit = 0      # credits of the current subject
    totalCredits = 0       # running sum of credits this semester
    temp = 0               # running sum of credits * grade points
    def getCourse(self):
        "get the value of the no of course you registered"
        # NOTE: Python 2 input() evaluates the typed expression -- expects an int.
        self.arg1 = input("No of course you have registered: " )
        pass
    def getSubject(self,value):
        "get the subject value"
        self.arg2 = value
        pass
    def getScale(self):
        "To get the scale value"
        self.Scale = input("Enter the Scale value(Either 5 or 10): " )
        pass
    def getSubjectData(self):
        "get the subject Data in string"
        # raw_input keeps the grade as a string key for the grade tables.
        self.subData = raw_input("Enter the grade: " )
        pass
    def getGradeData(self):
        # To calculate grade for two scale,one is for 5.0 and other one for 10.0
        # Raises KeyError if the entered grade letter is not in the table.
        if self.Scale == 10:
            grade1 = {'s':10,'a':9,'b':8,'c':7,'d':5,'e':3,'f':0}
            x=grade1[self.subData]
        else: #5.0 scale
            grade2 = {'a':5,'b':4,'c':3,'d':2,'e':1,'f':0}
            x=grade2[self.subData]
        return x
    def getCredits(self):
        "get credit value"
        self.credits = input("Enter the credits for a subject:" )
        pass
    def gpa(self):
        "Interactive entry point: validate scale/course count, then compute GPA."
        print "Calculate GPA:"
        sem = raw_input("Please Enter Semester: " )
        self.getScale() #input the scale value
        if self.Scale == 5 or self.Scale == 10:
            self.getCourse()
            if self.arg1 >= 2:
                self.calculateGpa()
            else:
                print "In order to calculate Gpa you schould have atleast 2 subject minimum"
        else:
            print "you have not entered the scale correctly please try again"
        pass
    def calculateGpa(self):
        "Method to calculate Gpa "
        # Accumulates temp (credit-weighted grade points) and totalCredits,
        # then prints GPA = temp / totalCredits rounded to 2 places.
        while self.initCourse!=self.arg1:
            self.initCourse=self.initCourse+1
            self.getCredits()
            self.initgetCredit = self.credits
            self.getSubjectData()
            #type(self.getSubjectData())
            self.temp = self.initgetCredit*self.getGradeData()+self.temp
            self.totalCredits=self.totalCredits+self.initgetCredit
        # `+.0` forces float division under Python 2's integer `/`.
        gpa = round((self.temp+.0)/(self.totalCredits+.0),2)
        print "you have registered for total credits:"+" "+str(self.totalCredits)+" "+"and you have acquired GPA:\""+str(gpa)+"\""
        pass
    def cgpa(self):
        "Interactive CGPA: run calculateGpa per semester and combine the totals."
        print "Calculate your cgpa: "
        semesters = input("Enter how many semester cgpa has to be found of: " )
        counter = 0
        tempInit = 0          # credit-weighted grade points across semesters
        tempTotalCredits = 0  # total credits across semesters
        self.getScale() #input the scale value
        if self.Scale == 5 or self.Scale == 10:
            while counter != semesters:
                counter = counter+1
                print "Please enter the details of the semester"+" "+str(counter)
                self.getCourse()
                self.calculateGpa()
                tempInit = self.temp+tempInit
                tempTotalCredits = tempTotalCredits + self.totalCredits
                # re-assigning
                # Reset per-semester accumulators so the next pass of
                # calculateGpa starts from zero.
                self.arg1=0
                self.initCourse =0
                self.temp=0
                self.totalCredits=0
                print "\n"
            cgpa = round((tempInit+.0)/(tempTotalCredits+.0),2)
            print "you have registered for total credits:"+" "+str(tempTotalCredits)+" "+"and you have acquired CGPA:\""+str(cgpa)+"\" "
        else:
            print "you have not entered the scale correctly please try again"
        pass
if __name__ == '__main__': # main method
    # Usage: instantiate and run either the CGPA or the single-semester flow.
    Init = Gpa() # Creating Instance
    # for calculation of Cgpa (cumulative grade point average)
    Init.cgpa()
    # In Order to calculate Gpa for a single semester instead:
    #Init.gpa()
#output:
"""
[abhi@localhost ~]$ python gpaCalculator.py
Calculate your cgpa:
Enter how many semester cgpa has to be found of: 2
Enter the Scale value(Either 5 or 10): 10
Please enter the details of the semester 1
No of course you have registered: 2
Enter the credits for a subject:4
Enter the grade: a
Enter the credits for a subject:4
Enter the grade: c
you have registered for total credits: 8 and you have acquired GPA:"8.0"
Please enter the details of the semester 2
No of course you have registered: 3
Enter the credits for a subject:4
Enter the grade: b
Enter the credits for a subject:5
Enter the grade: a
Enter the credits for a subject:3
Enter the grade: c
you have registered for total credits: 12 and you have acquired GPA:"8.17"
you have registered for total credits: 20 and you have acquired CGPA:"8.1"
"""
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577800_Python_Gpa_and_Cgpa_Calculator/recipe-577800.py",
"copies": "1",
"size": "5641",
"license": "mit",
"hash": 827416176130042500,
"line_mean": 34.25625,
"line_max": 270,
"alpha_frac": 0.5942208828,
"autogenerated": false,
"ratio": 3.866346812885538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9849294713889898,
"avg_score": 0.022254596359128208,
"num_lines": 160
} |
__author__ = '@abhinavbom a.k.a darkl0rd'
import sys
import os
import urllib2
from xml.dom import minidom
from urllib import quote
from lib.parse import *
# Base API URL for all URLVoid requests.
base_url = 'http://api.urlvoid.com/'
# API Key . Key can be obtained from api.urlvoid.com
api_key = '<Add API key Here>'+'/'
# Fail fast at import time when the placeholder key was never replaced.
if api_key == '<Add API key Here>/':
    print "Please add your Urlvoid API key"
    sys.exit()
# plan identifier. change this value as per your plan in URLvoid. Free plans are designated by api1000 which is the
#default value here.
plan = 'api1000/'
#search for a host in URLvoid (API endpoint segment)
detect = 'host/'
def urlvoid(url):
c=0
print "Conneting to URLVoid"
while c < len(url):
print url[c]
final_url = base_url+plan+api_key+detect+url[c]
#print final_url
if HTTP_PROXY or HTTPS_PROXY:
proxy = urllib2.ProxyHandler({'http': HTTP_PROXY, 'https': HTTPS_PROXY})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
req = urllib2.urlopen(final_url)
resp = req.read()
print resp
tree = minidom.parseString(resp)
print tree
obs_value = tree.getElementsByTagName('detections')
for value in obs_value:
print value.firstChild.nodeValue
#for detections in tree.iter('detections'):
# print detections.attrib
#print resp
c+=1
| {
"repo_name": "abhinavbom/Threat-Intelligence-Hunter",
"path": "api/urlvoid.py",
"copies": "1",
"size": "1415",
"license": "mit",
"hash": -2004291452544340200,
"line_mean": 26.7450980392,
"line_max": 115,
"alpha_frac": 0.6310954064,
"autogenerated": false,
"ratio": 3.5199004975124377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46509959039124377,
"avg_score": null,
"num_lines": null
} |
__author__ = '@abhinavbom a.k.a darkl0rd'
import urllib2
import urlparse
import re
import os
import time
from lib.feeds import *
from lib.parse import *
def gather():
    """Download every OSINT IP feed into intel/<feed name>/<feed>_<timestamp>.txt.

    Side effects: creates the `intel` directory tree and PERMANENTLY changes
    the process working directory (chdir into `intel`, then in and out of each
    feed folder).  NOTE(review): uses a Windows-style path '.\\intel' and the
    opened file handle is never closed -- candidates for cleanup.
    """
    if not os.path.exists('intel'):
        os.mkdir('intel')
    os.chdir('.\\intel')
    #print os.getcwd()
    print "Starting feed update process"
    counter = 0
    ioc_list = []
    # Timestamp shared by all feed files from this run.
    timestr = time.strftime("%Y%m%d-%H%M%S")
    for source in OSINT_IP.iteritems():
        # source is a (feed name, feed URL) pair from lib.feeds.OSINT_IP.
        if not os.path.exists(str(source[0])):
            os.mkdir(str(source[0]))
        print os.getcwd()
        os.chdir(str(source[0]))
        print source[0]
        name = str(source[0]) +"_" + timestr + ".txt"
        print name
        print "Building database"
        file = open(name, 'a+')
        r = requests.get(str(source[1]),
                         headers=create_basic_headers(),
                         proxies={'http': HTTP_PROXY, 'https': HTTPS_PROXY})
        # Skip comment/blank lines; keep everything else.
        # NOTE(review): iterating a requests.Response yields raw content
        # chunks, not lines -- confirm this filters as intended.
        for line in r:
            if line.startswith("/") or line.startswith('\n') or line.startswith("#"):
                pass
            else:
                file.write(line+'\n')
        os.chdir("..")
| {
"repo_name": "abhinavbom/Threat-Intelligence-Hunter",
"path": "lib/updatefeed.py",
"copies": "1",
"size": "1127",
"license": "mit",
"hash": -264536217843432740,
"line_mean": 28.6578947368,
"line_max": 85,
"alpha_frac": 0.5359361136,
"autogenerated": false,
"ratio": 3.6237942122186495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46597303258186495,
"avg_score": null,
"num_lines": null
} |
__author__ = '@abhinavbom a.k.a Darkl0rd'
###List of known IPs from various sources###
# Each constant below is the download URL of one public threat-intel feed.
# They are grouped into the OSINT_* dicts at the bottom, keyed by a
# human-readable feed name used for file/folder names elsewhere.
#Emerging Threats Community
emerging_threat_compromisedIP = "http://rules.emergingthreats.net/blockrules/compromised-ips.txt"
emerging_threat_blockedIP = "http://rules.emergingthreats.net/fwrules/emerging-Block-IPs.txt"
emerging_threat_russian_IP = "http://doc.emergingthreats.net/pub/Main/RussianBusinessNetwork/RussianBusinessNetworkIPs.txt"
emerging_threat_malvertisers = "http://doc.emergingthreats.net/pub/Main/RussianBusinessNetwork/emerging-rbn-malvertisers.txt"
#Project Honeynet
honeypot_ip = "http://www.projecthoneypot.org/list_of_ips.php?rss=1"
#SANS Institute
sans_ip = "https://isc.sans.edu/ipsascii.html?limit=2000"
#Blocklist.de
block_list = "http://www.blocklist.de/lists/all.txt"
#AlienVault IP Reputation
alien = "https://reputation.alienvault.com/reputation.generic"
#Abuse.ch zeus tracker
zeus_tracker = "https://zeustracker.abuse.ch/blocklist.php?download=ipblocklist"
#Malc0de Blacklist IPs
malcode_ip = "http://malc0de.com/bl/IP_Blacklist.txt"
#Malware Domain List (MDL) Malware C&C IPs
mdl_ip = "http://www.malwaredomainlist.com/hostslist/ip.txt"
#TALOS IP Blacklist
talos_ip = "http://talosintel.com/files/additional_resources/ips_blacklist/ip-filter.blf"
#CI Army Bad IPs
ci_army_ip = "http://www.ciarmy.com/list/ci-badguys.txt"
#NoThink.org Honeypot Project By Matteo Cantoni
nothink_dns = "http://www.nothink.org/blacklist/blacklist_malware_dns.txt"
nothink_http = "http://www.nothink.org/blacklist/blacklist_malware_http.txt"
nothink_irc = "http://www.nothink.org/blacklist/blacklist_malware_irc.txt"
nothink_ssh = "http://www.nothink.org/blacklist/blacklist_ssh_all.txt"
#TOR exit nodes
tor_exit_nodes = "https://check.torproject.org/exit-addresses"
#korean & Chinese Spam IP feed
korean_ip = "http://www.okean.com/korea.txt"
#Bad IPs listr for last 1000 hours
bad_ip = "http://www.badips.com/get/list/apache-wordpress/0?age=1000h"
###List of Domains goes here ###
#Open-phish
open_pish = "https://openphish.com/feed.txt"
#Joewein Blacklist
joewein_domains = 'http://www.joewein.net/dl/bl/dom-bl.txt'
###Ocasionally updated feeds for Email blacklisting###
#Joewein Blacklist
joewein_email = 'http://www.joewein.net/dl/bl/from-bl.txt'
# IP-reputation feeds: display name -> feed URL.
OSINT_IP = {
    "Emerging Threat Compromised IP Feed": emerging_threat_compromisedIP,
    "Emerging Threat Blocked IP feed": emerging_threat_blockedIP,
    "Emerging Threat Russian Business Network IP feed": emerging_threat_russian_IP,
    "Emerging threat Malvertisement Network": emerging_threat_malvertisers,
    "Project HoneyNet IP feed": honeypot_ip,
    "SANS IP feed": sans_ip,
    "BlockList.de IP feed": block_list,
    "AlienVault IP reputation feed": alien,
    "Abuse.ch Zeus Tracker IP feed": zeus_tracker,
    "Malc0de Blacklist IP feed": malcode_ip,
    "Malware Domain List C&C IPs": mdl_ip,
    "Talos Blacklist IP feed": talos_ip,
    "CI Army IP feed": ci_army_ip,
    "Nothink.org Honeypot DNS IPs": nothink_dns,
    "Nothink.org Http CC IPs": nothink_http,
    "Nothink.org IRC bot IPs": nothink_irc,
    "Nothink.org SSH bruteforce IPs": nothink_ssh,
    "TOR Exit node IP": tor_exit_nodes,
    "Korean & Chinese Spam IP feed": korean_ip,
    "Bad-IPs DB for last 1000 hours": bad_ip
}
# Domain/URL blacklist feeds.
OSINT_URL = {
    "Open Phish DB for Phishing Websites": open_pish,
    "Joe Wein domain blacklist": joewein_domains
}
# E-mail address blacklist feeds.
OSINT_EMAIL = {
    "Joe Wein Email blacklist": joewein_email
}
| {
"repo_name": "abhinavbom/Threat-Intelligence-Hunter",
"path": "lib/feeds.py",
"copies": "1",
"size": "3472",
"license": "mit",
"hash": 5653165215396957000,
"line_mean": 34.0707070707,
"line_max": 125,
"alpha_frac": 0.7373271889,
"autogenerated": false,
"ratio": 2.5968586387434556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38341858276434554,
"avg_score": null,
"num_lines": null
} |
__author__ = '@abhinavbom a.k.a Darkl0rd'
#local import
from lib.feeds import *
#stdlib import
import sys
import requests
# for traffic inspection from eclipse only
DEBUG=False
# Proxy Support. Added local proxy for debugging purpose.
# With DEBUG on, traffic is routed through a local intercepting proxy.
if DEBUG:
    HTTP_PROXY = '127.0.0.1:80'
    HTTPS_PROXY = '127.0.0.1:443'
# Add your own proxy server to pass traffic through it
else:
    HTTP_PROXY = '' # Enter your proxy address
    HTTPS_PROXY = HTTP_PROXY #enter HTTPS proxy address(remove the assigned HTTPS_PROXY variable)
# Browser user-agent sent with every feed request (see create_basic_headers).
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'
#Creating basic HTTP headers to avoid traffic being dropped by filters.
def create_basic_headers():
    """Return baseline browser-like HTTP headers for feed requests."""
    return {
        'User-Agent': USER_AGENT,
        'Accept-Language': 'en-US,en;q=0.5',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    }
# parse function calls feeds.sources and traverses each of them to look for the input vector.
def regex(ioc_type):
    """Return a compiled regex matching one indicator-of-compromise type.

    Args:
        ioc_type: one of "ip", "domain", "md5", "sha1", "sha256", "email", "URL".
    Returns:
        A compiled `re` pattern whose group(1) captures the indicator.
    Exits the process (status 0) with a message on an unknown type.
    """
    # BUG FIX: this module never imported `re` (it relied on
    # `from lib.feeds import *`, which does not provide it), so every call
    # raised NameError.  Import locally to keep the fix self-contained.
    import re
    ioc_patts = {
        "ip":"((?:(?:[12]\d?\d?|[1-9]\d|[1-9])(?:\[\.\]|\.)){3}(?:[12]\d?\d?|[\d+]{1,2}))",
        "domain":"([A-Za-z0-9]+(?:[\-|\.][A-Za-z0-9]+)*(?:\[\.\]|\.)(?:com|net|edu|ru|org|de|uk|jp|br|pl|info|fr|it|cn|in|su|pw|biz|co|eu|nl|kr|me))",
        "md5":"\W([A-Fa-f0-9]{32})(?:\W|$)",
        "sha1":"\W([A-Fa-f0-9]{40})(?:\W|$)",
        "sha256":"\W([A-Fa-f0-9]{64})(?:\W|$)",
        "email":"[a-zA-Z0-9_]+(?:\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?!([a-zA-Z0-9]*\.[a-zA-Z0-9]*\.[a-zA-Z0-9]*\.))(?:[A-Za-z0-9](?:[a-zA-Z0-9-]*[A-Za-z0-9])?\.)+[a-zA-Z0-9](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?",
        "URL":"((?:http|ftp|https)\:\/\/(?:[\w+?\.\w+])+[a-zA-Z0-9\~\!\@\#\$\%\^\&\*\(\)_\-\=\+\\\/\?\.\:\;]+)",
    }
    try:
        pattern = re.compile(ioc_patts[ioc_type])
    # BUG FIX: an unknown type raises KeyError from the dict lookup, which
    # the original `except re.error` did not catch -- the friendly message
    # was unreachable for the common failure mode.
    except (KeyError, re.error):
        print('[!] Invalid type specified.')
        sys.exit(0)
    return pattern
def url_param():
    """Extra query-string parameters for feed requests (none by default)."""
    return {}
def connect(url,params):
    """GET `url` through the configured proxy with browser-like headers.

    Returns the requests.Response on success, or None (implicitly) after
    printing a message on any failure.
    NOTE(review): the `params` argument is ignored -- the call always uses
    url_param() instead; confirm before relying on it.
    """
    print "Connecting with", url
    try:
        r = requests.get(url,
                         headers = create_basic_headers(),
                         proxies = {'http': HTTP_PROXY, 'https': HTTPS_PROXY},
                         params=url_param())
        return r
    except Exception as exc:
        # Best-effort: report and fall through, returning None.
        sys.stdout.write('[!] Could not connect to: %s\n' %url)
        # sys.stdout.write('Exception: %s' % exc)
def parse_ip(ip):
    """Check each IP in the list `ip` against every OSINT IP feed, printing
    a banner for each feed whose content contains the IP as a substring.

    NOTE(review): if connect() fails it returns None and `for line in c`
    will raise TypeError; also iterating a requests.Response yields raw
    content chunks, not lines -- confirm the per-line filtering intent.
    """
    counter = 0
    ioc_list = []
    for filename, source in OSINT_IP.iteritems():
        c = connect(source,params=url_param())
        for line in c:
            # Skip comment/blank lines from the feed.
            if line.startswith("/") or line.startswith('\n') or line.startswith("#"):
                pass
            else:
                counter += 1
                d = 0
                # Substring match of every requested IP against this line.
                while d < len(ip):
                    if ip[d] in line:
                        print "************"
                        print "IP {} match found under {} Feed".format(ip[d], source)
                        print "************"
                    d +=1
    print "Total scanned indicators", counter
def parse_ipList(list):
counter = 0
ioc_list = []
for filename, source in OSINT_ip.iteritems():
c = connect(source)
list = open("items.txt", "r")
for items in list.readlines():
for line in c:
if line.startswith("/") or line.startswith('\n') or line.startswith("#"):
pass
else:
counter += 1
if items in line:
print items, filename
print "Total scanned indicators", counter
| {
"repo_name": "abhinavbom/Threat-Intelligence-Hunter",
"path": "lib/parse.py",
"copies": "1",
"size": "3632",
"license": "mit",
"hash": 347241824934938500,
"line_mean": 32.9439252336,
"line_max": 206,
"alpha_frac": 0.5123898678,
"autogenerated": false,
"ratio": 3.1310344827586207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9072779280318815,
"avg_score": 0.01412901404796123,
"num_lines": 107
} |
__author__ = '@abhinavbom a.k.a Darkl0rd'
#local imports
from lib.parse import *
from api.vt import *
from api.urlvoid import *
from lib.updatefeed import gather
#stdlib imports
import argparse
banner = ''' Threat Intelligence Hunter framework Begins now '''
print banner
def main():
    """CLI entry point: parse arguments and dispatch to the feed/API lookups.

    Lookups that hit VirusTotal are capped at 4 values per call because the
    free API allows 4 requests per minute.
    NOTE(review): several help strings contain typos and a missing space
    ("Search for a" "single IP" concatenates without a space); they are
    runtime strings and left unchanged here.
    """
    print "Intel test"
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-ip', type=str, nargs='+', help="Search for a"
                                                         "single IP through OSINT threat feeds")
    parser.add_argument('-list', type=str, nargs='?', help="Search for a list of IP vectors. Accepted formats is .txt")
    parser.add_argument('-md5', type=str, nargs='+', help="Search for a single of space separated multiple MD5s. "
                                                          "This search is performed on Virustotal hence only 4 searches "
                                                          "per minute is allowed. Please add your public key to vt.py")
    parser.add_argument('-url', type=str, nargs='+', help="Search for a single of space separated multiple urls. "
                                                          "This search is performed on Virustotal hence only 4 searches "
                                                          "per minute is allowed. Please add your public key to bin/vt.py")
    parser.add_argument('-repo', type=str, nargs='+', help="Search for the reputation of a list of URLs. The script"
                                                           "accepts comma separated list of domains or IPs and searches it"
                                                           "against popular reputation tools like URLVoid, Bluecoat etc.")
    parser.add_argument('-update', action='store_true', help='Update the local storage of feeds data.')
    args = parser.parse_args()
    if args.ip:
        # VirusTotal free tier: at most 4 lookups per minute.
        if len(args.ip) > 4:
            print "Too many argument values specified. Maximum arguments per minute is 4."
            sys.exit(1)
        parse_ip(args.ip)
        vt_ip(args.ip)
    if args.list:
        # NOTE(review): passes the `list` BUILTIN, not args.list -- looks
        # like a bug in the original; confirm against parse_ipList().
        parse_ipList(list)
    if args.md5:
        if len(args.md5) > 4:
            print "Too many argument values specified. Maximum arguments per minute is 4."
            sys.exit(1)
        vt_md5(args.md5)
    if args.url:
        if len(args.url) > 4:
            print "Too many argument values specified. Maximum arguments per minute is 4."
            sys.exit(1)
        vt_url(args.url)
    if args.repo:
        urlvoid(args.repo)
    if args.update:
        print "updating"
        gather()
if __name__ == '__main__':
    main()
| {
"repo_name": "abhinavbom/Threat-Intelligence-Hunter",
"path": "tih.py",
"copies": "1",
"size": "2680",
"license": "mit",
"hash": 1227851610080421600,
"line_mean": 42.2258064516,
"line_max": 123,
"alpha_frac": 0.5630597015,
"autogenerated": false,
"ratio": 4.3156199677938805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.537867966929388,
"avg_score": null,
"num_lines": null
} |
import schedule
from django.core.mail import send_mail, send_mass_mail
from haikunator import haikunator
from twilio.rest import TwilioRestClient
from datetime import datetime, timedelta
import time
import _thread
import os
from log.Log import log
from users.models import MemberUser
sys_email = 'hex.omega@yandex.com'
def get_default_password():
    """Generate a random, human-readable default password (haiku-style slug)."""
    return haikunator.Haikunator().haikunate()
def mail_kickoff(*args, var=1, **kwargs):
    """Fire one of the mail helpers on a background thread.

    Args:
        *args: positional payload forwarded to the selected helper.
        var: selects the helper --
            1: send_default_password_threads(args[0], args[1])
            2: send_reminder_threads(args[0])
            3: send_upload_update(args[0], args[1])
        **kwargs: accepted for interface compatibility; currently unused.
    """
    # BUG FIX: the original compared with `var is 1` -- identity against an
    # int literal, which only works by accident of CPython's small-int
    # caching; use equality.
    if var == 1:
        _thread.start_new_thread(send_default_password_threads, (args[0], args[1]))
    elif var == 2:
        _thread.start_new_thread(send_reminder_threads, (args[0],))
    elif var == 3:
        _thread.start_new_thread(send_upload_update, (args[0], args[1]))
def send_upload_update(user, task):
    """E-mail the task's members, its project leader and the project admins
    that `user` uploaded a deliverable for `task`."""
    subject = 'Upload Update - ' + task.title
    # NOTE(review): `user.get_full_name` is not called -- if it is a method
    # this interpolates its repr; confirm against the user model.
    body = 'Deliverable for task:[{}] has been uploaded by {}'.format(task.title, user.get_full_name)
    recipients = [member.email for member in task.members.all()]
    recipients.append(task.action_list.project.leader.email)
    recipients.extend(admin.email for admin in task.action_list.project.admins.all())
    send_mail(
        subject,
        body,
        sys_email,
        recipients,
        fail_silently=False
    )
def send_default_password_threads(user, password, **kwargs):
    """E-mail `user` their generated default password with instructions to
    change it after first login.

    SECURITY FIX: the original also printed the plaintext password to
    stdout; that debug statement has been removed so credentials never end
    up in process logs.
    """
    subject = 'Password - Hex Omega Systems'
    body = 'Password for {}({}) is {}\n\n'.format(user.get_full_name(), user.username, password)
    body += 'Please login and change your password, under Profile->Edit Profile.\n'
    send_mail(
        subject,
        body,
        sys_email,
        [user.email],
        fail_silently=False
    )
def start_schedule_thread():
    """Kick off the task-reminder e-mail sweep on a background thread."""
    print('Yo')  # debug marker kept for behavioural parity
    _thread.start_new_thread(tasks_email_schedule, ())
def tasks_email_schedule():
    """Sweep every project's tasks: log warnings for overdue deliverables
    and queue reminder e-mails for tasks due tomorrow.

    The Project import is local to avoid a circular import at module load.
    """
    from users.models import Project
    print('Haha')  # TODO(review): debug leftover
    for project in Project.objects.all():
        print(project.name)
        # TODO(review): debug mail to a hard-coded address; kept for
        # behavioural parity but should be removed.
        send_mail(
            'test_scheduler',
            'This is a test for the scheduler. \nPlease ignore.\n' + project.name,
            sys_email,
            ['defstryker@hotmail.com'],
            fail_silently=False
        )
        lp = []
        for task in project.actionlist.task_set.all():
            if task.est_end.date() < datetime.now().date():
                # Past due: log a warning against every assignee.
                for m in task.members.all():
                    log('WARNING', m, '{} is late in submission of deliverable for {}'.format(m.username, task.title))
                if task.to_leader:  # BUG FIX: was `is True` (identity test)
                    log('WARNING', task.action_list.project.leader,
                        '{} is late in submission of deliverable for {}'.format(
                            task.action_list.project.leader.username, task.title))
            if task.est_end.date() - timedelta(days=1) == datetime.now().date():
                # Due tomorrow: queue a reminder unless the task is done.
                # BUG FIX: the original used `is not` for string comparison,
                # which tests object identity, not equality.
                if task.status != 'Completed' and task.status != 'Unassigned':
                    recipients = [member.email for member in task.members.all() if member.email != '']
                    if task.to_leader:
                        recipients.append(task.action_list.project.leader.email)
                    sub = task.action_list.project.name + ' : ' + task.title
                    msg = 'This is an automated reminder to submit your deliverable before tomorrow.\n\n'
                    msg += 'Please do not reply to this mail.'
                    t = (sub, msg, sys_email, recipients)
                    print(t, file=open('mass_mail_log.txt', 'w+'))
                    lp.append(t)
        if lp:  # BUG FIX: was `len(lp) is not 0`
            mail_kickoff(lp, var=2)
            print(lp, file=open('tasks.txt', 'w+'))
def send_reminder_threads(mails, **kwargs):
    """Send a batch of reminder e-mails over a single SMTP connection."""
    send_mass_mail(tuple(mails), fail_silently=False)
def uploaded_file_handler(f, path):
    """Stream an uploaded file object to `path`/<f.name>, chunk by chunk."""
    destination = os.path.join(path, f.name)
    with open(destination, 'wb') as sink:
        for piece in f.chunks():
            sink.write(piece)
def project_deadline_schedule():
    """Warn members two days before a project's end date and auto-close
    projects whose end date is today.

    The Project import is local to avoid a circular import at module load.
    """
    from users.models import Project
    print('Haha')  # TODO(review): debug leftover
    for project in Project.objects.all():
        print(project.name)
        # TODO(review): debug mail to a hard-coded address; kept for
        # behavioural parity but should be removed.
        send_mail(
            'test_scheduler',
            'This is a test for the scheduler. \nPlease ignore.\n' + project.name,
            sys_email,
            ['defstryker@hotmail.com'],
            fail_silently=False
        )
        lp = []
        # BUG FIX: the original compared the datetime end_date for EXACT
        # equality with `datetime.now() + timedelta(days=2)` -- equal down
        # to the microsecond, so it effectively never matched.  Compare the
        # calendar dates instead (end_date is a datetime: `.date()` is also
        # used for the closing check below).
        if project.end_date.date() == (datetime.now() + timedelta(days=2)).date():
            recipients = [member.email for member in MemberUser.objects.filter(project__name=project.name)]
            recipients.append(project.leader.email)
            sub = project.name + ' Project Closing Notice'
            msg = 'This is an automated alert. This project will be closed in two days.\n\n'
            msg += 'Please do not reply to this mail.'
            t = (sub, msg, sys_email, recipients)
            print(t, file=open('mass_mail_log.txt', 'w+'))
            lp.append(t)
        if project.end_date.date() == datetime.now().date():
            # Closing day: flag the project as closed.
            project.status = '1'
            project.save()
        if lp:  # BUG FIX: was `len(lp) is not 0`
            mail_kickoff(lp, var=2)
            print(lp, file=open('tasks.txt', 'w+'))
| {
"repo_name": "defstryker/Hex-Omega",
"path": "users/utils.py",
"copies": "1",
"size": "5129",
"license": "mit",
"hash": 3445194616962332000,
"line_mean": 34.1301369863,
"line_max": 118,
"alpha_frac": 0.5810099435,
"autogenerated": false,
"ratio": 3.687275341480949,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9761189660370572,
"avg_score": 0.0014191249220754452,
"num_lines": 146
} |
import os
import sqlite3
import datetime
def get_conn(db_file_name, printing=True):
    """Open (creating if needed) a SQLite database stored next to this module.

    Args:
        db_file_name: file name of the database, relative to this module.
        printing: when True, echo the resolved path on success.
    Returns:
        A sqlite3.Connection with PARSE_DECLTYPES enabled, so DATE/TIMESTAMP
        columns round-trip as datetime objects (see
        http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion).
    """
    # Resolve relative to the directory holding this file, not the CWD.
    module_dir = os.path.dirname(os.path.abspath(__file__))
    db_path = os.path.join(module_dir, db_file_name)
    with sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES) as conn:
        # The `with` block commits on exit; the connection stays open.
        if printing:
            print("\n\tOpened connection successfully to database:\n\t%s"%db_path)
        return conn
# def getConn(dbFilePath):
# with sqlite3.connect(dbFilePath, detect_types=sqlite3.PARSE_DECLTYPES) as conn:
# #souce for "detect_types=sqlite3.PARSE_DECLTYPES" is:
# #http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion
# print "\t\tOpened connection successfully"
# return conn
# return None
class Table:
    """In-memory query result: a sequence of row tuples plus column names.

    Supports len(), integer row indexing (table[2] -> third row) and string
    column indexing (table['col'] -> that column's value from every row).
    Chained indexing like table[i][j][k] works because table[i] returns the
    row tuple, which is then indexed normally.
    """
    def __init__ (self, input_attributes, input_table):
        self.table=input_table           # sequence of row tuples
        self.attributes=input_attributes # column names, parallel to row entries
    def __len__(self):
        return len(self.table)
    def __getitem__(self,i):
        """Row access with an int index, column access with a column name.

        Raises ValueError if a string key is not a known column name;
        non-int/non-str keys fall through and return None (as before).
        """
        # type()== comparisons kept (not isinstance) to preserve the
        # original's treatment of bool/int subclasses.
        if type(i)==int:
            return self.table[i]
        elif type(i)==str:
            # Column lookup: collect this column's value from ALL rows.
            # BUG FIX: the original iterated range(0, len(self.table)-1),
            # silently dropping the last row from every column read.
            ind=self.attributes.index(i)
            return tuple(row[ind] for row in self.table)
def build_where_clause(where_params_list, where_values_list):
    """Render a SQL WHERE clause from parallel column/value lists.

    Returns "" when either list is None.  NOTE: values are interpolated
    directly into the SQL text (no escaping) -- trusted input only.
    """
    if where_params_list is None or where_values_list is None:
        return ""
    clause = " WHERE " + " %s='%s' " % (str(where_params_list[0]), str(where_values_list[0]))
    for idx in range(1, len(where_values_list)):
        clause += " AND %s='%s' " % (str(where_params_list[idx]), str(where_values_list[idx]))
    return clause
def build_select_query(tablename, select_params_list, where_params_list=None, where_values_list=None):
    """Render a SELECT statement, delegating the WHERE part to
    build_where_clause.  Values are interpolated unescaped -- trusted
    input only.
    """
    columns = " %s" % select_params_list[0]
    for col in select_params_list[1:]:
        columns += ", %s" % col
    where_sql = build_where_clause(where_params_list=where_params_list,
                                   where_values_list=where_values_list)
    return "SELECT " + columns + " FROM `%s` " % tablename + where_sql + ";"
def build_update_query(tablename, update_params_list, update_values_list, where_params_list=None, where_values_list=None):
    """Render an UPDATE statement, delegating the WHERE part to
    build_where_clause.  Values are interpolated unescaped -- trusted
    input only.
    """
    assignments = " %s='%s' " % (str(update_params_list[0]), str(update_values_list[0]))
    for idx in range(1, len(update_values_list)):
        assignments += ", %s='%s' " % (str(update_params_list[idx]), str(update_values_list[idx]))
    where_sql = build_where_clause(where_params_list=where_params_list,
                                   where_values_list=where_values_list)
    return "UPDATE `" + tablename + "` SET " + assignments + where_sql + ";"
def build_insert_query(tablename, insert_params_list, tuple_values_list):
    """Render a (possibly multi-row) INSERT statement.

    BUG FIX: rows after the first were emitted WITHOUT their closing
    parenthesis, producing malformed SQL for every multi-row insert.
    Also, the old column loop compared values ("skip the first param")
    and so dropped any later column whose name equalled the first one;
    it now slices by position.

    NOTE: values are interpolated directly (no escaping) -- trusted
    input only.
    """
    columns = "%s" % insert_params_list[0]
    for param in insert_params_list[1:]:
        columns += ", %s" % param
    rows = []
    for values in tuple_values_list:
        row = "('%s'" % values[0]
        for value in values[1:]:
            row += " ,'%s'" % value
        rows.append(row + ")")
    insert_query = "INSERT INTO `%s`(" % tablename + columns + ") VALUES "
    insert_query += "\n" + ",\n".join(rows)
    insert_query += ";"
    return insert_query
def build_date(d, m, y):
    """Return a datetime.date from day, month, year given in d/m/y order."""
    return datetime.date(year=y, month=m, day=d)
def build_date2(day, month, year):
    """Return a datetime.date; keyword-friendly variant of build_date."""
    return datetime.date(day=day, month=month, year=year)
""" <---------------THE CORRECT WAY TO HANDLE DATES IN SQLITE3 with sqliteDefaults------------------>
#Create a random table
conn.execute('''Create table if not exists person(
ID INTEGER PRIMARY KEY,
Name TEXT,
DOB DATE
);
''')
conn.commit()
#Insert values into the table in one of the accepted formats
sqliteDefaults.insert_table_sqlite(conn,
'person',
('ID', 'Name', 'DOB'),
[
(1, 'Bob', sqliteDefaults.build_date(07,10,1999) ),
(2, 'John', sqliteDefaults.build_date(y=2005,m=8,d=21) ),
(3, 'Stacy', sqliteDefaults.build_date2(month=6,day=25,year=2003)),
(4, 'Emma', datetime.date(2001, 10, 27) )
]
)
#Source: http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion
table=sqliteDefaults.verified_select_sqlite(conn,"select * from `person` order by DOB desc;")
for row in table:
print row
#OUTPUT:
#(2, u'John', datetime.date(2005, 8, 21))
#(3, u'Stacy', datetime.date(2003, 6, 25))
#(4, u'Emma', datetime.date(2001, 10, 27))
#(1, u'Bob', datetime.date(1999, 10, 7))
print table[2][2].day
#OUTPUT:
# 27
#We can now compare the values as we do normal datetime objects: with > and <, etc
i=1; j=2;
if table[i][2]<table[j][2]:
print "%s is older than %s"%(table[i][1], table[j][1])
elif table[j][2]<table[i][2]:
print "%s is older than %s"%(table[j][1], table[i][1])
#OUTPUT:
# Emma is older than Stacy
"""
def insert_table_sqlite(conn, tablename, insert_params_list, tuple_values_list, commit=True, printing_debug=False):
    """Build an INSERT via build_insert_query() and execute it on conn.

    When printing_debug is True the generated SQL is echoed first; the
    transaction is committed unless commit=False.
    """
    sql = build_insert_query(tablename=tablename,
                             insert_params_list=insert_params_list,
                             tuple_values_list=tuple_values_list)
    if printing_debug:
        print("\n\n\n\tSQLITE INSERTION QUERY:\n%s\n\n" % sql)
    conn.cursor().execute(sql)
    if commit:
        conn.commit()
def insert_table_sqlite2(conn, tablename, parameters_tuple=(), tuple_values_list=[], commit=True, print_query=False):
    """Insert rows using '?' placeholders (safe against SQL injection).

    Fixes:
      * the placeholder count is now taken from the width of the first
        value tuple when no column list is given -- the original always
        emitted a single '?' in that case, breaking multi-column inserts;
      * `print query` statement replaced by the function form (identical
        output for a single argument under Python 2).
    NOTE(review): `tuple_values_list=[]` is a mutable default; it is never
    mutated here, but callers should still pass it explicitly.
    """
    if tuple_values_list == []:
        print("\n\tSQLiteDefaults: insert_table_sqlite() ERROR: tuple_value_list cannot be empty")
        return
    if parameters_tuple == ():
        query = "INSERT INTO `%s` VALUES " % (tablename)
        placeholder_count = len(tuple_values_list[0])
    else:
        query = "INSERT INTO `%s` %s VALUES" % (tablename, parameters_tuple)
        placeholder_count = len(parameters_tuple)
    # source: https://docs.python.org/2/library/sqlite3.html
    query = query + "(?" + (",?" * (placeholder_count - 1)) + ")"
    if print_query:
        print(query)
    conn.executemany(query, tuple_values_list)
    if commit:
        conn.commit()
def verified_select_sqlite(conn, select_query, fetch="all", printing=True):
    '''This function verifies that the entered query is a valid select query (to prevent SQL injection).
    If it is, it executes it and gets the table object. It returns None if the table is Empty, and prints an ERROR.
    If the table is non-empty, it returns the table object.'''
    if 'select' not in select_query.lower():
        if printing:
            print('verified_select() ERROR: Only select queries can be executed')
        return None
    temp = select_query.strip()
    if ';' not in temp:
        temp += ';'
    if temp.index(';') != (len(temp) - 1):
        # A ';' before the end would mean a second statement follows.
        if printing:
            print('verified_select() ERROR: Only one query can be fired at a time')
        return None
    cursor = conn.cursor()
    cursor.execute(temp)
    attributes = [col[0] for col in cursor.description]  # column names
    if fetch.lower() == "all":
        result_table = cursor.fetchall()
    elif fetch.lower() == "one":
        result_table = cursor.fetchone()
    else:
        if printing:
            print("verified_select() ERROR: Improper value '%s' passed to argument 'fetch'" % fetch)
        return None
    # Fix: the original compared `result_table is ()` -- an identity test
    # against a tuple literal that never matches fetchall()'s list (nor the
    # None returned by fetchone() on an empty table).  Use a truth test so
    # empty results are reported (and None returned) as documented.
    if not result_table:
        if printing:
            print('verified_select() ERROR: Empty table')
        return None
    return Table(input_table=result_table, input_attributes=attributes)
def print_table(conn, select_query):
    # Pretty-print the result of `select_query`: one row per line, columns
    # separated by tab pairs.  Error messages from verified_select_sqlite()
    # are suppressed (printing=False); nothing is printed when the query
    # fails or returns no rows.
    table = verified_select_sqlite(conn, select_query, printing=False)
    if table is not None:
        print '\n\n----------------------------------------------------------------'
        for row in table:
            print '\n'
            for i in range(0,len(row)):
                # Trailing comma keeps all columns of one row on a single
                # line (Python 2 print-statement softspace behaviour).
                print row[i],"\t\t",
        print '\n\n----------------------------------------------------------------\n'
def list_all_tables(db_file_name):
    # Print the names of all tables in the given SQLite database file by
    # querying the sqlite_master catalog table.
    conn=get_conn(db_file_name)
    print_table(conn,"select name from `sqlite_master` where type = 'table';")
'''
print("\n\n<------------TEST CODE----------->\n")
def select_table_sqlite(conn, tablename, parameters_tuple=(), where_string="", order_by_string=""):
query=""
if parameters_tuple==():
query="SELECT * FROM `%s`"%(tablename)
elif type(parameters_tuple)=="tuple":
query="SELECT %s FROM `%s`"%(parameters_tuple, tablename)
else:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: parameters_tuple must be a tuple")
if where_string!="":
query=query+" WHERE "+where_string
elif where_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: where_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
if order_by_string!="":
query=query+" ORDER BY "+order_by_string
elif order_by_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: order_by_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
query=query+";"
table=conn.execute(query)
print type(table)
for row in table:
print type(row)
print row
print("\n<---------END OF TEST CODE-------->\n")
'''
| {
"repo_name": "ARDivekar/SearchDistribute",
"path": "SearchDistribute/sqliteDefaults.py",
"copies": "1",
"size": "9906",
"license": "mit",
"hash": 339821817094511170,
"line_mean": 29.2492113565,
"line_max": 140,
"alpha_frac": 0.6476882697,
"autogenerated": false,
"ratio": 2.9765625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41242507697,
"avg_score": null,
"num_lines": null
} |
import os
import sqlite3
import datetime
def get_conn(db_file_name):
    """Open (creating if necessary) the SQLite database next to this module.

    The path is resolved relative to this file's directory so the same
    database file is used regardless of the current working directory.
    PARSE_DECLTYPES lets sqlite3 convert declared column types (e.g. DATE)
    back into Python objects.
    Source: http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion

    Fixes: removed the unreachable trailing `return None` (the `with` body
    always returns) and converted the print statement to function form.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))  # directory in which this file is stored
    db_path = os.path.join(base_dir, db_file_name)
    # `with` commits any pending transaction on exit; sqlite3 connections
    # are NOT closed by the context manager, so returning conn is safe.
    with sqlite3.connect(db_path, detect_types=sqlite3.PARSE_DECLTYPES) as conn:
        print("\t\tOpened connection successfully")
        return conn
class Table:
    """Lightweight wrapper around a SQL result set.

    `table` is a sequence of row tuples; `attributes` is the parallel list
    of column names.
    """
    def __init__ (self, input_attributes, input_table):
        self.table = input_table
        self.attributes = input_attributes

    def __len__(self):
        return len(self.table)

    def __getitem__(self, i):
        """Index by row number (int) or by column name (str).

        t[3]      -> the 4th row tuple (nested t[i][j] access works because
                     each row is itself indexable)
        t['name'] -> a tuple of that column's value from every row

        Fix: the original column lookup iterated range(0, len(self.table)-1)
        and silently dropped the last row.
        """
        if type(i) == int:
            return self.table[i]
        elif type(i) == str:
            col_index = self.attributes.index(i)
            return tuple(row[col_index] for row in self.table)
def build_where_clause(where_params_list, where_values_list):
    """Build a SQL WHERE clause from parallel lists of column names and values.

    Returns "" when either list is None; otherwise conditions are joined
    with AND.  NOTE(review): values are interpolated directly into the SQL
    text, so this is only safe for trusted input.
    """
    if where_params_list is None or where_values_list is None:
        return ""
    parts = [" %s='%s' " % (str(where_params_list[0]), str(where_values_list[0]))]
    for idx in range(1, len(where_values_list)):
        parts.append(" AND %s='%s' " % (str(where_params_list[idx]), str(where_values_list[idx])))
    return " WHERE " + "".join(parts)
def build_select_query(tablename, select_params_list, where_params_list=None, where_values_list=None):
    """Compose a SELECT statement; optional WHERE clause comes from
    build_where_clause().  Values are string-interpolated (trusted input
    only).
    """
    columns = " %s" % select_params_list[0]
    for extra in select_params_list[1:]:
        columns += ", %s" % extra
    query = "SELECT " + columns + " FROM %s " % tablename
    query += build_where_clause(where_params_list=where_params_list, where_values_list=where_values_list)
    return query + ";"
def build_update_query(tablename, update_params_list, update_values_list, where_params_list=None, where_values_list=None):
    """Compose an UPDATE statement with SET assignments and an optional
    WHERE clause (built by build_where_clause).  Values are
    string-interpolated; use with trusted input only.
    """
    assignments = [" %s='%s' " % (str(update_params_list[0]), str(update_values_list[0]))]
    for idx in range(1, len(update_values_list)):
        assignments.append(", %s='%s' " % (str(update_params_list[idx]), str(update_values_list[idx])))
    query = "UPDATE " + tablename + " SET " + "".join(assignments)
    query += build_where_clause(where_params_list=where_params_list, where_values_list=where_values_list)
    return query + ";"
def build_insert_query(tablename, insert_params_list, tuple_values_list):
    """Compose a multi-row INSERT statement.

    Fix: every row of values is now closed with ')' -- the original only
    closed the first row, producing malformed SQL whenever more than one
    tuple was inserted.  (The original also skipped any column equal to the
    first one; a plain skip-first slice is used instead.)
    NOTE(review): values are string-interpolated rather than parameterized;
    only use with trusted input.
    """
    insert_query = "INSERT INTO %s(" % tablename + "%s" % insert_params_list[0]
    for param in insert_params_list[1:]:
        insert_query += ", %s" % param
    insert_query += ") VALUES "
    rows = []
    for values in tuple_values_list:
        row = "('%s'" % values[0]
        for v in values[1:]:
            row += " ,'%s'" % v
        rows.append(row + ")")
    insert_query += "\n" + ",\n".join(rows)
    insert_query += ";"
    return insert_query
def build_date(d, m, y):
    """Return a datetime.date from day, month, year given in d/m/y order."""
    return datetime.date(year=y, month=m, day=d)
def build_date2(day, month, year):
    """Return a datetime.date; keyword-friendly variant of build_date."""
    return datetime.date(day=day, month=month, year=year)
""" <---------------THE CORRECT WAY TO HANDLE DATES IN SQLITE3 with sqliteDefaults------------------>
#Create a random table
conn.execute('''Create table if not exists person(
ID INTEGER PRIMARY KEY,
Name TEXT,
DOB DATE
);
''')
conn.commit()
#Insert values into the table in one of the accepted formats
sqliteDefaults.insert_table_sqlite(conn,
'person',
('ID', 'Name', 'DOB'),
[
(1, 'Bob', sqliteDefaults.build_date(07,10,1999) ),
(2, 'John', sqliteDefaults.build_date(y=2005,m=8,d=21) ),
(3, 'Stacy', sqliteDefaults.build_date2(month=6,day=25,year=2003)),
(4, 'Emma', datetime.date(2001, 10, 27) )
]
)
#Source: http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion
table=sqliteDefaults.verified_select_sqlite(conn,"select * from person order by DOB desc;")
for row in table:
print row
#OUTPUT:
#(2, u'John', datetime.date(2005, 8, 21))
#(3, u'Stacy', datetime.date(2003, 6, 25))
#(4, u'Emma', datetime.date(2001, 10, 27))
#(1, u'Bob', datetime.date(1999, 10, 7))
print table[2][2].day
#OUTPUT:
# 27
#We can now compare the values as we do normal datetime objects: with > and <, etc
i=1; j=2;
if table[i][2]<table[j][2]:
print "%s is older than %s"%(table[i][1], table[j][1])
elif table[j][2]<table[i][2]:
print "%s is older than %s"%(table[j][1], table[i][1])
#OUTPUT:
# Emma is older than Stacy
"""
def insert_table_sqlite(conn, tablename, insert_params_list, tuple_values_list, commit=True):
    """Build an INSERT via build_insert_query() and execute it on conn;
    commits the transaction unless commit=False.
    """
    sql = build_insert_query(tablename=tablename,
                             insert_params_list=insert_params_list,
                             tuple_values_list=tuple_values_list)
    conn.cursor().execute(sql)
    if commit:
        conn.commit()
def insert_table_sqlite2(conn, tablename, parameters_tuple=(), tuple_values_list=[], commit=True, print_query=False):
    """Insert rows using '?' placeholders (safe against SQL injection).

    Fixes:
      * the placeholder count is now taken from the width of the first
        value tuple when no column list is given -- the original always
        emitted a single '?' in that case, breaking multi-column inserts;
      * `print query` statement replaced by the function form (identical
        output for a single argument under Python 2).
    NOTE(review): `tuple_values_list=[]` is a mutable default; it is never
    mutated here, but callers should still pass it explicitly.
    """
    if tuple_values_list == []:
        print("\n\tSQLiteDefaults: insert_table_sqlite() ERROR: tuple_value_list cannot be empty")
        return
    if parameters_tuple == ():
        query = "INSERT INTO %s VALUES " % (tablename)
        placeholder_count = len(tuple_values_list[0])
    else:
        query = "INSERT INTO %s %s VALUES" % (tablename, parameters_tuple)
        placeholder_count = len(parameters_tuple)
    # source: https://docs.python.org/2/library/sqlite3.html
    query = query + "(?" + (",?" * (placeholder_count - 1)) + ")"
    if print_query:
        print(query)
    conn.executemany(query, tuple_values_list)
    if commit:
        conn.commit()
def verified_select_sqlite(conn, select_query, fetch="all", printing=True):
    '''This function verifies that the entered query is a valid select query (to prevent SQL injection).
    If it is, it executes it and gets the table object. It returns None if the table is Empty, and prints an ERROR.
    If the table is non-empty, it returns the table object.'''
    if 'select' not in select_query.lower():
        if printing:
            print('verified_select() ERROR: Only select queries can be executed')
        return None
    temp = select_query.strip()
    if ';' not in temp:
        temp += ';'
    if temp.index(';') != (len(temp) - 1):
        # A ';' before the end would mean a second statement follows.
        if printing:
            print('verified_select() ERROR: Only one query can be fired at a time')
        return None
    cursor = conn.cursor()
    cursor.execute(temp)
    attributes = [col[0] for col in cursor.description]  # column names
    if fetch.lower() == "all":
        result_table = cursor.fetchall()
    elif fetch.lower() == "one":
        result_table = cursor.fetchone()
    else:
        if printing:
            print("verified_select() ERROR: Improper value '%s' passed to argument 'fetch'" % fetch)
        return None
    # Fix: the original compared `result_table is ()` -- an identity test
    # against a tuple literal that never matches fetchall()'s list (nor the
    # None returned by fetchone() on an empty table).  Use a truth test so
    # empty results are reported (and None returned) as documented.
    if not result_table:
        if printing:
            print('verified_select() ERROR: Empty table')
        return None
    return Table(input_table=result_table, input_attributes=attributes)
def print_table(conn, select_query):
    # Pretty-print the result of `select_query`: one row per line, columns
    # separated by tab pairs.  Error messages from verified_select_sqlite()
    # are suppressed (printing=False); nothing is printed when the query
    # fails or returns no rows.
    table = verified_select_sqlite(conn, select_query, printing=False)
    if table is not None:
        print '\n\n----------------------------------------------------------------'
        for row in table:
            print '\n'
            for i in range(0,len(row)):
                # Trailing comma keeps all columns of one row on a single
                # line (Python 2 print-statement softspace behaviour).
                print row[i],"\t\t",
        print '\n\n----------------------------------------------------------------\n'
def list_all_tables(db_file_name):
    # Print the names of all tables in the given SQLite database file by
    # querying the sqlite_master catalog table.
    conn=get_conn(db_file_name)
    print_table(conn,"select name from sqlite_master where type = 'table';")
'''
print("\n\n<------------TEST CODE----------->\n")
def select_table_sqlite(conn, tablename, parameters_tuple=(), where_string="", order_by_string=""):
query=""
if parameters_tuple==():
query="SELECT * FROM %s"%(tablename)
elif type(parameters_tuple)=="tuple":
query="SELECT %s FROM %s"%(parameters_tuple, tablename)
else:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: parameters_tuple must be a tuple")
if where_string!="":
query=query+" WHERE "+where_string
elif where_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: where_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
if order_by_string!="":
query=query+" ORDER BY "+order_by_string
elif order_by_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: order_by_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
query=query+";"
table=conn.execute(query)
print type(table)
for row in table:
print type(row)
print row
print("\n<---------END OF TEST CODE-------->\n")
'''
| {
"repo_name": "ARDivekar/SearchDistribute",
"path": "other/Legacy/sqliteDefaults.py",
"copies": "1",
"size": "9396",
"license": "mit",
"hash": 8178793539901393000,
"line_mean": 28.6058631922,
"line_max": 140,
"alpha_frac": 0.6454874415,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41454874415,
"avg_score": null,
"num_lines": null
} |
import hashlib
import hmac
import json
import sys
import time
import urllib
import urllib2
import bitcoin
from bitcoin.secrets import api
def warning(msg):
    """Write *msg* to stderr, prefixed with 'WARNING: ' and newline-terminated."""
    line = "WARNING: " + msg + "\n"
    sys.stderr.write(line)
def credentials():
    """Build the POST-body auth fields required by the Bitstamp API.

    Returns a dict with 'key', 'nonce' and 'signature'.  The nonce is the
    current Unix time in microseconds as a string; the signature is the
    upper-cased hex HMAC-SHA256 of nonce + client_id + key, keyed with the
    API secret (all taken from the `api` config dict).
    """
    nonce = str(int(time.time() * 1e6))
    key = api['key']
    message = nonce + api['client_id'] + key
    digest = hmac.new(api['secret'], msg=message, digestmod=hashlib.sha256)
    return {
        'key': key,
        'nonce': nonce,
        'signature': digest.hexdigest().upper(),
    }
def current_price():
    """Fetch the current buy (ask) and sell (bid) prices from the Bitstamp ticker."""
    ticker = json.load(urllib2.urlopen("https://www.bitstamp.net/api/ticker/"))
    return {'buy': ticker["ask"], 'sell': ticker["bid"]}
def balance():
    """Fetch the account's BTC and USD balances (authenticated request)."""
    return request("https://www.bitstamp.net/api/balance/")
def btc():
    """Return the account's BTC balance as a float."""
    balances = balance()
    return float(balances['btc_balance'])
def usd():
    """Return the account's USD balance as a float."""
    balances = balance()
    return float(balances['usd_balance'])
def transactions():
    """Fetch the user's transaction history (authenticated request)."""
    return request("https://www.bitstamp.net/api/user_transactions/")
def open_orders():
    """Fetch all currently open orders (authenticated request)."""
    return request("https://www.bitstamp.net/api/open_orders/")
def cancel_order(id):
    """Cancel the open order with the given id."""
    return request("https://www.bitstamp.net/api/cancel_order/", {'id': id})
def cancel_all_orders():
    """Cancel every currently open order, one API request per order."""
    for order in open_orders():
        cancel_order(order['id'])
def buy_order(amount, price):
    """Place a limit buy order for *amount* BTC at *price* USD."""
    return request("https://www.bitstamp.net/api/buy/", {'amount': amount, 'price': price})
def sell_order(amount, price):
    """Place a limit sell order for *amount* BTC at *price* USD."""
    return request("https://www.bitstamp.net/api/sell/", {'amount': amount, 'price': price})
def buy_for_usd(usd):
    """Buy BTC at the current ask price for exactly *usd* dollars,
    inclusive of the exchange fee (delegates to usd_buy_order)."""
    ask = float(current_price()['buy'])
    usd_buy_order(usd, ask)
def usd_buy_order(usd, price):
    """Place a limit buy order so the total USD spent (fee included) equals
    *usd*, at the given *price* per BTC.

    Example: place a buy order for $1200 worth of BTC at $510.
    """
    fee = float(balance()['fee'])
    adjusted = bitcoin.adjusted_usd_amount(usd, fee)  # shave off the fee share
    amount_btc = bitcoin.chop_btc(adjusted / price)
    print("Buying {} btc at ${} at a cost of ${}".format(amount_btc, price, usd))
    buy_order(amount_btc, price)
def request(url, payload=None):
    """POST an authenticated request to the Bitstamp REST API and return the
    decoded JSON response as a Python object.

    *payload* supplies form fields beyond the credentials.  Raises
    ClientException when the API reports an error; exits the process when
    the response is not valid JSON.

    Fix: default payload is now None instead of a shared mutable dict (the
    classic mutable-default-argument pitfall); an empty/None payload is a
    no-op exactly as before.
    """
    pd = credentials()      # start with the auth fields
    if payload:
        pd.update(payload)  # add caller-supplied fields
    data = urllib.urlencode(pd)
    fin = urllib2.urlopen(url, data)
    jResponse = fin.readlines()
    try:
        response = json.loads(jResponse[0])
    except ValueError:
        warning("{}\n{}\n".format(url, jResponse))  # print warning and exit the program
        sys.exit(1)
    if isinstance(response, dict) and 'error' in response:
        raise ClientException("API Error: " + str(response['error']))
    return response
class ClientException(Exception):
    """Custom exception raised when an error occurs during the client's operation."""

    def __init__(self, message):
        super(ClientException, self).__init__(message)
| {
"repo_name": "abid-mujtaba/BitcoinTracker-python",
"path": "bitcoin/client.py",
"copies": "1",
"size": "4576",
"license": "apache-2.0",
"hash": -4751010431611899000,
"line_mean": 20.3831775701,
"line_max": 129,
"alpha_frac": 0.6280594406,
"autogenerated": false,
"ratio": 3.7355102040816326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9857752241725581,
"avg_score": 0.001163480591210085,
"num_lines": 214
} |
import re
import redis
# Define the handle for the redis database
rds = redis.StrictRedis(host='localhost', port=6379, db=0)
def dump():
    """Print every redis key together with its current value, in sorted order."""
    for key in sorted(rds.keys()):
        print("{}: {}".format(key, rds.get(key)))
def change():
    """Interactively change ONE numeric value in redis.

    Lists all non-'active_*' keys with an index, prompts for an index, then
    prompts for the new (float) value.  Pressing ENTER at the first prompt
    aborts; at the second it keeps the current value.
    """
    is_flag = re.compile("^active_.*")
    keys = sorted(k for k in rds.keys() if not is_flag.match(k))
    for idx, key in enumerate(keys):
        print("{} - {}: {}".format(idx, key, rds.get(key)))
    choice = raw_input("\nIndex? ")
    if len(choice) == 0:
        return  # bare ENTER means no choice was made
    choice = int(choice)
    new_value = raw_input("\n{} [{}]? ".format(keys[choice], rds.get(keys[choice])))
    if len(new_value) > 0:  # bare ENTER keeps the existing value
        rds.set(keys[choice], float(new_value))
def toggle():
    """Interactively flip one of the boolean 'active_*' decision flags in redis."""
    is_flag = re.compile("^active_.*")
    keys = sorted(k for k in rds.keys() if is_flag.match(k))
    for idx, key in enumerate(keys):
        print("{} - {}: {}".format(idx, key, rds.get(key)))
    choice = raw_input("\nIndex? ")
    if len(choice) == 0:
        return
    choice = int(choice)
    # redis stores strings, not booleans, so compare against 'True'
    current = rds.get(keys[choice]) == 'True'
    rds.set(keys[choice], not current)  # flip/toggle the flag
    print("{} set to {}".format(keys[choice], not current))
def load():
    """Load default values into redis.  Used if the redis database is ever cleared."""
    defaults = [
        (KEY_RISING_PEAK_ACTIVATION_THRESHOLD, 415),
        (KEY_RISING_PEAK_UPPER_LIMIT_FACTOR, 1.01),
        (KEY_RISING_PEAK_LOWER_LIMIT_FACTOR, 0.9925),
        (KEY_FALLING_TRENCH_ACTIVATION_THRESHOLD, 410),
        (KEY_FALLING_TRENCH_LOWER_LIMIT_FACTOR, 0.99),
        (KEY_FALLING_TRENCH_UPPER_LIMIT_FACTOR, 1.0075),
        (KEY_ABSOLUTE_ZERO_MINIMUM_THRESHOLD, 380),
        (KEY_MINIMIZE_LOSS_DROP_FACTOR, 0.97),
        (KEY_MIN_PROFIT_BAND_LOWER_FACTOR, 1.008),
        (KEY_MIN_PROFIT_BAND_UPPER_FACTOR, 1.02),
        (KEY_MIN_PROFIT_TRIGGER_THRESHOLD, 1.025),
        (KEY_ACTIVE_ABSOLUTE_ZERO, False),
        (KEY_ACTIVE_MINIMIZE_LOSS, False),
        (KEY_ACTIVE_MINIMUM_PROFIT, False),
        (KEY_ACTIVE_RISING_PEAK, False),
        (KEY_ACTIVE_FALLING_TRENCH, False),
    ]
    for key, value in defaults:
        rds.set(key, value)
# Activation keys and functions
# Each flag is stored in redis as the *string* 'True'/'False'; the accessors
# below re-read redis on every call and convert via string comparison.
KEY_ACTIVE_ABSOLUTE_ZERO = "active_absolute_zero"
def active_absolute_zero(): return rds.get(KEY_ACTIVE_ABSOLUTE_ZERO) == 'True'
KEY_ACTIVE_MINIMIZE_LOSS = "active_minimize_loss"
def active_minimize_loss(): return rds.get(KEY_ACTIVE_MINIMIZE_LOSS) == 'True'
KEY_ACTIVE_MINIMUM_PROFIT = "active_min_profit"
def active_minimum_profit(): return rds.get(KEY_ACTIVE_MINIMUM_PROFIT) == 'True'
KEY_ACTIVE_RISING_PEAK = "active_rising_peak"
def active_rising_peak(): return rds.get(KEY_ACTIVE_RISING_PEAK) == 'True'
KEY_ACTIVE_FALLING_TRENCH = "active_falling_trench"
def active_falling_trench(): return rds.get(KEY_ACTIVE_FALLING_TRENCH) == 'True'
# Define the key and function for accessing Rising Peak Activiation Threshold
# (note: the 'activiation' typo is part of the stored redis key itself)
KEY_RISING_PEAK_ACTIVATION_THRESHOLD = "rising_peak_activiation_threshold"
KEY_RISING_PEAK_UPPER_LIMIT_FACTOR = "rising_peak_upper_limit_factor"
KEY_RISING_PEAK_LOWER_LIMIT_FACTOR = "rising_peak_lower_limit_factor"
def rising_peak_activiation_threshold(): return float(rds.get(KEY_RISING_PEAK_ACTIVATION_THRESHOLD))
def rising_peak_upper_limit_factor(): return float(rds.get(KEY_RISING_PEAK_UPPER_LIMIT_FACTOR))
def rising_peak_lower_limit_factor(): return float(rds.get(KEY_RISING_PEAK_LOWER_LIMIT_FACTOR))
# Define the key and function for accessing Falling Trench Activation Threshold
KEY_FALLING_TRENCH_ACTIVATION_THRESHOLD = "falling_trench_activation_threshold"
KEY_FALLING_TRENCH_LOWER_LIMIT_FACTOR = "falling_trench_lower_limit_factor"
KEY_FALLING_TRENCH_UPPER_LIMIT_FACTOR = "falling_trench_upper_limit_factor"
def falling_trench_activation_threshold(): return float(rds.get(KEY_FALLING_TRENCH_ACTIVATION_THRESHOLD))
def falling_trench_lower_limit_factor(): return float(rds.get(KEY_FALLING_TRENCH_LOWER_LIMIT_FACTOR))
def falling_trench_upper_limit_factor(): return float(rds.get(KEY_FALLING_TRENCH_UPPER_LIMIT_FACTOR))
# Define the key and function for accessing Absolute Zero Activation Threshold
KEY_ABSOLUTE_ZERO_MINIMUM_THRESHOLD = "absolute_zero_min_threshold"
def absolute_zero_min_threshold(): return float(rds.get(KEY_ABSOLUTE_ZERO_MINIMUM_THRESHOLD))
# Define the key and function for accessing Minimum Loss Drop Factor
KEY_MINIMIZE_LOSS_DROP_FACTOR = "minimize_loss_drop_factor"
def minimize_loss_drop_factor(): return float(rds.get(KEY_MINIMIZE_LOSS_DROP_FACTOR))
# Define the keys and functions for accessing Minimum Profit values:
KEY_MIN_PROFIT_BAND_UPPER_FACTOR = "min_profit_band_upper_factor"
KEY_MIN_PROFIT_BAND_LOWER_FACTOR = "min_profit_band_lower_factor"
KEY_MIN_PROFIT_TRIGGER_THRESHOLD = "min_profit_trigger_threshold"
def min_profit_band_upper_factor(): return float(rds.get(KEY_MIN_PROFIT_BAND_UPPER_FACTOR))
def min_profit_band_lower_factor(): return float(rds.get(KEY_MIN_PROFIT_BAND_LOWER_FACTOR))
def min_profit_trigger_threshold(): return float(rds.get(KEY_MIN_PROFIT_TRIGGER_THRESHOLD))
| {
"repo_name": "abid-mujtaba/BitcoinTracker-python",
"path": "bitcoin/redis_client.py",
"copies": "1",
"size": "5898",
"license": "apache-2.0",
"hash": -3180553644987290600,
"line_mean": 32.5113636364,
"line_max": 147,
"alpha_frac": 0.6993896236,
"autogenerated": false,
"ratio": 2.963819095477387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9142628315262045,
"avg_score": 0.0041160807630683215,
"num_lines": 176
} |
__author__ = 'abr'
import csv
import os
import subprocess
import tempfile
import sys
from scapetesting.govdocs1 import groundTruth
from scapetesting.toolevaluation.reportParser import ToolPrecisionTest
__author__ = 'abr'
print("running")
# Snapshot of the GovDocs1 ground truth (loaded at import time; module-level).
_groundTruth = groundTruth.groundTruths
# file-number -> list of MIME strings reported by Fido; filled by fidoReportLoader().
identificationsKeyToMime = {}
#then we csv parse the report file
def fidoReportLoader(csvReportfile):
    """Parse a Fido CSV report and populate identificationsKeyToMime.

    Each report row names a file; the numeric stem of the file name becomes
    the key and the row's MIME type is appended to that key's list.  Rows
    with missing file names or non-numeric stems are skipped.
    """
    fieldnames = ["OK", "PARENT_ID", "URI", "DESCRIPTION", "FORMAT_NAME",
                  "SIZE", "NAME", "MIME", "METHOD"]
    with open(csvReportfile, mode="rt") as report:
        for row in csv.DictReader(f=report, fieldnames=fieldnames, delimiter=","):
            name = row["NAME"]
            if not name:
                continue
            stem = os.path.basename(name).split(".")[0]
            try:
                key = int(stem)
            except ValueError:
                continue
            identificationsKeyToMime.setdefault(key, []).append(row["MIME"])
#The parser operation just retrieves the entry from the parsed CSV file
def fidoParser(datafile):
    """Return the MIME list recorded for *datafile*, keyed by its numeric stem."""
    key = int(datafile.split(".")[0])
    return identificationsKeyToMime[key]
# Wire the Fido report parser into the generic precision-test harness.
fidoEval = ToolPrecisionTest()
fidoEval.reportParser = fidoParser
fidoEval.verbal = True
| {
"repo_name": "openpreserve/Scape-Tool-Tester",
"path": "app/scapetesting/fidoEval/fidoParser.py",
"copies": "1",
"size": "1496",
"license": "apache-2.0",
"hash": -5566902154563313000,
"line_mean": 22.746031746,
"line_max": 154,
"alpha_frac": 0.6537433155,
"autogenerated": false,
"ratio": 3.8958333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5049576648833334,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abuddenberg'
from gcis_clients import Nca3Client, GcisClient, gcis_stage_auth, gcis_dev_auth
import pickle
import json
import requests
import bs4
from bs4 import BeautifulSoup
nca3_url = 'https://nca3.cicsnc.org'
# NOTE(review): credentials (including a password) are hard-coded in source;
# they should be moved to a secrets/config mechanism and rotated.
nca3 = Nca3Client(nca3_url, 'andrew.buddenberg', 'Nz9O^00I', http_basic_user='nca3', http_basic_pass='avl-TSU')
gcis_url = 'https://data-stage.globalchange.gov'
gcis = GcisClient(gcis_url, *gcis_stage_auth)
# gcis = GcisClient('http://data.gcis-dev-front.joss.ucar.edu', *gcis_dev_auth)
def main():
    # Sync figure captions from the NCA3 Drupal site into GCIS: for every
    # caption record that has an ordinal, a metadata URI and a caption, strip
    # HTML from the caption and push it onto the corresponding GCIS figure.
    print gcis.test_login()
    for idx, list_item in enumerate([i for i in sorted(nca3.get_all_captions().json(), key=lambda f: f['Ordinal']) if i['Ordinal'] and i['Metadata URI'] and i['Caption']]):
        ordinal = list_item['Ordinal']  # NOTE(review): assigned but unused
        # The GCIS figure identifier is the last path segment of the URI.
        gcis_fig_id = list_item['Metadata URI'].split('/')[-1]
        stripped_caption = strip_tags(list_item['Caption']['value'])
        fig = gcis.get_figure('nca3', gcis_fig_id)
        fig.caption = stripped_caption
        # print idx, list_item
        # print stripped_caption
        #Just to be safe...
        fig.contributors = []
        print gcis.update_figure('nca3', fig.chapter_identifier, fig, skip_images=True)
def strip_tags(caption):
    """Strip HTML markup from a caption, keeping only <tbib>, <sup> and <sub> tags."""
    keep = ('tbib', 'sup', 'sub')
    soup = BeautifulSoup(caption)
    for tag in soup.find_all(name=lambda t: t.name not in keep):
        tag.unwrap()
    return str(soup).strip()
def get_gcis_chapters():
    """Fetch all NCA3 chapters from GCIS, cache them to chapter_map.pk1,
    and return the identifier -> chapter dict.

    Fix: return the freshly built dict directly instead of re-reading the
    pickle through an unclosed file handle (resource leak plus a pointless
    serialize/deserialize round-trip).
    """
    gcis_all_chapters = requests.get('{b}/report/nca3/chapter.json'.format(b=gcis_url), params={'all': 1}, verify=False).json()
    chapter_map = {c['identifier']: c for c in gcis_all_chapters}
    with open('chapter_map.pk1', 'wb') as fout:
        pickle.dump(chapter_map, fout)
    return chapter_map
def get_all_gcis_figures():
    """Build a figure_num -> figure mapping for all NCA3 figures and cache
    it to fig_map.pk1.

    Fixes: hoist the get_gcis_chapters() call out of the loop (the original
    performed a full network fetch per figure) and return the built dict
    directly instead of re-reading the pickle through an unclosed handle.
    """
    chapters = get_gcis_chapters()  # one fetch, not one per figure
    gcis_all_figs = {}
    for f in gcis.get_figure_listing('nca3'):
        chapter_num = chapters[f.chapter_identifier]['number']
        # e.g. chapter 2, ordinal 3 -> "2.3"
        f.figure_num = '{0}.{1}'.format(chapter_num, f.ordinal)
        gcis_all_figs[f.figure_num] = f
    with open('fig_map.pk1', 'wb') as fout:
        pickle.dump(gcis_all_figs, fout)
    return gcis_all_figs
def populate_uris_in_drupal():
    # Push GCIS figure URIs back into the NCA3 Drupal site: for every caption
    # record whose ordinal matches a known GCIS figure, write the figure's
    # URI into the node's field_metadata_uri field.
    gcis_all_figs = get_all_gcis_figures()
    for list_item in sorted(nca3.get_all_captions().json(), key=lambda f: f['Ordinal']):
        nid = list_item['nid']
        ordinal = list_item['Ordinal']
        graphic_type = list_item['Graphic Type']
        if ordinal and ordinal in gcis_all_figs and graphic_type == 'Figure':
            print 'Found: ', graphic_type, ordinal, gcis_all_figs[ordinal].uri
            # nca3_fig = nca3.get_figure(nid)
            # print nca3_fig
            # Drupal field structure; uri[1:] drops the leading '/'.
            uri_frag = {
                'und': [
                    {
                        'value': gcis_all_figs[ordinal].uri[1:],
                        'format': None,
                        'safe_value': gcis_all_figs[ordinal].uri[1:]
                    }
                ]
            }
            # nca3_fig['field_metadata_uri'] = uri_frag
            resp = nca3.update_figure(nid, {'field_metadata_uri': uri_frag})
            print resp.status_code, resp.text
            print ''
        else:
            print '***NO URI FOUND***', graphic_type, ordinal
main() | {
"repo_name": "USGCRP/gcis-py-client",
"path": "bin/sync_captions.py",
"copies": "1",
"size": "3365",
"license": "bsd-3-clause",
"hash": 6246765158048531,
"line_mean": 31.6796116505,
"line_max": 172,
"alpha_frac": 0.5946508172,
"autogenerated": false,
"ratio": 3.115740740740741,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9187745232322821,
"avg_score": 0.004529265123584093,
"num_lines": 103
} |
__author__ = 'abuddenberg'
# Maps survey-question field names to GCIS figure attribute names.
FIG_TRANSLATIONS = {
    'what_is_the_figure_id': 'identifier',
    'what_is_the_name_of_the_figure_as_listed_in_the_report': 'title',
    'when_was_this_figure_created': 'create_dt',
    'what_is_the_chapter_and_figure_number': 'figure_num'
}
# Maps survey-question field names to GCIS image attribute names.
IMG_TRANSLATIONS = {
    'list_any_keywords_for_the_image': 'attributes',
    'when_was_this_image_created': 'create_dt',
    'what_is_the_image_id': 'identifier',
    'maximum_latitude': 'lat_max',
    'minimum_latitude': 'lat_min',
    'maximum_longitude': 'lon_max',
    'minimum_longitude': 'lon_min',
    'start_time': 'time_start',
    'end_time': 'time_end',
    'what_is_the_name_of_the_image_listed_in_the_report': 'title'
}
# Maps survey dataset fields to GCIS dataset attributes.  Fields marked
# "HACK elsewhere" are handled specially by the consuming code.
DATASET_TRANSLATIONS = {
    'data_set_access_date': 'access_dt',
    'data_set_publication_year': 'publication_year',
    'data_set_original_release_date': 'release_dt',
    # HACK elsewhere 'start_time and end_time': '',
    'data_set_id': 'native_id',
    # HACK elsewhere'': 'doi',
    # HACK elsewhere 'maximum_latitude etc. etc. etc.': '',
    'data_set_version': 'version',
    'data_set_name': 'name',
    'data_set_citation': 'cite_metadata',
    'data_set_description': 'description',
    # Not sure'': 'type',
    'data_set_location': 'url',
    'data_set_variables': 'attributes'
}
# Maps dataset display names (as written in the survey/report, including
# several spelling variants of the same dataset) to GCIS dataset identifiers.
DATASET_IDS = {
    'Global Historical Climatology Network - Daily': 'nca3-ghcn-daily-r201305',
    'Global Historical Climatology Network - Monthly': 'nca3-ghcn-monthly-r201305',
    'NCDC Merged Land and Ocean Surface Temperature': 'nca3-mlost',
    'NOAA Merged Land-Ocean Surface Temperature Analysis (MLOST)': 'nca3-mlost',
    'U.S. Climate Divisional Dataset Version 2': 'nca3-cddv2-r1',
    'Climate Division Database Version 2': 'nca3-cddv2-r1',
    'Eighth degree-CONUS Daily Downscaled Climate Projections by Katharine Hayhoe': 'nca3-cmip3-downscaled-r201304',
    'Eighth degree-CONUS Daily Downscaled Climate Projections': 'nca3-cmip3-downscaled-r201304',
    'Earth Policy Institute Atmospheric Carbon Dioxide Concentration, 1000-2012': 'nca3-epi-co2-r201307',
    'Daily 1/8-degree gridded meteorological data [1 Jan 1949 - 31 Dec 2010]': 'nca3-maurer-r201304',
    'NCEP/NCAR Reanalysis': 'nca3-ncep-ncar-r1',
    'NCDC Global Surface Temperature Anomalies': 'nca3-ncdc-gst-anomalies-r201307',
    'GRACE Static Field Geopotential Coefficients JPL Release 5.0 GSM': 'nca3-grace-r201307',
    'UW/NCDC Satellite Derived Hurricane Intensity Dataset': 'nca3-hursat-r1',
    'Bias-Corrected and Spatially Downscaled Surface Water Projections Hydrologic Data': 'nca3-water-projections-r201208',
    'International Best Track Archive for Climate Stewardship (IBTrACS)': 'nca3-ibtracs-r201311',
    'the World Climate Research Programme\'s (WCRP\'s) Coupled Model Intercomparison Project phase 3 (CMIP3) multi-model dataset': 'nca3-cmip3-r201205',
    'World Climate Research Programme\'s (WCRP\'s) Coupled Model Intercomparison Project phase 3 (CMIP3) multi-model dataset': 'nca3-cmip3-r201205',
    'World Climate Research Program\'s (WCRP\'s) Coupled Model Intercomparison Project phase 3 (CMIP3) multi-model dataset': 'nca3-cmip3-r201205',
    'North American Regional Climate Change Assessment Program dataset': 'nca3-narccap-r201205',
    'Gridded Population of the World Version 3 (GPWv3): Population Count Grid': 'nca3-gpwv3-r201211',
    'ETCCDI Extremes Indices Archive': 'nca3-etccdi-r201305',
    'Historical Climatology Network Monthly (USHCN) Version 2.5': 'nca3-ushcn',
    'Annual Maximum Ice Coverage (AMIC)': 'nca3-amic-r201308',
    'Global Historical Climatology Network-Daily (GHCN-D) Monthly Summaries: North American subset': 'nca3-ghcnd-monthly-summaries-r201401',
    'Global Sea Level From TOPEX & Jason Altimetry': 'nca3-topex-jason-altimetry-r1',
    'World Climate Research Program\'s (WCRP\'s) Coupled Model Intercomparison Project phase 5 (CMIP5) multi-model ensemble': 'nca3-cmip5-r1',
    'Global CO2 Emissions from Fossil-Fuel Burning, Cement Manufacture, and Gas Flaring: 1751-2009': 'nca3-doe-co2-r201209',
    'Proxy-based reconstructions of hemispheric and global surface temperature variations over the past two millennia': 'nca3-mann-recons-r200809',
    'EPICA Dome C Ice Core 800KYr Carbon Dioxide Data': 'nca3-epica-ice-core-r20080515',
    'Western U.S. Percent Area Moderately to Extremely Dry and Moderately to Extremely Wet': 'nca3-western-us-wet-dry-r2012',
    'Total Wildland Fires and Acres (1960-2009)': 'nca3-wildland-fires-r1',
    'HadCRUT4': 'nca3-hadcrut4-v4_1_1_0',
    'GISS Surface Temperature Analysis (GISTEMP)': 'nca3-gistemp-r2010',
    'Solar Constant': 'nca3-solar-constant-r20131003',
    #Surely we can do better
    'Proxy Data': 'nca3-proxy-data-r1',
    'Tide Gauge Data': 'nca3-tide-gauge-data-r1',
    'Projected Sea Level Rise': 'nca3-projected-sea-level-rise-r1',
}
# Maps webform survey question ids to GCIS Activity field names.
# NOTE(review): keys mirror the webform's auto-generated question ids,
# including numeric prefixes like '33_'/'35_' -- they must match the webform
# export byte-for-byte, so do not "clean them up" here.
ACT_TRANSLATIONS = {
    'how_much_time_was_invested_in_creating_the_image': 'duration',
    '35_what_are_all_of_the_files_names_and_extensions_associated_with_this_image': 'output_artifacts',
    'what_operating_systems_and_platforms_were_used': 'computing_environment',
    'what_analytical_statistical_methods_were_employed_to_the_data': 'methodology',
    'describe_how_the_data_was_used_in_the_image_figure_creation': 'data_usage',
    'list_the_name_and_version_of_the_software': 'software',
    'what_software_applications_were_used_to_manipulate_the_data': 'notes',
    '33_what_software_applications_were_used_to_visualize_the_data': 'visualization_software'
}
# Maps organization names as they appear in webform data to GCIS organization
# identifiers.  Several spelling variants of the same organization map to one
# id (e.g. the three NCDC/CICS-NC variants).
ORG_IDS = {
    'NOAA NCDC/CICS-NC': 'cooperative-institute-climate-satellites-nc',
    'NCDC/CICS-NC': 'cooperative-institute-climate-satellites-nc',
    'NOAA NCDC/CICS NC': 'cooperative-institute-climate-satellites-nc',
    'NESDIS/NCDC': 'national-climatic-data-center',
    'NCDC': 'national-climatic-data-center',
    'U.S. Forest Service': 'us-forest-service',
    'NOAA Pacific Marine Environmental Laboratory': 'pacific-marine-environmental-laboratory',
    'Jet Propulsion Laboratory': 'jet-propulsion-laboratory',
    'HGS Consulting': 'hgs-consulting-llc',
    'University of Virginia': 'university-virginia',
    'Miami-Dade Dept. of Regulatory and Economic Resources': 'miami-dade-dept-regulatory-economic-resources',
    'Nansen Environmental and Remote Sensing Center': 'nansen-environmental-and-remote-sensing-center',
    'University of Illinois at Urbana-Champaign': 'university-illinois',
    'USGCRP': 'us-global-change-research-program',
    'National Park Service': 'national-park-service',
    'Institute of the Environment': 'university-arizona',
    'USGS': 'us-geological-survey',
    'University of Puerto Rico': 'university-puerto-rico',
    'University of Alaska': 'university-alaska',
    'U.S. Department of Agriculture': 'us-department-agriculture',
    'Kiksapa Consulting': 'kiksapa-consulting-llc',
    'Centers for Disease Control and Prevention': 'centers-disease-control-and-prevention',
    'Pacific Northwest Laboratories': 'pacific-northwest-national-laboratory',
    'Susanne Moser Research & Consulting': 'susanne-moser-research-consulting',
    'NEMAC': 'national-environmental-modeling-analysis-center',
    'LBNL': 'lawrence-berkeley-national-laboratory',
    'Texas Tech University': 'texas-tech-university',
    'NASA': 'national-aeronautics-space-administration',
    'GATech': 'georgia-institute-technology',
    'UW': 'university-washington'
}
# Maps contributor names (as spelled in the webform data) to GCIS contributor
# role type identifiers.
# NOTE(review): the ' Chapin' key with a leading space appears deliberate --
# presumably matching a mangled source value alongside 'F. Chapin'; confirm
# against the webform export before removing it.
CONTRIB_ROLES = {
    'Kenneth Kunkel': 'scientist',
    'Xungang Yin': 'scientist',
    'Nina Bednarsek': 'scientist',
    'Henry Schwartz': 'scientist',
    'Jessicca Griffin': 'graphic_artist',
    'James Youtz': 'scientist',
    'Chris Fenimore': 'scientist',
    'Deb Misch': 'graphic_artist',
    'James Galloway': 'scientist',
    'Laura Stevens': 'scientist',
    'Nichole Hefty': 'point_of_contact',
    'Mike Squires': 'scientist',
    'Peter Thorne': 'scientist',
    'Donald Wuebbles': 'scientist',
    'Felix Landerer': 'scientist',
    'David Wuertz': 'scientist',
    'Russell Vose': 'scientist',
    'Gregg Garfin': 'scientist',
    'Jeremy Littell': 'scientist',
    'Emily Cloyd': 'contributing_author',
    'F. Chapin': 'scientist',
    ' Chapin': 'scientist',
    'Andrew Buddenberg': 'analyst',
    'Jerry Hatfield': 'author',
    'George Luber': 'lead_author',
    'Kathy Hibbard': 'lead_author',
    'Susanne Moser': 'convening_lead_author',
    'Bull Bennett': 'convening_lead_author',
    'Ernesto Weil': 'scientist',
    'William Elder': 'scientist',
    'Greg Dobson': 'analyst',
    'Michael Wehner': 'scientist',
    'Katharine Hayhoe': 'scientist',
    'Walt Meier': 'scientist',
    'Aris Georgakakos': 'scientist',
    'Matthew Peters': 'scientist',
    'Robert Norheim': 'scientist'
}
# Maps webform parent-publication field names to GCIS Parent field names.
# Several different title fields all collapse onto the single 'label' field.
PARENT_TRANSLATIONS = {
    'what_type_of_publication_was_the_figure_published_in': 'publication_type_identifier',
    'name_title': 'label',
    'article_title': 'label',
    'book_title': 'label',
    'web_page_title': 'label',
    'conference_title': 'label',
    'title': 'label',
}
# Maps webform publication-type values to GCIS publication type identifiers.
# Note that 'book_section' deliberately maps to 'report' and
# 'conference_proceedings' to 'generic'.
PARENT_PUBTYPE_MAP = {
    'report': 'report',
    'journal_article': 'article',
    'book_section': 'report',
    'electronic_article': 'article',
    'web_page': 'webpage',
    'book': 'book',
    'conference_proceedings': 'generic',
}
# Lookup hints for resolving a parent publication's title to a GCIS record,
# keyed first by publication type, then by the exact title string from the
# webform.  Values are either a GCIS identifier string, or a
# (publication_type, identifier) tuple when the record lives under a
# different type than the key suggests.
# NOTE(review): keys are exact-match strings (typos and truncations included,
# e.g. 'devestates', 'rai systems') -- they must stay byte-identical to the
# incoming data.
PARENT_SEARCH_HINTS = {
    'report': {
        'The State of the Climate 2009 Highlights': 'noaa-stateofclim-2009',
        'Global Climate Change Impacts in the United States': 'nca2',
        'Impacts of Climate Change and Variability on Transportation Systems and Infrastructure: Gulf Study, Phase I.': 'ccsp-sap-4_7-2008',
        'Climate and Energy-Water-Land System Interactions': 'pnnl-21185',
        'Freshwater Use by U.S. Power Plants Electricity\'s thirst for a Precious Resource': 'ucusa-freshwater-2011',
        'New York City Panel on Climate Change Climate Risk Information 2013 Observations, Climate Change Projections and Maps': 'nycpanelonclimch-cri2013',
        'Regional Climate Trends and Scenarios for the U.S. National Climate Assessment. Part 2. Climate of the Southeast U.S.': 'noaa-techreport-nesdis-142-2',
        'Regional Climate Trends and Scenarios for the U.S. National Climate Assessment. Part 3. Climate of the Midwest U.S.': 'noaa-techreport-nesdis-142-3',
        'Reefs at Risk Revisited': ('book', '3788c071-e06a-42c3-b0b9-0396fd494aa3'),
        'Climate Change and Pacific Islands: Indicators and Impacts Report for the 2012 Pacific Islands Regional Climate Assessment': 'pirca-climate-change-and-pacific-islands',
        'Climate adaptation: Risk, uncertainty and decision-making': 'ukcip-climate-adaptation-risk-uncertainty-and-decision-making',
        'Adapting to Impacts of Climate Change. America\'s Climate Choices: Report of the Panel on 43 Adapting to the Impacts of Climate C': ('book', '1e88532d-c40d-47d2-a872-77b2627fbe89'),
        'Climate Change 2007: The Physical Science Basis. Contribution of Working Group I to the Fourth Assessment Report of the IPCC': ('book', '92debecd-ca55-46f1-a0c1-734e6b0dc6b1'),
        'Snow, Water, Ice and Permafrost in the Arctic (SWIPA): Climate Change and the Cryosphere': ('book', 'e7c9614c-8db8-410f-9fec-0957292554bf'),
        'Climate Change 2013: The Physical Science Basis. Contribution of Working Group I to the Fifth Assessment Report of the IPCC': 'ipcc-wg1-ar5-physical-science-basis',
        'Regional Climate Trends and Scenarios for the U.S. National Climate Assessment. Part 9. Climate of the Contiguous United States': 'noaa-techreport-nesdis-142-9',
        'How to Avoid Dangerous Climate Change. A Target for U.S. Emissions Reductions': 'ucusa-howtoavoid-2007',
        'Summary for Decision Makers. Assessment of Climate Change in the Southwest United States': 'swccar-assessment-climate-change-in-southwest-us',
        'Climate Variability and Change in Mobile, Alabama: Task 2 Final Report. Impacts of Climate 25 Change and Variability on Transpo': 'fhwa-hep-12-053',
        'Effects of Climatic Variability and Change on Forest Ecosystems: A Comprehensive Science Synthesis for the U.S. Forest Sector': 'usfs-pnw-gtr-870',
        'Future of America\'s Forests and Rangelands Forest Service. 2010 Resources Planning Act Assessment': 'usfs-gtr-wo-87',
        'Regional Climate Trends and Scenarios for the U.S. National Climate Assessment. Part 5. Climate of the Southwest U.S.': 'noaa-techreport-nesdis-142-5',
        'Regional Climate Trends and Scenarios for the U.S. National Climate Assessment. Part 7. Climate of Alaska': 'noaa-techreport-nesdis-142-7',
        'Reclamation, SECURE Water Act Section 9503(c) - Reclamation Climate Change and Water, Report to Congress': 'usbr-secure-2011',
        'The Physical Science Basis. Contribution of Working Group I to the Fourth AR4 of IPCC': 'ipcc-wg1-ar5-physical-science-basis',
        '2005 Louisiana Hurricane Impact Atlas': 'lgic-lahurricane-2006',
        '2009 State of the Climate Highlights': 'noaa-stateofclim-2009',
        'Climate of the Southeast United States: Variability, change, impacts and vulnerability.': ('book', '7951fbd8-5877-41aa-ae62-9da3eb56b5c5'),
        'A Reef Manager\'s Guide to Coral Bleaching': ('book', 'd6f69088-1025-4ce7-b0e1-54ab6403a951'),
        'Climate Stabilization Targets: Emissions, Concentrations, and Impacts over Decades to Millennia': ('book', 'f5b281a2-38d2-4633-84db-fd37fa0fb3e6'),
        'Water Resources Sector Technical Input Report in Support of the U.S. Global Change Research Program': 'nca-waterresourcessector-2013',
        'Estimated Use of Water in the United States in 2005': 'usgs-circular-1344',
        'Annual Energy Outlook 2008': 'aeo2008',
        'Value of U.S. agricultural trade, by fiscal year. U.S. Department of Agriculture, Economic Research Service': ('webpage', '319332d5-ec59-4d6d-8411-5eb57f38141d'),
        'Future of America\'s Forest and Rangelands: Forest Service 2010 Resources Planning Act Assessment': 'usfs-gtr-wo-87',
        'Assessment of Climate Change in the Southwest United States: A Report Prepared for the National Climate Assessment': ('book', 'c9625c65-c20f-4163-87fe-cebf734f7836'),
        'Sea-Level Rise for the Coasts of California, Oregon, and Washington: Past, Present, and Future': ('book', 'a36230af-24e6-42c8-8d68-17bcab910595'),
        'Water Planning: A Review of Promising New Methods.': ('generic', '7bd61959-19a0-43ad-80ae-d786619956a1')
    },
    'webpage': {
        'Toxic Algae Bloom in Lake Erie. October 14, 2011': 'afe12af6-a7d3-4b70-99e5-0f80b67b7047',
        'Tribal Energy Program Projects on Tribal Lands': 'abde0ebc-342b-4bb7-b206-016cd3c829c4',
        'Atlas of Rural and Small-Town America. Category: County Classifications. Current Map: Rural-urban Continuum Code, 2013': '2cb79b4a-31cf-43ec-a70a-0371626f1407',
        'Atlas of Rural and Small-Town America. Category: County Classifications. Current Map: Economic Dependence, 1998-2000': '2cb79b4a-31cf-43ec-a70a-0371626f1407',
        'Atlas of Rural and Small-Town America. Category: People.': '2cb79b4a-31cf-43ec-a70a-0371626f1407',
        'St. Petersburg Coastal and Marine Science Center': '2f586ef7-91bb-45e5-b463-ee3e358185ba',
        'NASA Earth Observatory Natural Hazards': 'c57946b1-f413-491f-b75c-1c08f7594f84',
        'Plants of Hawaii': 'a8159919-b01c-442b-afb8-c2e272f81f48',
        'Public Roads': '5f3538ab-eb81-4858-b44e-1304b949b288',
        'Freight Analysis Framework Data Tabulation Tool': '5fe65558-d010-445b-b4f1-9079224dca6b',
        'Ecosystem Services Analysis of Climate Change and Urban Growth in the Upper Santa Cruz Watershed: SCWEPM': 'd4622f7e-aca7-42e6-95da-90579a187c30',
        'State and Local Climate Adaptation': '7de6bfc9-55fa-4d12-ae80-486561b3802c',
        'Climate Change Response Framework - Northwoods': '267378f7-278b-4201-8ffa-a820f5a694b8',
        'NWHI Maps and Publications': 'e6438f11-85f4-4c29-abb5-b09efa3279b2',
        'The Cryosphere Today Compare Daily Sea Ice': 'e4a9eb6a-9421-42c3-94b1-47caf588d41d',
        'NASA Earth Observatory Visualizing the Sea Ice Minimum': '71b4c19e-42da-4f15-99d2-7c7746d8eaf2',
        '2007 Census Ag Atlas Maps: Crops and Plants': 'f39c0146-137f-4668-b401-5972fe40208d',
        'NRCS Photo Gallery': '13da595f-e0f0-4ad0-b87b-44ce3897cd30',
        'Billion-Dollar Weather/Climate Disasters: Mapping': 'd70d7a59-45d7-4b38-baf2-86a7fcf12da3',
        'Before and After: 50 Years of Rising Tides and Sinking Marshes': '6778161f-897b-4f89-942f-8ad2f01f11a0',
        'Influence of El Nino and La Nina on Southwest Rainfall': '6d0a1cba-70fe-4fa3-a630-c45409115ab8',
        'Map of Sea Level Trends': '2ab182cc-171d-4edd-9f9f-51e8b4cc2584',
        'Climate changing our nation\'s landscapes: NOAA, American Public Gardens Association unveil partnership to enhance awareness': 'e4160240-e5ad-41ee-ad56-9cbdaf162369'
    },
    'article': {
        'North American carbon dioxide sources and sinks: magnitude, attribution, and uncertainty': '10.1890/120066',
        'Air Quality and Exercise-Related Health Benefits from Reduced Car Travel in the Midwestern United States': '10.1289/ehp.1103440',
        'A Shift in Western Tropical Pacific Sea Level Trends during the 1990s': '10.1175/2011JCLI3932.1',
        'An update on Earth\'s energy balance in light of the latest global observations': '10.1038/ngeo1580',
        'About the Lack of Warming...': ('web_page', 'e2ec2d0f-430c-4032-a309-2514ca1f6572'),
        'The Myth of the 1970s Global Cooling Scientific Consensus': '10.1175/2008BAMS2370.1',
        'Hurricane Sandy devestates NY/NJ-area passenger rai systems': ('web_page', '135ae7d9-56e3-4dcb-a81c-42a6f1e9b332'),
        'Climate change impacts of US reactive nitrogen': '10.1073/pnas.1114243109',
        'Range-wide patterns of greater sage-grouse persistence': '10.1111/j.1472-4642.2008.00502.x',
        'Monitoring and understanding changes in heat waves, cold waves, floods and droughts in the United States: State of Knowledge': '10.1175/BAMS-D-12-00066.1',
        'How do we know the world has warmed?': '10.1175/BAMS-91-7-StateoftheClimate',
        'Attribution of observed historical near-surface temperature variations to anthropogenic and natural causes usingCMIP5simulations': '10.1002/jgrd.50239',
        'Evaluating global trends (1988-2010) in harmonized multi-satellite surface soil moisture': '10.1029/2012gl052988'
    },
    'book': {
        'Climate Change and Pacific Islands: Indicators and Impacts. Report for the 2012 Pacific Islands Regional Climate Assessment': ('report', 'pirca-climate-change-and-pacific-islands'),
        'A focus on climate during the past 100 years in "Climate Variability and Extremes during the Past 100 Years"': '998aa4c2-9f0d-478c-b7bb-19e383c628a9'
    },
    'generic': {
        'Verrazano Narrows Storm Surge Barrier: Against the Deluge: Storm Barriers to Protect New York City, March 31st 2009': '01d188d1-636b-49e6-af43-c1544cee9319',
        'National Association of Recreation Resource Planners Annual Conference': 'national-association-of-recreation-resource-planners-annual-conference-2005'
    }
}
| {
"repo_name": "USGCRP/gcis-py-client",
"path": "gcis_clients/webform_transforms.py",
"copies": "1",
"size": "19139",
"license": "bsd-3-clause",
"hash": -2598949407985435600,
"line_mean": 66.8687943262,
"line_max": 190,
"alpha_frac": 0.7126809133,
"autogenerated": false,
"ratio": 2.819119163352482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4031800076652482,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abuddenberg'
from copy import deepcopy
import json
import re
import inspect
import traceback
from dateutil.parser import parse
class Gcisbase(object):
    """Common base for GCIS domain objects.

    Maps a raw JSON dict onto instance attributes: every name in ``fields``
    becomes an attribute (defaulting to None), incoming keys are renamed via
    the ``trans`` mapping, and string values are stripped and UTF-8 encoded
    before being stored.  Only truthy values are kept.
    """

    def __init__(self, data, fields=(), trans=()):
        # Master field list and key-translation table for this object type
        self.gcis_fields = fields
        self.translations = trans
        self.identifier = None

        # Keep an untouched copy of the incoming JSON for debugging
        self.original = deepcopy(data)

        # Every known field becomes an attribute, defaulting to None
        self.__dict__.update(dict.fromkeys(self.gcis_fields, None))

        # Rename incoming keys according to the translation table
        for source_key in self.translations:
            value = data.pop(source_key, None)
            if value is not None:
                data[self.translations[source_key]] = value

        for key in data:
            if not hasattr(self, key):
                # Unknown keys are ignored entirely
                continue
            try:
                # Strip whitespace from strings for consistency
                data[key] = data[key].strip()
                # Encode to UTF-8 (NOTE(review): Python 2 era behavior; under
                # Python 3 this stores bytes -- confirm before porting)
                data[key] = data[key].encode('utf-8')
            except AttributeError:
                pass
            finally:
                # Falsy values (empty string, None) leave the attribute None
                if data[key]:
                    setattr(self, key, data[key])

    def merge(self, other):
        """Copy over any attribute of ``other`` for which ours is missing."""
        members = inspect.getmembers(self, lambda m: not inspect.isroutine(m))
        for name, current in members:
            if name.startswith('__'):
                continue
            if current in (None, '') and hasattr(other, name):
                setattr(self, name, getattr(other, name))
        return self

    def as_json(self, indent=0, omit_fields=()):
        """Serialize the GCIS fields to JSON; 'uri'/'href' are always omitted."""
        hidden = {'uri', 'href'} | set(omit_fields)
        visible = set(self.gcis_fields) - hidden
        return json.dumps({name: getattr(self, name) for name in visible}, indent=indent)
class GcisObject(Gcisbase):
    """Base for GCIS publication objects that carry contributors and parents.

    Pops the 'contributors' and 'parents' subtrees out of the incoming dict
    and wraps them in Contributor/Parent instances before delegating the rest
    to Gcisbase.
    """

    def __init__(self, data, **kwargs):
        if type(data) is not dict:
            raise TypeError('Expected dict, got {t}'.format(t=type(data)))

        #Special case for contributors
        contrib_list = data.pop('contributors', None)
        self.contributors = [Contributor(contrib) for contrib in contrib_list] if contrib_list else []

        parents_list = data.pop('parents', None)
        self.parents = [Parent(parent) for parent in parents_list] if parents_list else []

        super(GcisObject, self).__init__(data, **kwargs)

    def add_contributor(self, contributor):
        """Append a Contributor; raise TypeError for anything else."""
        if isinstance(contributor, Contributor):
            self.contributors.append(contributor)
        else:
            raise TypeError('Expected Contributor, got {t}'.format(t=type(contributor)))

    def add_person(self, person):
        """Attach a Person as a new (otherwise empty) Contributor.

        BUGFIX: the original called Contributor(person, Organization()), but
        Contributor.__init__ accepts a single data dict and Organization
        requires a data argument, so that call always raised TypeError.
        Build an empty Contributor and attach the person instead.
        """
        contrib = Contributor({})
        contrib.person = person
        self.contributors.append(contrib)

    def add_parent(self, parent):
        """Append a Parent; raise TypeError for anything else."""
        if isinstance(parent, Parent):
            self.parents.append(parent)
        else:
            raise TypeError('Expected Parent, got {t}'.format(t=type(parent)))
class Figure(GcisObject):
    """A GCIS figure: chapter-scoped, ordered, with attached images and files."""

    def __init__(self, data, local_path=None, remote_path=None, trans=()):
        self.gcis_fields = [
            'usage_limits', 'kindred_figures', 'time_start', 'time_end', 'keywords', 'lat_min', 'create_dt', 'lat_max',
            'title', 'ordinal', 'lon_min', 'report_identifier', 'chapter', 'submission_dt', 'uri', 'lon_max',
            'caption', 'source_citation', 'attributes', 'identifier', 'chapter_identifier', 'images', 'url'
        ]

        super(Figure, self).__init__(data, fields=self.gcis_fields, trans=trans)

        # Optional local/remote filesystem locations for the figure artifact
        self.local_path = local_path
        self.remote_path = remote_path

        #Special case for chapter: wrap the raw subtree in a Chapter instance
        chap_tree = data.pop('chapter', None)
        self.chapter = Chapter(chap_tree) if chap_tree else self.chapter

        #Special case for images
        image_list = data.pop('images', None)
        self.images = [Image(image) for image in image_list] if image_list else []

        file_list = data.pop('files', None)
        self.files = [File(f) for f in file_list] if file_list else []

        #Hack: strip the URI prefix out of the identifier
        self.identifier = self.identifier.replace('/figure/', '') if self.identifier not in ('', None) else '***ID MISSING***'

    @property
    def figure_num(self):
        """'<chapter>.<ordinal>' display number, e.g. '2.7'."""
        if isinstance(self.chapter, Chapter) and self.chapter.number and self.ordinal:
            return '{0}.{1}'.format(self.chapter.number, self.ordinal)
        else:
            return '{0}.{1}'.format(self.chapter, self.ordinal)

    #TODO: Ordinal handling is unnecessarily complex
    @figure_num.setter
    def figure_num(self, value):
        if value:
            try:
                chp, fig = value.split('.')
                chp = int(chp)
                fig = int(fig)
            except ValueError:
                # BUGFIX: use the function form of print so this module stays
                # valid under Python 3 (a single string arg prints identically
                # under Python 2, matching the print() call used elsewhere)
                print('Invalid chapter/figure numbers: ' + value)
                chp = None
                fig = None
            self.ordinal = fig

            #If we have an actual Chapter instance, populate it
            if isinstance(self.chapter, Chapter):
                self.chapter.number = chp
            else:
                self.chapter = chp

    def as_json(self, indent=0, omit_fields=('images', 'chapter', 'kindred_figures', 'keywords')):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Figure, self).as_json(indent=indent, omit_fields=omit_fields)

    def __str__(self):
        string = '<Figure: id:{f_id} fignum:{f_num} name:{f_name}>\n\t[Images: {imgs}]'.format(
            f_id=self.identifier, f_num=self.figure_num, f_name=self.title, imgs=[i.identifier for i in self.images]
        )
        return string

    def __repr__(self):
        return self.__str__()

    def merge(self, other):
        # Special handling for Chapters: merge instance-to-instance, or adopt
        # the other's Chapter while keeping our bare chapter number
        if isinstance(other.chapter, Chapter) and isinstance(self.chapter, Chapter):
            self.chapter.merge(other.chapter)
        #This might want to move to Chapter's merge()
        elif isinstance(other.chapter, Chapter) and not isinstance(self.chapter, Chapter):
            chapter_num = self.chapter
            self.chapter = other.chapter
            self.chapter.number = chapter_num
        return super(Figure, self).merge(other)
class Report(GcisObject):
    """A top-level GCIS report record."""

    def __init__(self, data, trans=()):
        self.gcis_fields = ['doi', 'contact_note', 'title', 'publication_year', 'summary', 'url', 'contact_email', 'identifier', 'report_type_identifier']

        super(Report, self).__init__(data, fields=self.gcis_fields, trans=trans)

        # if self.report_type_identifier not in ['report', 'assessment', 'technical_input', 'indicator']:
        #     raise ValueError("report_type_identifier must be one of 'report', 'assessment', 'technical_input', 'indicator'")

    def as_json(self, indent=0, omit_fields=()):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Report, self).as_json(indent=indent, omit_fields=omit_fields)

    def __repr__(self):
        return '<Report: id:{id}>'.format(id=self.identifier)

    def __str__(self):
        return self.__repr__()
class Chapter(GcisObject):
    """A report chapter: identity plus its ordinal number within the report."""

    def __init__(self, data):
        self.gcis_fields = ['report_identifier', 'identifier', 'number', 'url', 'title']

        super(Chapter, self).__init__(data, fields=self.gcis_fields)

    def as_json(self, indent=0, omit_fields=()):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Chapter, self).as_json(indent=indent, omit_fields=omit_fields)

    def __repr__(self):
        return '<Chapter: id:{id}>'.format(id=self.identifier)

    def __str__(self):
        return self.__repr__()
class Image(GcisObject):
    """An image attached to a figure; create_dt is normalized to ISO 8601."""

    def __init__(self, data, local_path=None, remote_path=None, trans=()):
        self.gcis_fields = ['attributes', 'create_dt', 'description', 'identifier', 'lat_max', 'lat_min', 'lon_max',
                            'uri', 'lon_min', 'position', 'submission_dt', 'time_end', 'time_start', 'title', 'href',
                            'usage_limits']

        #Private attributes for handling date parsing
        self._create_dt = None

        super(Image, self).__init__(data, fields=self.gcis_fields, trans=trans)

        # Optional local/remote filesystem locations for the image artifact
        self.local_path = local_path
        self.remote_path = remote_path

    @property
    def create_dt(self):
        return self._create_dt

    @create_dt.setter
    def create_dt(self, value):
        try:
            self._create_dt = parse(value).isoformat() if value else None
        except (TypeError, ValueError):
            # BUGFIX: also catch ValueError -- dateutil's parse raises it for
            # unparseable strings, and Dataset's date setters already handle
            # both; keep the two consistent
            self._create_dt = None

    def __str__(self):
        return '<Image: id:{id} name:{name}>'.format(id=self.identifier, name=self.title)
class Dataset(GcisObject):
    """A GCIS dataset record, with date fields normalized to ISO 8601 strings."""

    def __init__(self, data, trans=(), known_ids=None):
        self.gcis_fields = ['contributors', 'vertical_extent', 'native_id', 'href', 'references', 'cite_metadata',
                            'scale', 'publication_year', 'temporal_extent', 'version', 'parents', 'scope', 'type',
                            'processing_level', 'files', 'data_qualifier', 'access_dt', 'description',
                            'spatial_ref_sys', 'spatial_res', 'spatial_extent', 'doi', 'name', 'url', 'uri',
                            'identifier', 'release_dt', 'attributes']

        #TODO: This hack has got to go.
        # Optional map of dataset display names -> known GCIS identifiers
        self._identifiers = known_ids

        #Private attributes for handling date parsing
        self._release_dt = None
        self._access_dt = None
        self._publication_year = None
        self._temporal_extent = None

        super(Dataset, self).__init__(data, fields=self.gcis_fields, trans=trans)

        # Prefer a known identifier keyed by dataset name, when one exists
        self.identifier = self._identifiers[self.name] if self._identifiers and self.name in self._identifiers else self.identifier

    def __repr__(self):
        return '<Dataset: id:{id} name:{name}>'.format(id=self.identifier, name=self.name)

    def __str__(self):
        return self.__repr__()

    def as_json(self, indent=0, omit_fields=('files', 'parents', 'contributors', 'references')):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Dataset, self).as_json(indent=indent, omit_fields=omit_fields)

    def merge(self, other):
        """Take other's field when ours is empty or other's value is longer
        (i.e. likely more complete)."""
        for k in self.__dict__:
            if not hasattr(other, k):
                continue
            other_val = getattr(other, k)
            mine = self.__dict__[k]
            if mine in (None, ''):
                self.__dict__[k] = other_val
            else:
                # BUGFIX: the original compared the length of other's value
                # against our *value* (len(...) > self.__dict__[k]) instead of
                # our value's length -- a silent no-op on Python 2 and a
                # TypeError on Python 3.
                try:
                    if len(other_val) > len(mine):
                        self.__dict__[k] = other_val
                except TypeError:
                    # Not everything has a length (None, ints); keep ours
                    pass
        return self

    @property
    def release_dt(self):
        return self._release_dt

    @release_dt.setter
    def release_dt(self, value):
        try:
            self._release_dt = parse(value).isoformat() if value else None
        except (TypeError, ValueError):
            # Unparseable or non-string input; treat as unknown
            self._release_dt = None

    @property
    def access_dt(self):
        return self._access_dt

    @access_dt.setter
    def access_dt(self, value):
        try:
            self._access_dt = parse(value).isoformat() if value else None
        except (TypeError, ValueError):
            self._access_dt = None

    @property
    def publication_year(self):
        return self._publication_year

    @publication_year.setter
    def publication_year(self, value):
        # Pull the first four-digit year out of whatever was provided
        match = re.search(r'\d{4}', str(value)) if value else None
        self._publication_year = match.group() if match else None

    @property
    def temporal_extent(self):
        return self._temporal_extent

    @temporal_extent.setter
    def temporal_extent(self, value):
        try:
            self.set_temporal_extent(*value.split())
        except AttributeError:
            # BUGFIX: the original message was truncated ('Unable to ')
            print('Unable to parse temporal extent: {0}'.format(value))

    #Can't use property.setter directly to multiple args
    def set_temporal_extent(self, start_dt, end_dt):
        try:
            self._temporal_extent = '{0} {1}'.format(parse(start_dt).isoformat(), parse(end_dt).isoformat()) if start_dt and end_dt else None
        except (TypeError, ValueError):
            self._temporal_extent = None
class Activity(GcisObject):
    """The GCIS activity (provenance process) behind a publication."""

    def __init__(self, data, trans=()):
        # NOTE(review): 'metholodogies' is misspelled but used consistently
        # here and in as_json's omit list; presumably it mirrors the field
        # name the GCIS API actually serves -- confirm before renaming.
        self.gcis_fields = ['start_time', 'uri', 'methodology', 'data_usage', 'href', 'metholodogies', 'end_time',
                            'output_artifacts', 'duration', 'identifier', 'publication_maps', 'computing_environment',
                            'software', 'visualization_software', 'notes']

        super(Activity, self).__init__(data, fields=self.gcis_fields, trans=trans)

    def as_json(self, indent=0, omit_fields=('metholodogies', 'publication_maps')):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Activity, self).as_json(indent=indent, omit_fields=omit_fields)

    def __repr__(self):
        return '<Activity: id:{id}>'.format(id=self.identifier)

    def __str__(self):
        return self.__repr__()
class Person(Gcisbase):
    """A GCIS person record."""

    def __init__(self, data, trans=()):
        self.gcis_fields = ['first_name', 'last_name', 'middle_name', 'contributors', 'url', 'uri', 'href', 'orcid',
                            'id']

        super(Person, self).__init__(data, fields=self.gcis_fields, trans=trans)

    def as_json(self, indent=0, omit_fields=('contributors',)):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Person, self).as_json(indent=indent, omit_fields=omit_fields)

    def __repr__(self):
        return '<Person: id:{id} first:{fn} last:{ln}>'.format(id=self.id, fn=self.first_name, ln=self.last_name)

    def __str__(self):
        return self.__repr__()
class Organization(Gcisbase):
    """A GCIS organization record."""

    def __init__(self, data, trans=()):
        self.gcis_fields = ['organization_type_identifier', 'url', 'uri', 'href', 'country_code', 'identifier', 'name']
        super(Organization, self).__init__(data, fields=self.gcis_fields, trans=trans)

    def __repr__(self):
        template = '<Organization: id:{id} name:{name}>'
        return template.format(id=self.identifier, name=self.name)

    def __str__(self):
        return repr(self)
class Contributor(Gcisbase):
    """Joins a Person, an Organization, and a Role for one publication."""

    def __init__(self, data):
        self.gcis_fields = ['role_type_identifier', 'organization_uri', 'uri', 'href', 'person_uri', 'person_id', 'id']
        super(Contributor, self).__init__(data, fields=self.gcis_fields)

        # Wrap the nested person/organization subtrees, when present
        subtree = data.pop('person', None)
        org_subtree = data.pop('organization', None)
        self.person = Person(subtree) if subtree else None
        self.organization = Organization(org_subtree) if org_subtree else None

        # Role is derived from the role type identifier set by the base class
        role_id = self.role_type_identifier
        self.role = Role(role_id) if role_id else None

    def __repr__(self):
        return '<Contributor: Person:{p} Org:{o} Role:{r}>'.format(p=self.person, o=self.organization, r=self.role)

    def __str__(self):
        return repr(self)
class Role(object):
    """Thin wrapper around a GCIS contributor role type identifier."""

    def __init__(self, type_id):
        # The raw role identifier string, e.g. 'scientist'
        self.type_id = type_id

    def __repr__(self):
        return self.type_id

    def __str__(self):
        return self.type_id
class Parent(Gcisbase):
    """A provenance link from a publication to the work it was derived from."""

    def __init__(self, data, target_pub=None, trans=(), pubtype_map=None):
        self.gcis_fields = ['relationship', 'url', 'publication_type_identifier', 'label', 'activity_uri', 'note']

        # Optional normalization table consulted by the property setter below
        self.publication_type_map = pubtype_map
        self._publication_type_identifier = None
        self.activity = None

        super(Parent, self).__init__(data, fields=self.gcis_fields, trans=trans)

        self.publication = target_pub

        #HACK: default the relationship type when none was supplied
        if not self.relationship:
            self.relationship = 'prov:wasDerivedFrom'

    @property
    def publication_type_identifier(self):
        return self._publication_type_identifier

    @publication_type_identifier.setter
    def publication_type_identifier(self, value):
        # Normalize webform publication types through the map, when one exists
        mapping = self.publication_type_map
        if mapping and value in mapping:
            self._publication_type_identifier = mapping[value]
        else:
            self._publication_type_identifier = value

    @staticmethod
    def from_obj(gcis_obj, activity=None):
        """Build a Parent pointing at an existing GCIS domain object."""
        gcis_obj_type = type(gcis_obj).__name__.lower()

        # Prefer a title, then a name, for the human-readable label
        if hasattr(gcis_obj, 'title'):
            label = gcis_obj.title
        elif hasattr(gcis_obj, 'name'):
            label = gcis_obj.name
        else:
            label = '***MISSING***'

        if gcis_obj_type and gcis_obj.identifier:
            url = '/{type}/{id}'.format(type=gcis_obj_type, id=gcis_obj.identifier)
        else:
            url = None

        parent = Parent({
            'relationship': 'prov:wasDerivedFrom',
            'publication_type_identifier': gcis_obj_type,
            'url': url,
            'label': label
        }, target_pub=gcis_obj)
        parent.activity = activity
        return parent

    def __repr__(self):
        return '<Parent: rel:{rel} pub_type:{type} url:{url} label:{lbl}>'.format(
            rel=self.relationship, type=self.publication_type_identifier, url=self.url, lbl=self.label
        )

    def __str__(self):
        return repr(self)
class Article(Gcisbase):
    """A GCIS journal article record."""

    def __init__(self, data, trans=()):
        self.gcis_fields = ['files', 'doi', 'contributors', 'title', 'url', 'notes', 'uri',
                            'journal_identifier', 'journal_pages', 'cited_by', 'href', 'parents', 'year',
                            'journal_vol', 'references', 'identifier']

        super(Article, self).__init__(data, fields=self.gcis_fields, trans=trans)

    def as_json(self, indent=0, omit_fields=('files', 'parents', 'contributors', 'references', 'cited_by')):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Article, self).as_json(indent=indent, omit_fields=omit_fields)

    def __repr__(self):
        return '<Article: id:{id} title:{t}>'.format(id=self.identifier, t=self.title)

    def __str__(self):
        return self.__repr__()
class Webpage(Gcisbase):
    """A GCIS web page record."""

    def __init__(self, data, trans=()):
        self.gcis_fields = ['files', 'contributors', 'title', 'url', 'uri', 'cited_by', 'href', 'references',
                            'parents', 'access_date', 'identifier']

        super(Webpage, self).__init__(data, fields=self.gcis_fields, trans=trans)

    def as_json(self, indent=0, omit_fields=('files', 'parents', 'contributors', 'references', 'cited_by')):
        # BUGFIX: forward 'indent' -- it was accepted but silently dropped
        return super(Webpage, self).as_json(indent=indent, omit_fields=omit_fields)

    def __repr__(self):
        return '<Webpage id:{id} title:{t}>'.format(id=self.identifier, t=self.title)

    def __str__(self):
        return self.__repr__()
class File(Gcisbase):
    """A file attached to a GCIS publication (image data, thumbnails, etc.)."""

    def __init__(self, data, trans=()):
        self.gcis_fields = ['landing_page', 'sha1', 'url', 'thumbnail_href', 'uri', 'href', 'location', 'file', 'identifier', 'thumbnail', 'mime_type', 'size']
        super(File, self).__init__(data, fields=self.gcis_fields, trans=trans)

    def __repr__(self):
        template = '<File id:{id} path:{p}>'
        return template.format(id=self.identifier, p=self.file)

    def __str__(self):
        return repr(self)
| {
"repo_name": "USGCRP/gcis-py-client",
"path": "gcis_clients/domain.py",
"copies": "1",
"size": "18981",
"license": "bsd-3-clause",
"hash": 4920206442520414000,
"line_mean": 35.7848837209,
"line_max": 159,
"alpha_frac": 0.589273484,
"autogenerated": false,
"ratio": 3.8038076152304607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4893081099230461,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abuddenberg'
from gcis_clients.domain import Person
# Maps survey-form dataset field names to the corresponding attribute names
# on the GCIS Dataset domain object.
DATASET_TRANSLATIONS = {
    'dataset_name': 'name',
    'dataset_id': 'native_id',
    'dataset_citation': 'cite_metadata',
    'dataset_description': 'description',
    'dataset_year_published': 'publication_year',
    # 'dataset_period_record': 'temporal_extent',
    'dataset_version': 'version',
    'dataset_access_date': 'access_dt',
    'dataset_url': 'url',
    'dataset_release_date': 'release_dt'
}
# Maps dataset display names (as they appear in survey metadata) to canonical
# GCIS dataset identifiers.
DATASET_IDS = {
    'U.S. Climate Divisional Dataset Version 2': 'nca3-cddv2-r1',
    # NOTE: this key previously appeared twice with the same value; Python
    # silently keeps only the last duplicate, so the redundant entry was removed.
    'World Climate Research Program\'s (WCRP\'s) Coupled Model Intercomparison Project phase 5 (CMIP5) multi-model ensemble': 'nca3-cmip5-r1',
    'ArboNet': 'cdc-arbonet',
    'U.S. Natural Hazard Statistics': 'noaa-nws-us-natural-hazard-statistics',
    'Billion-Dollar Weather and Climate Disasters': 'noaa-ncdc-billion-dollar-weather-climate-disasters',
    'ESRI USA10 dataset (ArcGIS version 10.0)': 'esri-arcgis-v10-0',
    'nClimDiv': 'noaa-ncdc-cag-us-temperature-nclimdiv',
    'Global Historical Climatology Network (GHCN) Daily': 'noaa-ncdc-ghcn-daily',
    'Continential United States Hurricane Impacts/ Landfalls 1851-2015': 'noaa-aoml-hurdat'
    # 'Time Bias Corrected Divisional Temperature Precipitation Drought Index': 'noaa-ncei-time-bias-corrected-divisional-temperature-precipitation-drought-index',
    # 'North American Drought Atlas': 'noaa-ncei-north-american-drought-atlas'
}
# Maps survey copyright-status codes to the user-facing copyright statement
# stored in GCIS.  A None status passes through unchanged.
COPYRIGHT_TRANSLATIONS = {
    None: None,
    'requested': 'Copyright protected. Obtain permission from the original figure source.',
    'denied': 'Copyright protected. Obtain permission from the original figure source.',
    'obtained': 'Copyright protected. Obtain permission from the original figure source.',
    'original_work_nr': 'Free to use with credit to the original figure source.'
}
# Maps survey parent-publication field names to GCIS Parent attribute names;
# every per-type *_title field collapses onto the generic 'label' attribute.
PARENT_TRANSLATIONS = {
    'publicationType': 'publication_type_identifier',
    'report_name': 'label',
    'journal_article_title': 'label',
    'book_title': 'label',
    'book_section_title': 'label',
    'conference_proceeding_title': 'label',
    'electronic_article_title': 'label',
    'newspaper_article_title': 'label',
    'web_page_title': 'label'
}
# Maps survey publication-type labels to GCIS publication type identifiers.
# NOTE(review): 'Book Section' -> 'report' and 'Conference Proceedings' ->
# 'generic' look like deliberate coarsening onto available GCIS types —
# confirm before "correcting" them.
PARENT_PUBTYPE_MAP = {
    'Book': 'book',
    'Book Section': 'report',
    'Conference Proceedings': 'generic',
    'Electronic Article': 'article',
    'Journal Article': 'article',
    'Newspaper Article': 'article',
    'Report': 'report',
    'Web Page': 'webpage'
}
# Known publication-title -> GCIS identifier mappings, keyed by GCIS
# publication type.  Used to resolve parent publications whose titles don't
# match cleanly via search.  Values are plain identifiers, except under
# 'book', where the value is a (publication type, identifier) pair.
PARENT_SEARCH_HINTS = {
    'report': {
        'Climate Change Impacts in the United States: The Third National Climate Assessment': 'nca3',
        'Third National Climate Assessment': 'nca3',
        'A conceptual framework for action on the social determinants of health': 'conceptual-framework-for-action-on-the-social-determinants-of-health',
        'Regional Surface Climate Conditions in CMIP3 and CMIP5 for the United States: Differences, Similarities, and Implications for the U.S. National Climate Assessment.': 'noaa-techreport-nesdis-144',
        '2012 Census of Agriculture': 'census-agriculture-2012',
        'Regional Climate Trends and Scenarios for the U.S. National Climate Assessment. Part 3. Climate of the Midwest U.S.,': 'noaa-techreport-nesdis-142-3'
    },
    'dataset': {
        'Global Historical Climatology Network - Daily': 'noaa-ncdc-ghcn-daily'
    },
    'article': {
        'Projections of future temperature-attributable deaths in 209 U.S. cities using a cluster based Poisson approach': '10.1186/s12940-015-0071-2',
        'Projections of future temperature-attributable deaths in 209 U.S. cities 1 using a cluster-based Poisson approach': '10.1186/s12940-015-0071-2',
        'A framework for examining climate driven changes to the seasonality and geographic range of coastal pathogens': '10.1016/j.crm.2015.03.002',
        'Effects of Ocean Warming on Growth and Distribution of Five Ciguatera-Associated Dinoflagellates in the Caribbean and Implications for Ciguatera Fish Poisoning': 'potential-effects-of-climate-change-on-growth-and-distribution-of-five-caribbean-gambierdiscus-species',
        'Effects of elevated CO2 on the protein concentration of food crops: a meta-analysis': '10.1111/j.1365-2486.2007.01511.x',
        'A new scenario framework for climate change research: the concept of shared socio-economic pathways': '10.1007/s10584-013-0905-2',
        'Climate Change influences on the annual onset of Lyme disease in the United States': '10.1016/j.ttbdis.2015.05.005',
        'The roads ahead: Narratives for shared socioeconomic pathways describing world futures in the 21st century': '10.1016/j.gloenvcha.2015.01.004'
    },
    'webpage': {
        'Screenshot of: Social Vulnerability Index (SVI) Mapping Dashboard': '6d0ccc19-cdcc-4d56-acb7-d62f12611333',
        'Lyme Disease: Data and Statistics: Maps': '7206f315-04be-4536-9e10-70155edfada0',
        'Risk of very large fires could increase sixfold by mid-century in the US': 'af962cb2-dd38-44f6-9b4e-7bef8577b92c',
        'Flickr account: User: TeamSaintLouis. Album: Low Water on the Mississippi River': '9853a51e-b7c2-42c5-a201-d2f5c8527f52',
        'USDA Biological Control of Pests Research Unit': '950835c0-2088-4f06-a61a-a809387d4930'
    },
    'book': {
        'Assessing Health Vulnerability to Climate Change: A Guide for Health Departments': ('report', 'assessing-health-vulnerability-to-climate-change-a-guide-for-health-departments')
    }
}
# Maps organization names as written in survey metadata (including common
# variants and abbreviations) to GCIS organization identifiers.
ORG_IDS = {
    'NOAA NCDC/CICS-NC': 'cooperative-institute-climate-satellites-nc',
    'NCDC/CICS-NC': 'cooperative-institute-climate-satellites-nc',
    'NOAA NCDC/CICS NC': 'cooperative-institute-climate-satellites-nc',
    'NESDIS/NCDC': 'national-climatic-data-center',
    'NCDC': 'national-climatic-data-center',
    'U.S. Forest Service': 'us-forest-service',
    'NOAA Pacific Marine Environmental Laboratory': 'pacific-marine-environmental-laboratory',
    'Jet Propulsion Laboratory': 'jet-propulsion-laboratory',
    'HGS Consulting': 'hgs-consulting-llc',
    'University of Virginia': 'university-virginia',
    'Miami-Dade Dept. of Regulatory and Economic Resources': 'miami-dade-dept-regulatory-economic-resources',
    'Nansen Environmental and Remote Sensing Center': 'nansen-environmental-and-remote-sensing-center',
    'University of Illinois at Urbana-Champaign': 'university-illinois',
    'USGCRP': 'us-global-change-research-program',
    'National Park Service': 'national-park-service',
    'Institute of the Environment': 'university-arizona',
    'USGS': 'us-geological-survey',
    'University of Puerto Rico': 'university-puerto-rico',
    'University of Alaska': 'university-alaska',
    'U.S. Department of Agriculture': 'us-department-agriculture',
    'Kiksapa Consulting': 'kiksapa-consulting-llc',
    'Centers for Disease Control and Prevention': 'centers-disease-control-and-prevention',
    'Pacific Northwest Laboratories': 'pacific-northwest-national-laboratory',
    'Susanne Moser Research & Consulting': 'susanne-moser-research-consulting',
    'NEMAC': 'national-environmental-modeling-analysis-center',
    'LBNL': 'lawrence-berkeley-national-laboratory',
    'Texas Tech University': 'texas-tech-university',
    # NOTE(review): the combined CDC/NCAR name resolves to CDC only — confirm
    # that is the intended attribution.
    'Centers for Disease Control and Prevention / National Center for Atmospheric Research': 'centers-disease-control-and-prevention',
    'Stratus Consulting Inc.': 'stratus-consulting'
}
# Known GCIS contributor role identifiers, kept here for reference when
# filling in CONTRIB_ROLES below:
# host
# point_of_contact
# editor
# analyst
# principal_author
# secondary_author
# data_producer
# author
# scientist
# coordinator
# manager
# convening_lead_author
# lead_author
# contributor
# lead_agency
# publisher
# executive_editor
# distributor
# engineer
# primary_author
# graphic_artist
# coordinating_lead_author
# contributing_editor
# funding_agency
# contributing_author
# data_archive
# advisor
# contributing_agency
# Maps contributor names as they appear in survey metadata (including typos
# and variants) to GCIS Person records; 'id' is the GCIS person id where known.
PERSON_TRANSLATIONS = {
    'Alexis St. Juliana': Person({'id': 9934, 'first_name': 'Alexis', 'last_name': 'St. Juliana'}),
    'Alexis Juliana': Person({'id': 9934, 'first_name': 'Alexis', 'last_name': 'St. Juliana'}),
    'Pat Dolwick': Person({'first_name': 'Patrick', 'last_name': 'Dolwick'}),
    'Alan Joyner': Person({'first_name': 'Alan', 'last_name': 'Joyner'}),
    'Juli Trtanj': Person({'id': 1264, 'first_name': 'Juli M.', 'last_name': 'Trtanj'}),
    'Juli M. Trtanj': Person({'id': 1264, 'first_name': 'Juli M.', 'last_name': 'Trtanj'}),
    # NOTE(review): the next two look swapped ('Stephanie Moore' maps to
    # first_name 'Stephanie K.' and vice versa, same id 9889) — confirm.
    'Stephanie Moore': Person({'id': 9889, 'first_name': 'Stephanie K.', 'last_name': 'Moore'}),
    'Stephanie K. Moore': Person({'id': 9889, 'first_name': 'Stephanie', 'last_name': 'Moore'}),
    'Steve Kibler': Person({'first_name': 'Steven R.', 'last_name': 'Kibler'}),
    'Jesse Bell': Person({'first_name': 'Jesse E.', 'last_name': 'Bell'}),
    'Dave Mills': Person({'first_name': 'David M.', 'last_name': 'Mills'}),
    'David Mills': Person({'first_name': 'David M.', 'last_name': 'Mills'}),
    # NOTE(review): the trailing ')' in these keys and the crossed first/last
    # names appear to mirror typos in the survey source data — confirm before
    # "fixing" either side.
    'Lesley Crimmins)': Person({'first_name': 'Lesley', 'last_name': 'Jantarasami'}),
    'Allison Jantarasami)': Person({'first_name': 'Allison', 'last_name': 'Crimmins'}),
    'Lewis Ziska': Person({'id': 923, 'first_name': 'Lewis', 'last_name': 'Ziska'}),
    # NOTE(review): last_name 'F. Garofalo' (initial included) looks odd — confirm.
    'Jada Garofalo': Person({'id': 9885, 'first_name': 'Jada', 'last_name': 'F. Garofalo'}),
    'Micah Hahn': Person({'id': 9886, 'first_name': 'Micah', 'last_name': 'Hahn'}),
    'Jeanette Thurston': Person({'id': 9892, 'first_name': 'Jeanette', 'last_name': 'Thurston'}),
    'Daniel Dodgen': Person({'id': 9908, 'first_name': 'Daniel', 'last_name': 'Dodgen'}),
    'John Jacobs': Person({'id': 9888, 'first_name': 'John', 'last_name': 'Jacobs'}),
    'Stephanie Herring': Person({'id': 8333, 'first_name': 'Stephanie', 'last_name': 'Herring'}),
    'Tom Maycock': Person({'id': 10926, 'first_name': 'Tom', 'last_name': 'Maycock'})
}
# Maps contributor names (as written in survey metadata, typos included) to
# the (GCIS organization identifier, role identifier) pair used when posting
# contributor records.  Role identifiers come from the list above.
CONTRIB_ROLES = {
    'Kenneth Kunkel': ('cooperative-institute-climate-satellites-nc', 'scientist'),
    'Allison Crimmins': ('us-environmental-protection-agency', 'point_of_contact'),
    'Micah Hahn': ('centers-disease-control-and-prevention', 'scientist'),
    'Jada Garofalo': ('centers-disease-control-and-prevention', 'point_of_contact'),
    'Ben Beard': ('centers-disease-control-and-prevention', 'scientist'),
    'Dave Mills': ('stratus-consulting', 'analyst'),
    'David Mills': ('stratus-consulting', 'analyst'),
    'Alexis Juliana': ('stratus-consulting', 'analyst'),
    'Alexis St. Juliana': ('stratus-consulting', 'analyst'),
    'Neal Fann': ('us-environmental-protection-agency', 'analyst'),
    'Pat Dolwick': ('us-environmental-protection-agency', 'scientist'),
    'Lewis Ziska': ('us-department-agriculture', 'scientist'),
    'Juli Trtanj': ('national-oceanic-atmospheric-administration', 'point_of_contact'),
    'Alan Joyner': ('university-north-carolina-chapel-hill', 'graphic_artist'),
    'Jeanette Thurston': ('us-department-agriculture', 'scientist'),
    'Richard Streeter': ('stratus-consulting', 'analyst'),
    'Stephanie Moore': ('national-oceanic-atmospheric-administration', 'scientist'),
    'Steve Kibler': ('national-oceanic-atmospheric-administration', 'scientist'),
    'Jesse Bell': ('national-oceanic-atmospheric-administration', 'scientist'),
    'Lesley Jantarasami': ('us-environmental-protection-agency', 'analyst'),
    'Daniel Dodgen': ('us-department-health-human-services', 'point_of_contact'),
    'Andrea Maguire': ('us-environmental-protection-agency', 'point_of_contact'),
    # Keys with a trailing ')' mirror typos in the survey source data.
    'Lesley Crimmins)': ('us-environmental-protection-agency', 'analyst'),
    'Allison Jantarasami)': ('us-environmental-protection-agency', 'point_of_contact'),
    'John Jacobs': ('national-oceanic-atmospheric-administration', 'scientist'),
    'Stephanie Herring': ('national-oceanic-atmospheric-administration', 'scientist'),
    'Tom Maycock': ('cooperative-institute-climate-satellites-nc', 'point_of_contact'),
}
| {
"repo_name": "USGCRP/gcis-py-client",
"path": "gcis_clients/survey_transforms.py",
"copies": "1",
"size": "12022",
"license": "bsd-3-clause",
"hash": -3008898724694510000,
"line_mean": 55.7075471698,
"line_max": 276,
"alpha_frac": 0.6953086009,
"autogenerated": false,
"ratio": 2.9329104659673093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.911421685488879,
"avg_score": 0.0028004423957037517,
"num_lines": 212
} |
__author__ = 'abuddenberg'
from gcis_clients import GcisClient, SurveyClient, survey_token, gcis_dev_auth, gcis_stage_auth
from gcis_clients.domain import Report, Chapter
# from sync_utils import realize_parents, realize_contributors
from collections import OrderedDict
import pickle
import sys
# Target GCIS instance and survey source; the dev endpoint is kept commented
# out for quick switching.
# gcis = GcisClient('http://data.gcis-dev-front.joss.ucar.edu', *gcis_dev_auth)
gcis = GcisClient('https://data-stage.globalchange.gov', *gcis_stage_auth)
surveys = SurveyClient('https://healthresources.cicsnc.org', survey_token)
# Master sync plan: report id -> ordered chapter id -> list of
# (survey metadata URL, target GCIS figure identifier) pairs.  Chapter order
# matters (gen_survey_list indexes into the key order).
# NOTE(review): the inline '#5.x' ordinal comments under
# 'water-related-illnesses' look stale — that chapter is number 6 per
# create_health_report(); confirm the ordinals.
sync_metadata_tree = {
    'usgcrp-climate-human-health-assessment-2016': OrderedDict([
        ('front-matter', [
            ('/metadata/figures/3931', 'understanding-the-exposure-pathway-diagrams'),
        ]),
        ('executive-summary', [
            ('/metadata/figures/3906', 'examples-of-climate-impacts-on-human-health'),
            ('/metadata/figures/3832', 'es-climate-change-and-health'),
            ('/metadata/figures/3833', 'es-projected-changes-in-deaths-in-us-cities-by-season'),
            ('/metadata/figures/3834', 'es-projected-change-in-temperature-ozone-and-ozone-related-premature-deaths-in-2030'),
            ('/metadata/figures/3838', 'es-estimated-deaths-and-billion-dollar-losses-from-extreme-weather-events-in-the-u-s-2004-2013'),
            ('/metadata/figures/3835', 'es-changes-in-lyme-disease-case-report-distribution'),
            ('/metadata/figures/3836', 'es-links-between-climate-change-water-quantity-and-quality-and-human-exposure-to-water-related-illness'),
            ('/metadata/figures/3837', 'es-farm-to-table'),
            ('/metadata/figures/3839', 'es-the-impact-of-climate-change-on-physical-mental-and-community-health'),
            ('/metadata/figures/3840', 'es-determinants-of-vulnerability')
        ]),
        ('climate-change-and-human-health', [
            ('/metadata/figures/3698', 'major-us-climate-trends'), #1.1 #climate-change-and-human-health
            ('/metadata/figures/3632', 'change-in-number-of-extreme-precipitation-events'), #1.2 #climate-change-and-human-health
            ('/metadata/figures/3635', 'projected-changes-in-temperature-and-precipitation-by-mid-century'), #1.3 #climate-change-and-human-health
            ('/metadata/figures/3633', 'projected-changes-in-hottest-coldest-and-wettest-driest-day-of-the-year'), #1.4 #climate-change-and-human-health
            ('/metadata/figures/3757', 'climate-change-and-health'), #1.5 #climate-change-and-human-health
            ('/metadata/figures/3933', 'sources-of-uncertainty'), #1.6 #climate-change-and-human-health
        ]),
        ('temperature-related-death-and-illness', [
            ('/metadata/figures/3811', 'climate-change-and-health-extreme-heat'), #2.1 #temperature-related-death-and-illness
            ('/metadata/figures/3585', 'heat-related-deaths-during-the-1995-chicago-heat-wave'), #2.2 #temperature-related-death-and-illness
            ('/metadata/figures/3643', 'projected-changes-in-temperature-related-death-rates'), #2.3 #temperature-related-death-and-illness
            ('/metadata/figures/3653', 'projected-changes-in-deaths-in-us-cities-by-season'), #2.4 #temperature-related-death-and-illness
        ]),
        ('air-quality-impacts', [
            ('/metadata/figures/3812', 'climate-change-and-health-outdoor-air-quality'), #3.1 #air-quality-impacts
            ('/metadata/figures/3647', 'projected-change-in-temperature-ozone-and-ozone-related-premature-deaths-in-2030'), #3.2 #air-quality-impacts
            ('/metadata/figures/3649', 'projected-change-in-ozone-related-premature-deaths'), #3.3 #air-quality-impacts
            ('/metadata/figures/3650', 'ragweed-pollen-season-lengthens'), #3.4 #air-quality-impacts
        ]),
        ('extreme-events', [
            ('/metadata/figures/3810', 'estimated-deaths-and-billion-dollar-losses-from-extreme-weather-events-in-the-us-2004-2013'), #4.1 #extreme-weather #Has Activities
            ('/metadata/figures/3808', 'climate-change-and-health-flooding'), #4.2 #extreme-weather
            ('/metadata/figures/3760', 'hurricane-induced-flood-effects-in-eastern-and-central-united-states'), #4.3 #extreme-weather
            ('/metadata/figures/3907', 'projected-increases-in-very-large-fires'), #4.4 #extreme-weather
        ]),
        ('vectorborne-diseases', [
            ('/metadata/figures/3807', 'climate-change-and-health-lyme-disease'), #5.1 #vectorborne-diseases
            ('/metadata/figures/3659', 'changes-in-lyme-disease-case-report-distribution'), #5.2 #vectorborne-diseases
            ('/metadata/figures/3658', 'life-cycle-of-blacklegged-ticks-ixodes-scapularis'), #5.3 #vectorborne-diseases
            ('/metadata/figures/3747', 'projected-change-in-lyme-disease-onset-week'), #5.4 #vectorborne-diseases
            ('/metadata/figures/3674', 'incidence-of-west-nile-neuroinvasive-disease-by-county-in-the-united-states'), #5.5 #vectorborne-diseases
            ('/metadata/figures/3675', 'climate-impacts-on-west-nile-virus-transmission'), #5.6 #vectorborne-diseases
        ]),
        ('water-related-illnesses', [
            ('/metadata/figures/3824', 'climate-change-and-health-vibrio'), #5.1 #water-related-illnesses
            ('/metadata/figures/3700', 'links-between-climate-change-water-quantity-and-quality-and-human-exposure-to-water-related-illness'), #5.2 #water-related-illnesses #TOO BIG
            ('/metadata/figures/3671', 'locations-of-livestock-and-projections-of-heavy-precipitation'), #5.3 #water-related-illnesses #TOO BIG
            ('/metadata/figures/3709', 'projections-of-vibrio-occurrence-and-abundance-in-chesapeake-bay'), #5.4 #water-related-illnesses
            ('/metadata/figures/3704', 'changes-in-suitable-coastal-vibrio-habitat-in-alaska'), #5.5 #water-related-illnesses
            ('/metadata/figures/3734', 'projected-changes-in-caribbean-gambierdiscus-species'), #5.6 #water-related-illnesses
            ('/metadata/figures/3712', 'projections-of-growth-of-alexandrium-in-puget-sound'), #5.7 #water-related-illnesses
        ]),
        ('food-safety-nutrition-and-distribution', [
            ('/metadata/figures/3579', 'farm-to-table'), #7.1 #food-safety-nutrition-and-distribution
            # ('/metadata/figures/3600', 'mycotoxin-in-corn'), #7.1 #food-safety-nutrition-and-distribution BOX 1?
            ('/metadata/figures/3809', 'climate-change-and-health-salmonella'), #7.2 #food-safety-nutrition-and-distribution
            ('/metadata/figures/3748', 'seasonality-of-human-illnesses-associated-with-foodborne-pathogens'), #7.3 #food-safety-nutrition-and-distribution
            ('/metadata/figures/3688', 'effects-of-carbon-dioxide-on-protein-and-minerals'), #7.4 #food-safety-nutrition-and-distribution
            ('/metadata/figures/3597', 'mississippi-river-level-at-st-louis-missouri'), #7.5 #food-safety-nutrition-and-distribution
            # ('/metadata/figures/3600', 'mycotoxin-in-corn'), #Box 7,1
            # ('/metadata/figures/3806', 'low-water-conditions-on-mississippi-river')
        ]),
        ('mental-health-and-well-being', [
            ('/metadata/figures/3789', 'climate-change-and-mental-health'), #8.1 #mental-health-and-well-being
            ('/metadata/figures/3722', 'the-impact-of-climate-change-on-physical-mental-and-community-health'), #8.2 #mental-health-and-well-being
        ]),
        ('populations-of-concern', [
            ('/metadata/figures/3696', 'determinants-of-vulnerability'), #9.1 #populations-of-concern
            ('/metadata/figures/3917', 'intersection-of-social-determinants-of-health-and-vulnerability'), #9.2 #populations-of-concern
            ('/metadata/figures/3758', 'vulnerability-to-the-health-impacts-of-climate-change-at-different-lifestages'), #9.3 #populations-of-concern
            ('/metadata/figures/3714', 'mapping-social-vulnerability'), #9.4 #populations-of-concern
            ('/metadata/figures/3717', 'mapping-communities-vulnerable-to-heat-in-georgia'), #9.5 #populations-of-concern
        ]),
        ('appendix-1--technical-support-document', [
            ('/metadata/figures/3623', 'scenarios-of-future-temperature-rise'), #1.1 #climate-change-and-human-health
            ('/metadata/figures/3939', 'example-increasing-spatial-resolution-of-climate-models'), #1.2 #climate-change-and-human-health
            ('/metadata/figures/3638', 'sensitivity-analysis-of-differences-in-modeling-approaches'), #1.3 #climate-change-and-human-health
            ('/metadata/figures/3932', 'tsd-sources-of-uncertainty'), #1.4 #climate-change-and-human-health
        ])
    ])
}
def main():
    """Upload the original image asset for every figure in sync_metadata_tree.

    Logs in to GCIS, loads the survey-image -> GCIS-image identifier cache,
    then walks report -> chapter -> (survey URL, figure id) and POSTs each
    figure's original file to the corresponding GCIS figure.
    """
    print gcis.test_login()

    # Cache mapping (figure_id, survey image id) -> GCIS image UUID, produced
    # by regenerate_image_id_map() below.
    image_id_map = pickle.load(open('image_id_cache.pk1', 'r'))

    # One-time setup helpers, run manually as needed:
    # regenerate_image_id_map(existing=image_id_map)
    # create_health_report()
    # create_cmip5_report()

    for report_id in sync_metadata_tree:
        for chapter_id in sync_metadata_tree[report_id]:
            for survey_url, figure_id in sync_metadata_tree[report_id][chapter_id]:
                figure, datasets = surveys.get_survey(survey_url, do_download=False)

                resp = gcis.post_figure_original(report_id, figure_id, figure.original, chapter_id=chapter_id)
                print(resp.status_code, resp.text)
                # NOTE: a fuller sync (captions, parents, contributors,
                # dataset activities, per-image create/update) was previously
                # implemented here as commented-out code; see version control
                # history if it needs to be restored.
def gen_edit_link(survey_id):
    """Return the Drupal node URL for a survey path like '/metadata/figures/3931'."""
    node = survey_id.rpartition('/')[2]
    return 'https://healthresources.globalchange.gov/node/{0}'.format(node)
def generate_activity_id(image, dataset):
    """Build a synthetic GCIS activity identifier linking an image to a dataset.

    The result is '<image id prefix>-<dataset id>-process', where the prefix is
    the portion of the image identifier before its first hyphen.  Returns None
    (after logging a warning to stderr) if either argument is unusable.
    """
    try:
        # Was joined with '-process', which yielded a double hyphen
        # ('...--process'); 'process' matches the form used in sync_states.py.
        return '-'.join([image.identifier.split('-')[0], dataset.identifier, 'process'])
    except Exception:
        # Py2/py3-compatible handler (was the Python 2-only 'except Exception, e').
        sys.stderr.write('WARNING: Activity identifier generation failed\n')
def regenerate_image_id_map(existing=None):
    """Assign a UUID to every survey image that doesn't already have one.

    Walks all figures in sync_metadata_tree, maps (figure_id, survey image id)
    to a fresh uuid4 string unless already present in `existing`, and pickles
    the result to 'image_id_cache.pk1'.
    """
    from uuid import uuid4

    image_id_map = existing if existing else {}

    for report_id in sync_metadata_tree:
        for chapter_id in sync_metadata_tree[report_id]:
            for survey_url, figure_id in sync_metadata_tree[report_id][chapter_id]:
                s, ds = surveys.get_survey(survey_url, do_download=False)
                for img in s.images:
                    # Keep existing assignments stable; only mint UUIDs for
                    # images not seen before.
                    if (figure_id, img.identifier) in image_id_map:
                        print 'skipping: ', (figure_id, img.identifier)
                        continue
                    else:
                        print 'added: ', (figure_id, img.identifier)
                        image_id_map[(figure_id, img.identifier)] = str(uuid4())

    with open('image_id_cache.pk1', 'wb') as fout:
        pickle.dump(image_id_map, fout)
    print 'image_id_map regenerated'
def gen_survey_list():
    """Fetch all surveys and return (chapter_id, figure_num, identifier, title, url) tuples.

    NOTE(review): `chapters[s.chapter]` indexes the sync tree's chapter ids by
    s.chapter directly; since 'front-matter' is the first key, confirm the
    survey's chapter number is aligned with that ordering (off-by-one risk).
    NOTE(review): other code unpacks surveys.get_survey() as (survey, datasets);
    here the raw return value is used as the survey — verify.
    """
    realized_list = []
    chapters = [c for c in sync_metadata_tree['usgcrp-climate-human-health-assessment-2016']]
    survey_list = surveys.get_list()
    for i, survey in enumerate(survey_list):
        url = survey['url']
        print 'Processing: {b}{url} ({i}/{total})'.format(b=surveys.base_url, url=url, i=i + 1, total=len(survey_list))
        s = surveys.get_survey(url)
        chp_id = chapters[s.chapter] if s and s.chapter else None
        if s:
            print s.identifier
            print chp_id, s.figure_num, s.title
            realized_list.append((chp_id, s.figure_num, s.identifier, s.title, url))
        print ''
    return realized_list
def create_health_report():
    """Create the 2016 Climate & Health assessment report and its chapters in GCIS."""
    hr = Report({
        'identifier': 'usgcrp-climate-human-health-assessment-2016',
        'report_type_identifier': 'assessment',
        'title': 'The Impacts of Climate Change on Human Health in the United States: A Scientific Assessment',
        'url': 'http://www.globalchange.gov/health-assessment',
        'publication_year': '2016',
        'contact_email': 'healthreport@usgcrp.gov'
    })

    # Chapter tuples: (identifier, number or None for unnumbered, title).
    # ['report_identifier', 'identifier', 'number', 'title', 'url']
    chapters = [
        ('executive-summary', None, 'Executive Summary'),
        ('climate-change-and-human-health', 1, 'Climate Change and Human Health'),
        ('temperature-related-death-and-illness', 2, 'Temperature-Related Death and Illness'),
        ('air-quality-impacts', 3, 'Air Quality Impacts'),
        ('extreme-events', 4, 'Impacts of Extreme Events on Human Health'),
        ('vectorborne-diseases', 5, 'Vectorborne Diseases'),
        ('water-related-illnesses', 6, 'Climate Impacts on Water-Related Illnesses'),
        # NOTE(review): the double-hyphen identifier below does not match the
        # 'food-safety-nutrition-and-distribution' key used in
        # sync_metadata_tree — confirm which identifier GCIS actually has.
        ('food-safety--nutrition--and-distribution', 7, 'Food Safety, Nutrition, and Distribution'),
        ('mental-health-and-well-being', 8, 'Mental Health and Well-Being'),
        ('populations-of-concern', 9, 'Climate-Health Risk Factors and Populations of Concern'),
        ('appendix-1--technical-support-document', None, 'Appendix 1: Technical Support Document'),
        ('appendix-2--process-for-literature-review', None, 'Appendix 2: Process for Literature Review'),
        ('appendix-3--report-requirements-development-process-review-and-approval', None, 'Appendix 3: Report Requirements, Development Process, Review, and Approval'),
        ('appendix-4--documenting-uncertainty-confidence-and-likelihood', None, 'Appendix 4: Documenting Uncertainty: Confidence and Likelihood'),
        ('appendix-5--glossary-and-acronyms', None, 'Appendix 5: Glossary and Acronyms'),
        ('front-matter', None, 'Front Matter')
    ]

    print gcis.create_report(hr)

    for id, num, title in chapters:
        ch = Chapter({
            'identifier': id,
            'number': num,
            'title': title,
            'report_identifier': hr.identifier
        })
        print gcis.create_chapter(hr.identifier, ch)
def create_cmip5_report():
    """Create the NOAA CMIP3/CMIP5 technical report and its chapters in GCIS."""
    cmip = Report({
        'identifier': 'noaa-techreport-nesdis-144',
        'report_type_identifier': 'report',
        'title': 'Regional Surface Climate Conditions in CMIP3 and CMIP5 for the United States: Differences, Similarities, and Implications for the U.S. National Climate Assessment',
        'publication_year': '2015'
    })
    print gcis.create_report(cmip)

    # Chapter tuples: (identifier, number or None for unnumbered, title).
    chapters = [
        ('introduction', 1, 'Introduction'),
        ('data', 2, 'Data'),
        ('methods', 3, 'Methods'),
        ('temperature', 4, 'Temperature'),
        ('precipitation', 5, 'Precipitation'),
        ('summary', 6, 'Summary'),
        ('appendix', None, 'Appendix'),
        ('references', None, 'References'),
        ('acknowledgements', None, 'Acknowledgements'),
    ]
    for id, num, title in chapters:
        ch = Chapter({
            'identifier': id,
            'number': num,
            'title': title,
            'report_identifier': cmip.identifier
        })
        print gcis.create_chapter(cmip.identifier, ch)
# Only run the sync when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| {
"repo_name": "USGCRP/gcis-py-client",
"path": "bin/sync_surveys.py",
"copies": "1",
"size": "17672",
"license": "bsd-3-clause",
"hash": 8643370061094882000,
"line_mean": 57.3234323432,
"line_max": 182,
"alpha_frac": 0.6251697601,
"autogenerated": false,
"ratio": 3.361613087312155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4486782847412155,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abuddenberg'
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from gcis_clients import GcisClient, SurveyClient, survey_token, gcis_dev_auth, gcis_stage_auth
from domain import Report, Chapter
from sync_utils import realize_parents, realize_contributors
from states import sync_metadata_tree
import pickle
import sys
import re
import traceback
# Target GCIS instance and survey source; the stage endpoint is kept
# commented out for quick switching.
# gcis = GcisClient('https://data-stage.globalchange.gov', *gcis_stage_auth)
gcis = GcisClient('https://data-review.globalchange.gov', *gcis_stage_auth)
surveys = SurveyClient('https://state-resources.cicsnc.org', survey_token)
def main():
    """Create the Climate Science Special Report shell in GCIS."""
    print(gcis.test_login())

    report = Report({
        'identifier': 'climate-science-special-report',
        'report_type_identifier': 'report',
        'title': 'Climate Science Special Report',
        # 'url': 'https://statesummaries.cicsnc.org/',
        'publication_year': '2017',
        'contact_email': ''
    })
    print(gcis.create_report(report))
    # NOTE: a full per-figure sync (original uploads, title fixups, parents,
    # contributors, dataset activities) was previously implemented here as
    # commented-out code; see version control history if it needs restoring.
def generate_activity_id(image, dataset):
    """Build a synthetic GCIS activity identifier for an (image, dataset) pair.

    The result is '<image id prefix>-<dataset id>-process', where the prefix is
    the portion of the image identifier before its first hyphen.  Returns None
    (after logging a warning and traceback) if either argument is unusable.
    """
    try:
        return '-'.join([image.identifier.split('-')[0], dataset.identifier, 'process'])
    except Exception:
        # Py2/py3-compatible handler (was the Python 2-only 'except Exception, e').
        sys.stderr.write('WARNING: Activity identifier generation failed\n')
        traceback.print_exc()
def gen_survey_list():
    """Fetch every survey and group (url, ordinal, identifier, title) tuples by chapter.

    The chapter id is parsed from the survey's node_title ('group/<chapter>');
    surveys whose title doesn't match are grouped under ''.
    """
    # Example: dumping the result and printing it sorted per chapter:
    # with open('survey_list.pk', 'wb') as out:
    #     pickle.dump(gen_survey_list(), out)
    # surveys = pickle.load(open('survey_list.pk'))
    # for st in sync_metadata_tree['noaa-led-state-summaries-2016']:
    #     print(st)
    #     for f in sorted(surveys[st], key=lambda x: x[1]):
    #         print("('{0}', '{1}', '{2}'),".format(f[0], f[2], f[1]))
    #     print('')

    by_chapter = {}
    all_surveys = surveys.get_list()
    total = len(all_surveys)

    for idx, entry in enumerate(all_surveys, start=1):
        url = entry['url']
        m = re.match('group/([a-z-]+)', entry['node_title'])
        chapter = m.group(1) if m else ''

        print('Processing: {b}{url} ({i}/{total})'.format(b=surveys.base_url, url=url, i=idx, total=total))
        survey, _datasets = surveys.get_survey(url)
        if survey:
            print(survey.identifier)
            print(chapter, survey.ordinal, survey.title)
            by_chapter.setdefault(chapter, []).append((url, survey.ordinal, survey.identifier, survey.title))
        print('')

    return by_chapter
def create_nlss_report():
    """Create the NOAA-led State Summaries 2017 report and its per-state chapters."""
    nlss = Report({
        'identifier': 'noaa-led-state-summaries-2017',
        'report_type_identifier': 'report',
        'title': 'NOAA-led State Summaries 2017',
        'url': 'https://statesummaries.cicsnc.org/',
        'publication_year': '2017',
        'contact_email': ''
    })

    print(gcis.create_report(nlss))

    # Chapter ids come from the sync tree, numbered in order; titles are the
    # ids rendered in Title Case.
    for number, chapter_id in enumerate(sync_metadata_tree['noaa-led-state-summaries-2017'], start=1):
        title = ' '.join(word.capitalize() for word in chapter_id.split('-'))
        chapter = Chapter({
            'identifier': chapter_id,
            'number': number,
            'title': title,
            'report_identifier': nlss.identifier
        })
        print(gcis.create_chapter(nlss.identifier, chapter))
# Only run the sync when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| {
"repo_name": "USGCRP/gcis-py-client",
"path": "bin/sync_states.py",
"copies": "1",
"size": "5005",
"license": "bsd-3-clause",
"hash": -7323429383795670000,
"line_mean": 36.3507462687,
"line_max": 155,
"alpha_frac": 0.600999001,
"autogenerated": false,
"ratio": 3.5471296952515945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46481286962515944,
"avg_score": null,
"num_lines": null
} |
__author__ = 'abuddenberg'
import requests
import pickle
import json
from bs4 import BeautifulSoup
def http_resp(fn):
    """Decorator: return the wrapped call's HTTP response when its status is
    200, otherwise raise an Exception carrying the status code and body text.
    """
    from functools import wraps

    @wraps(fn)  # preserve the wrapped function's name/docstring for debugging
    def wrapped(*args, **kwargs):
        resp = fn(*args, **kwargs)
        if resp.status_code == 200:
            return resp
        raise Exception('Status: {code} \n{txt}'.format(code=resp.status_code, txt=resp.text))

    return wrapped
class Nca3Client(object):
    """Thin client for the NCA3 Drupal site's GCIS endpoints.

    Wraps a requests.Session with optional HTTP basic auth, persists session
    cookies to a pickle jar on disk, and exposes helpers for logging in and
    reading/updating figure nodes.
    """

    def __init__(self, url, username, password, http_basic_user=None, http_basic_pass=None):
        self.base_url = url
        self.s = requests.Session()
        self.s.auth = (http_basic_user, http_basic_pass)
        # self.s.headers = {'content-type': 'application/json'}
        self.drupal_user = username
        self.drupal_pass = password
        self.cookie_jar = '/tmp/cookies'

        # Best effort: reuse cookies from a previous run; a missing or
        # unreadable jar just means the session starts fresh.
        try:
            # 'rb' (was 'r'): the jar is written with pickle in binary mode
            # in do_login(), so it must be read back in binary mode too.
            self.s.cookies = pickle.load(open(self.cookie_jar, 'rb'))
        except Exception:
            # Py2/py3-compatible handler (was the Python 2-only 'except Exception, e').
            pass

    def do_login(self):
        """Log in to Drupal via the /user form and persist the session cookies."""
        url = '{b}/user'.format(b=self.base_url)

        form = BeautifulSoup(self.s.get(url).text).find('form', id='user-login')
        # NOTE(review): find_all returns a list of tags, which is then POSTed
        # as the form_build_id value; this looks like it should be
        # form.find('input', ...)['value'] — confirm against the Drupal form
        # before changing, since the current behavior may be tolerated.
        form_build_id = form.find_all('input', attrs={'name': 'form_build_id'})

        resp = self.s.post(
            url,
            data={
                'name': self.drupal_user,
                'pass': self.drupal_pass,
                'form_id': 'user_login',
                'form_build_id': form_build_id,
                'op': 'Log in'
            },
            allow_redirects=False
        )
        pickle.dump(self.s.cookies, open(self.cookie_jar, 'wb'))
        return resp

    @http_resp
    def get_all_captions(self):
        """GET all figure/table captions; raises on non-200 via @http_resp."""
        url = '{b}/gcis/figure-table-captions'.format(b=self.base_url)
        resp = self.s.get(url, verify=False, headers={'content-type': 'application/json'}, cookies=self.s.cookies)
        return resp

    def get_figure(self, nid):
        """GET the JSON document for the node with the given Drupal node id."""
        url = '{b}/gcis/node/{nid}'.format(b=self.base_url, nid=nid)
        return self.s.get(url, verify=False, headers={'content-type': 'application/json'}, cookies=self.s.cookies).json()

    def update_figure(self, nid, figure_frag):
        """PUT a partial figure document to the node, using a fresh CSRF token."""
        url = '{b}/gcis/node/{nid}'.format(b=self.base_url, nid=nid)
        token_url = '{b}/services/session/token'.format(b=self.base_url)

        token = self.s.get(token_url, verify=False, cookies=self.s.cookies).text
        return self.s.put(url, data=json.dumps(figure_frag), verify=False, cookies=self.s.cookies, headers={'X-CSRF-Token': token, 'content-type': 'application/json'})
| {
"repo_name": "USGCRP/gcis-py-client",
"path": "gcis_clients/nca3_client.py",
"copies": "1",
"size": "2500",
"license": "bsd-3-clause",
"hash": -6439263841825736000,
"line_mean": 33.2465753425,
"line_max": 167,
"alpha_frac": 0.5848,
"autogenerated": false,
"ratio": 3.3557046979865772,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9363722411021995,
"avg_score": 0.015356457392916195,
"num_lines": 73
} |
__author__ = 'abuddenberg'
test_figure_json = """
{
"files": [
{
"file_type": null,
"uri": "/file/4cdc6d08-9153-49b2-a924-71f1061dd617",
"url": "/img/ce/22/be3c961e83c6f567744be8a28add/ch02_html_m7f662db3.png",
"href": "http://data.gcis-dev-front.joss.ucar.edu/img/ce/22/be3c961e83c6f567744be8a28add/ch02_html_m7f662db3.png",
"file": "ce/22/be3c961e83c6f567744be8a28add/ch02_html_m7f662db3.png",
"identifier": "4cdc6d08-9153-49b2-a924-71f1061dd617"
}
],
"usage_limits": null,
"contributors": [],
"kindred_figures": [],
"time_end": null,
"href": "http://data.gcis-dev-front.joss.ucar.edu/report/nca3draft/chapter/our-changing-climate/figure/observed-us-temperature-change.json",
"references": [],
"images": [
{
"lon_min": "-124.80",
"create_dt": "2013-08-01T00:00:00",
"lat_max": "49.38",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Map - Contiguous U.S.",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-66.95",
"position": null,
"attributes": "Contiguous U.S., temperature, observed",
"identifier": "8e74f576-a5af-46c0-b33a-f30072118b86",
"lat_min": "24.50",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-116.03",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "49.00",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Great Plains North",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-95.42",
"position": null,
"attributes": "Great Plains, temperature, observed",
"identifier": "1f5a3cdd-fc45-403e-bf11-d1772005b430",
"lat_min": "40.00",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-106.63",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "40.00",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Great Plains South",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-93.52",
"position": null,
"attributes": "Great Plains, temperature, observed",
"identifier": "68537d68-b14c-4811-908a-5dc0ab73879b",
"lat_min": "25.83",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-124.80",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "49.38",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - U.S. Average",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-66.95",
"position": null,
"attributes": "Contiguous U.S., temperature, observed",
"identifier": "230cb2f8-92e0-4897-ab5f-4d6339673832",
"lat_min": "24.50",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-94.70",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "39.62",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Southeast",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-75.22",
"position": null,
"attributes": "Southeast, temperature, observed",
"identifier": "ca983a87-53a7-4c42-b0e9-18d26fad40ba",
"lat_min": "24.50",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "173.00",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "71.83",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Alaska",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-130.00",
"position": null,
"attributes": "Alaska, temperature, observed",
"identifier": "69da6d93-4426-4061-a2a1-7b3d01f2dc1c",
"lat_min": "54.67",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-82.67",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "47.46",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Northeast",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-66.95",
"position": null,
"attributes": "Northeast, temperature, observed",
"identifier": "fa83c34b-7b67-4b74-bcba-5bf60ba7730f",
"lat_min": "37.17",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-97.20",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "49.38",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Midwest",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-80.53",
"position": null,
"attributes": "Midwest, temperature, observed",
"identifier": "b180cfd9-b064-4644-a9a1-d2c3660c1be7",
"lat_min": "36.00",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-124.40",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "42.00",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Southwest",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-102.00",
"position": null,
"attributes": "Southwest, temperature, observed",
"identifier": "26a28c2a-75f2-47f7-a40f-becfc468d3d6",
"lat_min": "31.33",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-162.00",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "23.00",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Hawaii",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-154.67",
"position": null,
"attributes": "Hawaii, temperature, observed",
"identifier": "f69194e8-397d-4f9c-836c-335d259ee09c",
"lat_min": "16.92",
"time_start": "1901-01-01T00:00:00"
},
{
"lon_min": "-124.80",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "49.00",
"usage_limits": null,
"description": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Northwest",
"time_end": "2012-12-31T00:00:00",
"lon_max": "-111.00",
"position": null,
"attributes": "Northwest, temperature, observed",
"identifier": "db4d291d-17c5-4e10-b760-6c8799a8d709",
"lat_min": "42.00",
"time_start": "1901-01-01T00:00:00"
}
],
"lat_min": null,
"create_dt": "2013-08-03T10:04:00",
"lat_max": null,
"time_start": null,
"title": "Observed U.S. Temperature Change",
"parents": [],
"ordinal": 7,
"lon_min": null,
"report_identifier": "nca3draft",
"chapter": {
"url": null,
"identifier": "our-changing-climate",
"number": 2,
"report_identifier": "nca3draft",
"title": "Our Changing Climate"
},
"submission_dt": null,
"uri": "/report/nca3draft/chapter/our-changing-climate/figure/observed-us-temperature-change",
"lon_max": null,
"caption": "The colors on the map show temperature changes over the past 22 years (1991-2012) compared to the 1901-1960 average. The bars on the graphs show the average temperature changes by decade for 1901-2012 (relative to the 1901-1960 average) for each region. The far right bar in each graph (2000s decade) includes 2011 and 2012. The period from 2001 to 2012 was warmer than any previous decade in every region.",
"source_citation": "NOAA NCDC / CICS-NC",
"attributes": null,
"identifier": "observed-us-temperature-change",
"chapter_identifier": "our-changing-climate",
"contributors": [
{
"role_type_identifier": "contributing_author",
"organization_uri": "/organization/university-california-san-diego",
"href": "https://data-stage.globalchange.gov/contributor/1882.json",
"person_uri": "/person/1028",
"uri": "/contributor/1882"
},
{
"role_type_identifier": "convening_lead_author",
"organization_uri": "/organization/university-illinois",
"href": "https://data-stage.globalchange.gov/contributor/1256.json",
"person_uri": "/person/1008",
"uri": "/contributor/1256"
}
]
}
"""
test_image_json = """
{
"files": [
{
"file_type": null,
"uri": "/file/5b6257ec-5277-4d48-a0e7-f457486b1af1",
"url": "/img/08/6a/9debf1a7b538e2887c0a798a1129/unnamed",
"href": "http://data.gcis-dev-front.joss.ucar.edu/img/08/6a/9debf1a7b538e2887c0a798a1129/unnamed",
"file": "08/6a/9debf1a7b538e2887c0a798a1129/unnamed",
"identifier": "5b6257ec-5277-4d48-a0e7-f457486b1af1"
}
],
"lon_min": "-116.03",
"create_dt": "2013-06-27T15:16:00",
"lat_max": "49.00",
"time_start": "1901-01-01T00:00:00",
"submission_dt": null,
"contributors": [],
"title": "Observed U.S. Temperature Change Decadal Bar Graph - Great Plains North",
"uri": "/image/1f5a3cdd-fc45-403e-bf11-d1772005b430",
"lon_max": "-95.42",
"position": null,
"usage_limits": null,
"href": "http://data.gcis-dev-front.joss.ucar.edu/image/1f5a3cdd-fc45-403e-bf11-d1772005b430.json",
"figures": [
{
"ordinal": 7,
"lon_min": null,
"create_dt": "2013-08-03T10:04:00",
"lat_max": null,
"time_start": null,
"submission_dt": null,
"title": "Observed U.S. Temperature Change",
"report_identifier": "nca3draft",
"lon_max": null,
"caption": "The colors on the map show temperature changes over the past 22 years (1991-2012) compared to the 1901-1960 average. The bars on the graphs show the average temperature changes by decade for 1901-2012 (relative to the 1901-1960 average) for each region. The far right bar in each graph (2000s decade) includes 2011 and 2012. The period from 2001 to 2012 was warmer than any previous decade in every region.",
"usage_limits": null,
"source_citation": "NOAA NCDC / CICS-NC",
"attributes": null,
"identifier": "observed-us-temperature-change",
"time_end": null,
"lat_min": null,
"chapter_identifier": "our-changing-climate"
}
],
"references": [],
"attributes": "Great Plains, temperature, observed",
"parents": [
{
"url": "/dataset/CDDv2",
"note": null,
"publication_type_identifier": "dataset",
"relationship": "prov:wasDerivedFrom",
"label": "dataset : CDDv2"
}
],
"identifier": "1f5a3cdd-fc45-403e-bf11-d1772005b430",
"time_end": "2012-12-31T00:00:00",
"lat_min": "40.00",
"description": null
}
"""
webform_json_temp = """
{
"what_type_of_source_provided_this_figure": "person",
"when_was_this_figure_created": "2013-08-03 10:04:00",
"what_type_of_figure_is_this": "map",
"what_is_the_file_name_extension_of_the_figure": "CS_Net Change in Ann Temp_12910_v8.png",
"what_is_the_figure_id": "observed-us-temperature-change",
"what_is_the_name_of_the_figure_as_listed_in_the_report": "Observed U.S. Temperature Change",
"what_is_the_creators_phone_number": "(828) 257-3137",
"nid": "3175",
"how_many_images_are_included_within_the_figure": "11",
"what_is_the_creators_email_address": "ken.kunkel@noaa.gov",
"list_the_creator_of_the_figure": "Kenneth Kunkel, NCDC/CICS-NC",
"path": "/system/files/cs_net_change_in_ann_temp_12910_v10.png"
}
"""
webform_json_precip = """
{
"nid": "2506",
"what_type_of_source_provided_this_figure": "person",
"when_was_this_figure_created": "2013-07-02 13:07:00",
"what_type_of_figure_is_this": "map",
"what_is_the_file_name_extension_of_the_figure": "CS_Extreme Heavy precipitation_v7.png",
"what_is_the_creators_phone_number": "(828) 257-3137",
"what_is_the_figure_id": "observed-change-in-very-heavy-precipitation",
"what_is_the_name_of_the_figure_as_listed_in_the_report": "Observed Changes in Very Heavy Precipitation",
"what_is_the_figure_orientation": "original_for_the_report",
"ready_for_publication": "yes",
"what_is_the_chapter_and_figure_number": "2.17",
"how_many_images_are_included_within_the_figure": "10",
"what_is_the_creators_email_address": "ken.kunkel@noaa.gov",
"list_the_creator_of_the_figure": "Kenneth Kunkel, NOAA NCDC/CICS-NC",
"path": "/system/files/cs_extreme_heavy_precipitation_v7.png"
}
"""
test_dataset_json = """
{
"vertical_extent": null,
"native_id": "TBD",
"cite_metadata": "TBD",
"scale": null,
"temporal_extent": "1895-01-01T00:00:00 2013-08-31T00:00:00",
"version": "2",
"release_dt": "2014-01-01T00:00:00",
"scope": null,
"type": null,
"processing_level": null,
"data_qualifier": null,
"access_dt": "2011-12-31T00:00:00",
"description": "Historical monthly temperature and precipitation time series for 344 climate divisions in the conterminous United States for 1895-present.",
"spatial_ref_sys": null,
"spatial_res": null,
"spatial_extent": "maximum_latitude: 49; minimum_latitude: 24; maximum_longitude: 125; minimum_longitude: 65;",
"doi": null,
"name": "Climate Division Database Version 2",
"url": "https://www.ncdc.noaa.gov/ersst/merge.php",
"publication_year": "2014",
"attributes": "Monthly average maximum temperature, monthly average minimum temperature, monthly average temperature, total monthly precipitation",
"identifier": "cddv2"
}
""" | {
"repo_name": "USGCRP/gcis-py-client",
"path": "gcis_clients/test/test_data.py",
"copies": "1",
"size": "15159",
"license": "bsd-3-clause",
"hash": 6797001724531661000,
"line_mean": 40.3079019074,
"line_max": 432,
"alpha_frac": 0.5511577281,
"autogenerated": false,
"ratio": 3.057482855990319,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9095528665052781,
"avg_score": 0.00262238380750759,
"num_lines": 367
} |
__author__ = 'Acheron'
import re
import argparse
class CCard:
    """Credit-card number validator (Luhn checksum) and issuer identifier."""

    def __init__(self, card_number):
        self.number = str(card_number)
        self.length = len(self.number)
        # Issuer detection table: allowed leading digits and card lengths.
        self.BANK_CODES = {
            'AMEX': {'lead_numbers': (34, 37), 'length': (15, )},
            'MasterCard': {'lead_numbers': (51, 52, 53, 54, 55), 'length': (16, )},
            'Visa': {'lead_numbers': (4, ), 'length': (13, 16)},
        }
        # Matches either a single leading "4" (Visa) or any two leading digits.
        self.patter_bank = re.compile(r'^([4]|\d{2})', re.IGNORECASE)

    def _getBankType(self):
        """Return the issuer name matching the card's prefix and length, else None."""
        match = re.search(self.patter_bank, self.number)
        if match is None:
            return None
        lead = int(match.group(0))
        for bank, spec in self.BANK_CODES.items():
            if lead in spec['lead_numbers'] and self.length in spec['length']:
                return bank
        return None

    def _checkCard(self):
        """Run the Luhn checksum.

        Returns True/False for a digit string of valid length, None when the
        number is too short, and False for non-digit input (the original
        raised ValueError in that case).
        """
        # Too short for a credit card; shortest supported scheme is 13 digits.
        if self.length < 13:
            return None
        if not self.number.isdigit():
            return False
        total = 0
        # Double every second digit starting from the second-to-last; a
        # doubled two-digit product contributes the sum of its digits
        # (equivalent to subtracting 9).
        for offset, ch in enumerate(reversed(self.number)):
            d = int(ch)
            if offset % 2 == 1:
                d *= 2
                if d > 9:
                    d -= 9
            total += d
        # BUG FIX: the original tested str(total)[1] == '0', which inspects
        # the *second character* — wrongly accepting totals 101-109 and
        # wrongly rejecting 110/120/130/140. The Luhn rule is simply that
        # the total must be a multiple of 10.
        return total % 10 == 0

    def check(self):
        """Return the issuer name for a valid card number.

        Returns False for an invalid number; may return None for a valid
        number whose issuer is not in BANK_CODES.
        """
        if self._checkCard():
            return self._getBankType()
        return False
if __name__ == '__main__':
    # CLI entry point: validate the card number passed via -n and print
    # either the issuing bank or INVALID.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-n', help='number of blocks', nargs=1)
    cli_args = arg_parser.parse_args()
    bank = CCard(int(cli_args.n[0])).check()
    print(bank if bank else 'INVALID')
"repo_name": "Charnelx/CS50",
"path": "pset6/credit.py",
"copies": "1",
"size": "2276",
"license": "mit",
"hash": -4579639257712389000,
"line_mean": 26.743902439,
"line_max": 83,
"alpha_frac": 0.5039577836,
"autogenerated": false,
"ratio": 3.7278688524590162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47318266360590167,
"avg_score": null,
"num_lines": null
} |
import exceptions
import types
from pygsl import errno
class gsl_Error(exceptions.Exception):
"""
base of all gsl exceptions defined here
"""
IsA = 'gsl_Error'
errno = None
pass
class gsl_Warning(exceptions.Warning):
"""
base of all gsl warnings defined here
"""
errno = None
pass
class gsl_ArithmeticError(gsl_Error,exceptions.ArithmeticError):
"""
base of all common arithmetic exceptions
"""
pass
class gsl_RangeError(gsl_ArithmeticError):
"""
output range error, e.g. exp(1e100)
"""
errno = errno.GSL_ERANGE
pass
class gsl_PointerError(gsl_Error):
"""
invalid pointer
"""
errno = errno.GSL_EFAULT
# Perhaps a lookup Error ???
pass
class gsl_InvalidArgumentError(gsl_Error):
"""
invalid argument supplied by user
"""
errno = errno.GSL_EINVAL
pass
class gsl_GenericError(gsl_Error):
"""
generic failure
"""
errno = errno.GSL_EFAILED
pass
class gsl_FactorizationError(gsl_Error):
"""
factorization failed
"""
errno = errno.GSL_EFACTOR
pass
class gsl_SanityCheckError(gsl_Error):
"""
sanity check failed - shouldn't happen
"""
errno = errno.GSL_ESANITY
pass
class gsl_BadFuncError(gsl_Error):
"""
problem with user-supplied function
"""
errno = errno.GSL_EBADFUNC
pass
class gsl_RunAwayError(gsl_ArithmeticError):
"""
iterative process is out of control
"""
errno = errno.GSL_ERUNAWAY
pass
class gsl_MaximumIterationError(gsl_ArithmeticError):
"""
exceeded max number of iterations
"""
errno = errno.GSL_EMAXITER
pass
class gsl_ZeroDivisionError(gsl_Error,exceptions.ZeroDivisionError):
"""
"""
errno = errno.GSL_EZERODIV
pass
class gsl_BadToleranceError(gsl_Error):
"""
user specified an invalid tolerance
"""
errno = errno.GSL_EBADTOL
pass
class gsl_ToleranceError(gsl_ArithmeticError):
"""
failed to reach the specified tolerance
"""
errno = errno.GSL_ETOL
pass
class gsl_UnderflowError(gsl_Error,exceptions.OverflowError):
"""
"""
errno = errno.GSL_EUNDRFLW
pass
class gsl_OverflowError(gsl_Error,exceptions.OverflowError):
"""
"""
errno = errno.GSL_EOVRFLW
pass
class gsl_AccuracyLossError(gsl_ArithmeticError):
"""
failed to reach the specified tolerance
"""
errno = errno.GSL_ELOSS
pass
class gsl_RoundOffError(gsl_ArithmeticError):
"""
failed because of roundoff error
"""
errno = errno.GSL_EROUND
pass
class gsl_BadLength(gsl_Error):
"""
matrix; vector lengths are not conformant
"""
errno = errno.GSL_EBADLEN
pass
class gsl_MatrixNotSquare(gsl_Error):
"""
matrix not square
"""
errno = errno.GSL_ENOTSQR
pass
class gsl_SingularityError(gsl_ArithmeticError):
"""
apparent singularity detected
"""
errno = errno.GSL_ESING
pass
class gsl_DivergeError(gsl_ArithmeticError):
"""
integral or series is divergent
"""
errno = errno.GSL_EDIVERGE
pass
class gsl_NoHardwareSupportError(gsl_Error):
"""
requested feature is not supported by the hardware
"""
errno = errno.GSL_EUNSUP
pass
class gsl_NotImplementedError(gsl_Error, exceptions.NotImplementedError):
"""
requested feature not (yet) implemented
"""
errno = errno.GSL_EUNIMPL
pass
class gsl_CacheLimitError(gsl_Error):
"""
cache limit exceeded
"""
errno = errno.GSL_ECACHE
pass
class gsl_TableLimitError(gsl_Error):
"""
Table limit exceeded
"""
errno = errno.GSL_ETABLE
pass
class gsl_NoProgressError(gsl_ArithmeticError):
"""
iteration is not making progress towards solution
"""
errno = errno.GSL_ENOPROG
pass
class gsl_JacobianEvaluationError(gsl_ArithmeticError):
"""
jacobian evaluations are not improving the solution
"""
errno = errno.GSL_ENOPROGJ
pass
class gsl_ToleranceFError(gsl_ArithmeticError):
"""
cannot reach the specified tolerance in F
"""
errno = errno.GSL_ETOLF
pass
class gsl_ToleranceXError(gsl_ArithmeticError):
"""
cannot reach the specified tolerance in X
"""
errno = errno.GSL_ETOLX
pass
class gsl_ToleranceGradientError(gsl_ArithmeticError):
"""
cannot reach the specified tolerance in gradient
"""
errno = errno.GSL_ETOLG
pass
class gsl_EOFError(gsl_Error, exceptions.EOFError):
"""
end of file
"""
errno = errno.GSL_EOF
pass
class gsl_FloatingPointError(gsl_Error,exceptions.FloatingPointError):
"""
"""
pass
class gsl_DomainWarning(gsl_Warning):
"""
used by pygsl.histogram
"""
errno = errno.GSL_EDOM
class gsl_DomainError(gsl_Error):
"""
Domain Error. e.g. sqrt(-1)
"""
errno = errno.GSL_EDOM
pass
class pygsl_StrideError(gsl_SanityCheckError):
"""
Could not recalculate a stride of a python array object to the stride
of a gsl_vector or gsl_matrix.
"""
errno = errno.PyGSL_ESTRIDE
pass
class pygsl_NotImplementedError(gsl_Error, exceptions.NotImplementedError):
"""
Base for all Errors, which are known but not implemented yet!
"""
pass
def _get_exceptions(subclass):
    """Return every class in this module that subclasses *subclass* and
    declares an integer ``errno`` attribute (Python 2 implementation)."""
    tmp = []
    globs = globals()
    for name in globs:
        i = globs[name]
        # Python 2.5 compat note (kept disabled): new-style classes show up
        # as types, so a types.ClassType check cannot be used here.
        #if type(i) != types.ClassType:
        #    print "%s is not a class" % (i,)
        #    continue
        try:
            # issubclass raises TypeError for non-class globals; skip those.
            if not issubclass(i, subclass):
                continue
        except TypeError:
            continue
        try:
            t = i.errno
        except AttributeError:
            # No errno attribute at all: not a GSL error/warning class.
            continue
        if type(t) != types.IntType:
            # Base classes leave errno as None; only export concrete codes.
            continue
        int(t)
        tmp.append(i)
    return tmp
def get_exceptions():
    """Return all concrete gsl exception classes (subclasses of gsl_Error)."""
    return _get_exceptions(gsl_Error)
def get_warnings():
    """Return all concrete gsl warning classes (subclasses of gsl_Warning)."""
    return _get_exceptions(gsl_Warning)
| {
"repo_name": "juhnowski/FishingRod",
"path": "production/pygsl-0.9.5/pygsl/errors.py",
"copies": "1",
"size": "6452",
"license": "mit",
"hash": -8397720153080963000,
"line_mean": 18.8523076923,
"line_max": 77,
"alpha_frac": 0.6328270304,
"autogenerated": false,
"ratio": 3.5685840707964602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.967477073284315,
"avg_score": 0.0053280736706620094,
"num_lines": 325
} |
import sys
if sys.version_info[0] < 3:
import exceptions
Exception = exceptions.Exception
Warning = exceptions.Warning
import types
from pygsl import errno
class gsl_Error(Exception):
"""
base of all gsl exceptions defined here
"""
IsA = 'gsl_Error'
errno = None
pass
class gsl_Warning(Warning):
"""
base of all gsl warnings defined here
"""
errno = None
pass
class gsl_ArithmeticError(gsl_Error,ArithmeticError):
"""
base of all common arithmetic exceptions
"""
pass
class gsl_RangeError(gsl_ArithmeticError):
"""
output range error, e.g. exp(1e100)
"""
errno = errno.GSL_ERANGE
pass
class gsl_PointerError(gsl_Error):
"""
invalid pointer
"""
errno = errno.GSL_EFAULT
# Perhaps a lookup Error ???
pass
class gsl_InvalidArgumentError(gsl_Error):
"""
invalid argument supplied by user
"""
errno = errno.GSL_EINVAL
pass
class gsl_GenericError(gsl_Error):
"""
generic failure
"""
errno = errno.GSL_EFAILED
pass
class gsl_FactorizationError(gsl_Error):
"""
factorization failed
"""
errno = errno.GSL_EFACTOR
pass
class gsl_SanityCheckError(gsl_Error):
"""
sanity check failed - shouldn't happen
"""
errno = errno.GSL_ESANITY
pass
class gsl_BadFuncError(gsl_Error):
"""
problem with user-supplied function
"""
errno = errno.GSL_EBADFUNC
pass
class gsl_RunAwayError(gsl_ArithmeticError):
"""
iterative process is out of control
"""
errno = errno.GSL_ERUNAWAY
pass
class gsl_MaximumIterationError(gsl_ArithmeticError):
"""
exceeded max number of iterations
"""
errno = errno.GSL_EMAXITER
pass
class gsl_ZeroDivisionError(gsl_Error,ZeroDivisionError):
"""
"""
errno = errno.GSL_EZERODIV
pass
class gsl_BadToleranceError(gsl_Error):
"""
user specified an invalid tolerance
"""
errno = errno.GSL_EBADTOL
pass
class gsl_ToleranceError(gsl_ArithmeticError):
"""
failed to reach the specified tolerance
"""
errno = errno.GSL_ETOL
pass
class gsl_UnderflowError(gsl_Error,OverflowError):
"""
"""
errno = errno.GSL_EUNDRFLW
pass
class gsl_OverflowError(gsl_Error,OverflowError):
"""
"""
errno = errno.GSL_EOVRFLW
pass
class gsl_AccuracyLossError(gsl_ArithmeticError):
"""
failed to reach the specified tolerance
"""
errno = errno.GSL_ELOSS
pass
class gsl_RoundOffError(gsl_ArithmeticError):
"""
failed because of roundoff error
"""
errno = errno.GSL_EROUND
pass
class gsl_BadLength(gsl_Error):
"""
matrix; vector lengths are not conformant
"""
errno = errno.GSL_EBADLEN
pass
class gsl_MatrixNotSquare(gsl_Error):
"""
matrix not square
"""
errno = errno.GSL_ENOTSQR
pass
class gsl_SingularityError(gsl_ArithmeticError):
"""
apparent singularity detected
"""
errno = errno.GSL_ESING
pass
class gsl_DivergeError(gsl_ArithmeticError):
"""
integral or series is divergent
"""
errno = errno.GSL_EDIVERGE
pass
class gsl_NoHardwareSupportError(gsl_Error):
"""
requested feature is not supported by the hardware
"""
errno = errno.GSL_EUNSUP
pass
class gsl_NotImplementedError(gsl_Error, NotImplementedError):
"""
requested feature not (yet) implemented
"""
errno = errno.GSL_EUNIMPL
pass
class gsl_CacheLimitError(gsl_Error):
"""
cache limit exceeded
"""
errno = errno.GSL_ECACHE
pass
class gsl_TableLimitError(gsl_Error):
"""
Table limit exceeded
"""
errno = errno.GSL_ETABLE
pass
class gsl_NoProgressError(gsl_ArithmeticError):
"""
iteration is not making progress towards solution
"""
errno = errno.GSL_ENOPROG
pass
class gsl_JacobianEvaluationError(gsl_ArithmeticError):
"""
jacobian evaluations are not improving the solution
"""
errno = errno.GSL_ENOPROGJ
pass
class gsl_ToleranceFError(gsl_ArithmeticError):
"""
cannot reach the specified tolerance in F
"""
errno = errno.GSL_ETOLF
pass
class gsl_ToleranceXError(gsl_ArithmeticError):
"""
cannot reach the specified tolerance in X
"""
errno = errno.GSL_ETOLX
pass
class gsl_ToleranceGradientError(gsl_ArithmeticError):
"""
cannot reach the specified tolerance in gradient
"""
errno = errno.GSL_ETOLG
pass
class gsl_EOFError(gsl_Error, EOFError):
"""
end of file
"""
errno = errno.GSL_EOF
pass
class gsl_FloatingPointError(gsl_Error,FloatingPointError):
"""
"""
pass
class gsl_DomainWarning(gsl_Warning):
"""
used by pygsl.histogram
"""
errno = errno.GSL_EDOM
class gsl_DomainError(gsl_Error):
"""
Domain Error. e.g. sqrt(-1)
"""
errno = errno.GSL_EDOM
pass
class pygsl_StrideError(gsl_SanityCheckError):
"""
Could not recalculate a stride of a python array object to the stride
of a gsl_vector or gsl_matrix.
"""
errno = errno.PyGSL_ESTRIDE
pass
class pygsl_NotImplementedError(gsl_NotImplementedError):
"""
Base for all Errors, which are known but not implemented yet!
"""
errno = errno.PyGSL_EUNIMPL
pass
_not_exported_exceptions = (
gsl_Error,
gsl_Warning,
gsl_FloatingPointError,
gsl_ArithmeticError,
)
def _get_exceptions(subclass):
    """Collect every module-level class deriving from *subclass* that
    carries a non-None ``errno`` and is not in the non-exported set."""
    collected = []
    for candidate in list(globals().values()):
        if candidate in _not_exported_exceptions:
            continue
        try:
            is_subclass = issubclass(candidate, subclass)
        except TypeError:
            # Non-class module globals (functions, modules, tuples, ...).
            continue
        if not is_subclass:
            continue
        try:
            errno_value = candidate.errno
        except AttributeError:
            continue
        assert errno_value is not None
        int(errno_value)  # must be integral; raises otherwise, as before
        collected.append(candidate)
    return collected
def get_exceptions():
    """Return all exported gsl exception classes (subclasses of gsl_Error)."""
    return _get_exceptions(gsl_Error)
def get_warnings():
    """Return all exported gsl warning classes (subclasses of gsl_Warning)."""
    return _get_exceptions(gsl_Warning)
| {
"repo_name": "poojavade/Genomics_Docker",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/errors.py",
"copies": "1",
"size": "6512",
"license": "apache-2.0",
"hash": -6762786928466154000,
"line_mean": 18.380952381,
"line_max": 77,
"alpha_frac": 0.6371314496,
"autogenerated": false,
"ratio": 3.5410549211528006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4678186370752801,
"avg_score": null,
"num_lines": null
} |
__author__ = 'acidghost'
class Processor:
    """Static text-preprocessing helpers for tokenized sentences
    (lists of word lists).

    All sentence mutators work in place (via slice assignment, so callers
    holding references to the inner lists see the changes) and also return
    the sentences for chaining.
    """

    @staticmethod
    def lowerize(sentences):
        """Lowercase every token, in place."""
        for sent in sentences:
            sent[:] = [word.lower() for word in sent]
        return sentences

    @staticmethod
    def remove_digits(sentences):
        """Drop every token containing at least one digit.

        BUG FIX: the original removed items while iterating the same list,
        which skips the element following each removal (consecutive
        digit-bearing tokens survived). Rebuilding via slice assignment
        keeps the in-place semantics without that bug.
        """
        for sent in sentences:
            sent[:] = [word for word in sent
                       if not any(ch.isdigit() for ch in word)]
        return sentences

    @staticmethod
    def remove_stopwords(sentences, stopwords):
        """Drop tokens found in the stopword corpus.

        *stopwords* is any object with a ``words()`` method (e.g. an nltk
        stopword corpus). The word list is materialized into a set once,
        instead of being re-fetched for every token as before; the
        iterate-while-removing bug of the original is also fixed.
        """
        stops = set(stopwords.words())
        for sent in sentences:
            sent[:] = [word for word in sent if word not in stops]
        return sentences

    @staticmethod
    def stem(sentences, stemmer):
        """Replace every token with ``stemmer.stem(token)``, in place."""
        for sent in sentences:
            sent[:] = [stemmer.stem(word) for word in sent]
        return sentences

    @staticmethod
    def bag_of_words(sent):
        """Return a bag-of-words feature dict mapping each token to True."""
        return {word: True for word in sent}
"repo_name": "acidghost/sentipolc",
"path": "processor.py",
"copies": "1",
"size": "1112",
"license": "mit",
"hash": -5878928375656850000,
"line_mean": 25.5,
"line_max": 55,
"alpha_frac": 0.5251798561,
"autogenerated": false,
"ratio": 4.813852813852814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5839032669952814,
"avg_score": null,
"num_lines": null
} |
__author__ = 'acidghost'
import csv
from tweet import Tweet
# Split the SENTIPOLC Evalita 2014 corpus into positive and negative tweet
# text files (Python 2 script: note the print statement below).
with open('data/SENTIPOLC-Evalita-2014.csv', 'r') as csvfile:
    csvReader = csv.reader(csvfile)
    lines = [row for row in csvReader]
# Remove header line
lines = lines[1:]
# Compose tweets array; column 6 holds the tweet text, columns 0-5 the
# annotation fields consumed by the Tweet constructor.
tweets = []
for line in lines:
    # Skip if tweet is not available
    if str(line[6]).startswith('Tweet Not Available'):
        continue
    tw = Tweet(line[0], line[1], line[2], line[3], line[4], line[5], line[6])
    tweets.append(tw)
# Partition by opinion polarity; tweets with opinion == 0 are dropped.
pos_tweets = []
neg_tweets = []
for tweet in tweets:
    if tweet.opinion > 0:
        pos_tweets.append(tweet)
    elif tweet.opinion < 0:
        neg_tweets.append(tweet)
print 'Pos: %s\nNeg: %s\n' % (len(pos_tweets), len(neg_tweets))
# Saving positive tweets into a file
with open('data/SENTIPOLC-pos.txt', 'w') as posfile:
    posfile.writelines('\n'.join([tweet.text for tweet in pos_tweets]))
    posfile.close()  # redundant: the with-block already closes the file
# Saving negative tweets into another file
with open('data/SENTIPOLC-neg.txt', 'w') as negfile:
    negfile.writelines('\n'.join([tweet.text for tweet in neg_tweets]))
    negfile.close()  # redundant: the with-block already closes the file
"repo_name": "acidghost/sentipolc",
"path": "extract-corpus.py",
"copies": "1",
"size": "1115",
"license": "mit",
"hash": -311938115914435100,
"line_mean": 25.5714285714,
"line_max": 77,
"alpha_frac": 0.6609865471,
"autogenerated": false,
"ratio": 2.911227154046997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4072213701146997,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aclapes'
from os.path import isfile, exists, join
from os import makedirs
import cPickle
import random
import time
from math import isnan
import sys
import numpy as np
from sklearn.metrics import pairwise
from sklearn.decomposition import PCA
from spectral_division import spectral_embedding_nystrom, spectral_clustering_division, reconstruct_tree_from_leafs, IndefiniteError, NumericalError
import cv2
from joblib import delayed, Parallel
# Module-private tuning constants for the spectral clustering routines.
INTERNAL_PARAMETERS = dict(
    initial_ridge_value = 1e-10,  # presumably the starting ridge for numerical stability — TODO confirm
    tries_per_ridge_value = 3  # presumably attempts before growing the ridge — TODO confirm
)
def cluster(tracklets_path, videonames, clusters_path, verbose=False, visualize=False):
    """Cluster the tracklets of every video, writing results under clusters_path."""
    # Integer indices over all videos (np.linspace yielded floats, which
    # break when used to index the videonames list inside _cluster).
    inds = np.arange(len(videonames))
    # BUG FIX: the original passed tracklets_path where _cluster expects
    # clusters_path, so the caller's clusters_path was silently ignored.
    _cluster(tracklets_path, videonames, inds, clusters_path, verbose=verbose, visualize=visualize)
def cluster_multiprocess(tracklets_path, videonames, st, num_videos, clusters_path, verbose=False):
    """Cluster the contiguous slice [st, st + num_videos) of the video list."""
    # Integer indices for this slice (same values as the original
    # np.linspace call, but usable for list indexing inside _cluster).
    inds = np.arange(st, st + num_videos)
    # BUG FIX: pass clusters_path (the original passed tracklets_path) as
    # the output directory argument of _cluster.
    _cluster(tracklets_path, videonames, inds, clusters_path, verbose=verbose, visualize=False)
def cluster_multithread(tracklets_path, videonames, clusters_path, nt=4, verbose=False):
    """Cluster all videos with nt worker threads, one video per task,
    processed in random order."""
    shuffled = np.random.permutation(len(videonames)).astype('int')
    tasks = (delayed(_cluster)(tracklets_path, videonames, [idx], clusters_path,
                               verbose=verbose, visualize=False)
             for idx in shuffled)
    Parallel(n_jobs=nt, backend='threading')(tasks)
def _cluster(tracklets_path, videonames, indices, clusters_path, verbose=False, visualize=False):
    """
    This function implements the method described in Section 2 ("Clustering dense tracklets")
    of the paper 'Activity representation with motion hierarchies' (IJCV, 2014).

    For each selected video it: loads the 'obj'/'trj' tracklet files, splits them
    into per-channel time series, computes a Nystrom spectral embedding from a
    stratified subsample, runs a divisive spectral clustering, and dumps the
    resulting labels/paths/tree to <clusters_path>/<videoname>.pkl.

    :param tracklets_path: directory containing 'obj' and 'trj' subdirectories of per-video pickles.
    :param videonames: list of video identifiers.
    :param indices: iterable of integer indices into videonames to process.
    :param clusters_path: output directory; a .pkl per processed video is written here.
    :param verbose: print per-video progress.
    :param visualize: draw the resulting clusters with OpenCV (debug only; blocks on keypress).
    :return: None (results are persisted to disk).
    """
    if not exists(clusters_path):
        makedirs(clusters_path)
    # process the videos
    total = len(videonames)
    for i in indices:
        # skip videos that were already clustered in a previous run
        if isfile(join(clusters_path, videonames[i] + '.pkl')):
            if verbose:
                print('[_cluster] %s -> OK' % videonames[i])
            continue
        try:
            with open(join(tracklets_path, 'obj', videonames[i] + '.pkl'), 'rb') as f:
                data_obj = cPickle.load(f)
            with open(join(tracklets_path, 'trj', videonames[i] + '.pkl'), 'rb') as f:
                data_trj = cPickle.load(f)
        except IOError:
            sys.stderr.write("[Error] Tracklet files not found for %s." % videonames[i])
            continue
        start_time = time.time()
        # (Sec. 2.2) get a dictionary of separate channels
        D = dict()
        for k in xrange(data_obj.shape[0]):  # range(0,100): #
            T = np.reshape(data_trj[k], (data_trj.shape[1]/2,2))  # trajectory features into matrix (time length x 2)
            D.setdefault('x',[]).append( T[1:,0] )  # x's offset + x's relative displacement
            D.setdefault('y',[]).append( T[1:,1] )  # y's offset + y's relative displacement
            # reconstruct per-frame timestamps from the tracklet's ending time
            D.setdefault('t',[]).append( data_obj[k,0] - np.linspace(T.shape[0]-1, 0, T.shape[0]) )
            D.setdefault('v_x',[]).append( T[1:,0] - T[:-1,0] )  # frame-to-frame x velocity
            D.setdefault('v_y',[]).append( T[1:,1] - T[:-1,1] )  # frame-to-frame y velocity
        # (Sec. 2.3.1)
        # A, B = get_tracklet_similarities(D, data_obj[:,7:9])
        # create a subsample (n << N) stratified by a grid; grow the sampling
        # probability tenfold until at least 3 tracklets are in-sample
        prob = 0.01
        ret = False
        while not ret:
            insample, outsample = stratified_subsample_of_tracklets_in_grid(data_obj[:,7:9], p=prob)
            if len(insample) > 2:
                ret = True
            else:
                prob *= 10
        # get the similarities of
        A, medians = multimodal_product_kernel(D, insample, insample)  # (n), n << N tracklets
        B, _ = multimodal_product_kernel(D, insample, outsample, medians=medians)  # (N - n) tracklets
        # (Sec. 2.3.2 and 2.3.3)
        AB = np.hstack((A,B)).astype('float64')
        ridge = INTERNAL_PARAMETERS['initial_ridge_value']
        success = False
        while not success:
            try:
                E_ = spectral_embedding_nystrom(AB, ridge=ridge)
                success = True
            except (IndefiniteError, NumericalError, ValueError) as e:
                # NOTE(review): the ridge-escalation retry below was disabled;
                # any embedding failure now falls straight to the median-split
                # fallback further down.
                # warn the user
                # msg = "WARNING: increasing ridge, {0:.0e} -> {1:.0e}.\n"
                # sys.stderr.write(msg.format(ridge, ridge * 10))
                # sys.stderr.flush()
                # # increase the ridge value
                # if ridge >= 1e-6:
                #     ridge = -1
                #     break
                # ridge *= 10
                break
        if not success:
            # fallback: split tracklets in two halves at the median ending time
            n_left = np.count_nonzero(data_obj[:,0] <= np.median(data_obj[:,0]))
            best_labels = ([0] * n_left) + ([1] * (data_obj.shape[0]-n_left))
            int_paths = ([2] * n_left) + ([3] * (data_obj.shape[0]-n_left))
        else:
            # re-organize E rows according to in- and out-sample indices
            E = np.zeros(E_.shape, dtype=E_.dtype)
            E[insample,:] = E_[:len(insample),:]
            E[outsample,:] = E_[len(insample):,:]
            # (Sec. 2.4)
            best_labels, int_paths = spectral_clustering_division(E, data_obj[:,7:10])
        tree = reconstruct_tree_from_leafs(np.unique(int_paths))
        elapsed_time = time.time() - start_time
        if verbose:
            print('[_cluster] %s -> %s (in %.2f secs)' % (join(clusters_path, videonames[i] + '.pkl'), 'YES' if success else 'NO', elapsed_time))
        with open(join(clusters_path, videonames[i] + '.pkl'), 'wb') as f:
            cPickle.dump({'best_labels' : best_labels, 'int_paths' : int_paths, 'tree' : tree, 'ridge' : ridge}, f)
        # DEBUG
        # -----
        if visualize:
            xres, yres = 528, 224
            A = np.zeros((yres,1280,3), dtype=np.uint8)
            n_unique_paths = len(np.unique(tree.keys()))
            sorted_keys = sorted(tree.keys(), key=lambda x : x)
            for i, key in enumerate(sorted_keys):
                node = tree[key]
                cluster_inds = np.concatenate([np.where(int_paths == i)[0] for i in node])
                # one (jittered) hue per tree node
                hue = ((float(i)/n_unique_paths) + random.random()) % 1
                for k in xrange(0, len(cluster_inds)):
                    idx = cluster_inds[k]
                    T = np.reshape(data_trj[idx,:], (data_trj.shape[1]/2,2))
                    t = data_obj[idx,9]
                    for j in xrange(1,T.shape[0]):
                        pt1 = (int(T[j-1,0]*xres-xres/2+t*1280), int(T[j-1,1]*yres))
                        pt2 = (int(T[j,0]*xres-xres/2+t*1280), int(T[j,1]*yres))
                        cv2.line(A, pt1, pt2, hsv_to_rgb((hue,0.5,0.8)), 1)
                        cv2.circle(A, pt2, 1, hsv_to_rgb((hue,0.5,1.0)), -1)
                cv2.imshow("#DEBUG Clustering visualization", A)
                print key, node
                cv2.waitKey(0)
        # -----
# ==============================================================================
# Helper functions
# ==============================================================================
def stratified_subsample_of_tracklets_in_grid(P, nx=3, ny=3, p=0.01):
    """
    Subsample a factor p of the total tracklets stratifying the sampling in a
    grid of nx-by-ny cells.
    :param P: N-by-2 matrix of tracklet (ending) positions, coordinates in [0,1).
    :param nx: number of horizontal divisions of the grid
    :param ny: number of vertical divisions of the grid
    :param p: the sampling probability
    :return insample, outsample: integer row indices into P; together they
        cover every row of P that falls inside the grid, with no overlap.
    """
    p_cell = p / (nx*ny)
    insample = []
    outsample = []
    for i in range(0,ny):
        y_ran = (i*(1.0/ny), (i+1)*(1.0/ny))
        for j in range(0,nx):
            x_ran = (j*(1.0/nx), (j+1)*(1.0/nx))
            # rows of P whose position falls inside this grid cell
            cell_inds = np.where((P[:,0] >= x_ran[0]) & (P[:,0] < x_ran[1]) & (P[:,1] >= y_ran[0]) & (P[:,1] < y_ran[1]))[0]
            m = len(cell_inds)
            # FIX: the original appended the *local* shuffled positions 0..m-1
            # instead of mapping them back through cell_inds, so the returned
            # values were not valid row indices of P (and in/out did not
            # partition the tracklets). Map through cell_inds.
            perm = np.random.permutation(m)
            n_in = int(np.ceil(m * p_cell))
            insample.append(cell_inds[perm[:n_in]])
            outsample.append(cell_inds[perm[n_in:]])
    return np.concatenate(insample), np.concatenate(outsample)
def multimodal_product_kernel(D, primary_inds=None, secondary_inds=None, medians=None):
    """
    Merges the different modalities (or channels) using the product of rbf kernels.
    The similarity matrix computed is the one from the samples in the primary indices
    to the secondary indices. If some indices are not specified (None) all samples
    are used.
    :param D: a python dict containing the data in the different modalitites (or channels).
              keys are the names of the modalities. NOTE: channel entries are
              converted to np.float32 arrays in place (side effect).
    :param primary_inds: row indices of the kernel, or None for all samples.
    :param secondary_inds: column indices of the kernel, or None for all samples.
    :param medians: per-channel medians collected on a previous call; when fewer
                    than len(channels) are present, the freshly computed ones are
                    appended. NOTE(review): the supplied values are never used for
                    the bandwidth (gamma is always derived from this call's S) --
                    confirm whether reuse was intended.
    :return K, medians: the n-by-m fused kernel and the median list.
    """
    channels = ['x', 'y', 't', 'v_x', 'v_y']
    n = len(D['x']) if primary_inds is None else len(primary_inds)
    m = len(D['x']) if secondary_inds is None else len(secondary_inds)
    if medians is None:
        medians = []
    K = np.ones((n, m), dtype=np.float32)  # running product over channels
    for channel_t in channels:
        D[channel_t] = np.array(D[channel_t], dtype=np.float32)
        X_primary = D[channel_t] if primary_inds is None else D[channel_t][primary_inds]
        X_secondary = D[channel_t] if secondary_inds is None else D[channel_t][secondary_inds]
        S = pairwise.pairwise_distances(X=X_primary, Y=X_secondary, metric='euclidean')
        median = np.nanmedian(S[S != 0])
        if len(medians) < len(channels):
            medians.append(median)
        # median heuristic for the rbf bandwidth; a degenerate median (nan/0)
        # turns this channel into an all-ones factor
        gamma = 1.0/(2*median) if not isnan(median) and median != 0.0 else 0.0
        K = np.multiply(K, np.exp(-gamma * np.power(S, 2)))
    return K, medians
def hsv_to_rgb(hsv):
    '''
    Convert an HSV color to an RGB byte triplet.
    HSV values in [0..1]; the hue wraps around, so h == 1.0 is the same color as 0.0.
    :param hsv: (h, s, v) tuple
    :return (r, g, b) tuple, with integer values from 0 to 255:
    '''
    h, s, v = hsv[0], hsv[1], hsv[2]
    # FIX: wrap the hue. Previously h == 1.0 gave h_i == 6, which matched no
    # branch and left r, g, b undefined (NameError).
    h = h % 1.0
    h_i = int(h * 6)
    f = h * 6 - h_i
    p = v * (1 - s)
    q = v * (1 - f * s)
    t = v * (1 - (1 - f) * s)
    if h_i == 0:
        r, g, b = v, t, p
    elif h_i == 1:
        r, g, b = q, v, p
    elif h_i == 2:
        r, g, b = p, v, t
    elif h_i == 3:
        r, g, b = p, q, v
    elif h_i == 4:
        r, g, b = t, p, v
    else:  # h_i == 5
        r, g, b = v, p, q
    # FIX: clamp to 255 -- int(1.0 * 256) used to yield the out-of-range 256
    # (the caller passes v == 1.0).
    return (min(int(r * 256), 255), min(int(g * 256), 255), min(int(b * 256), 255))
"repo_name": "aclapes/darwintree",
"path": "tracklet_clustering.py",
"copies": "1",
"size": "10849",
"license": "bsd-3-clause",
"hash": 8003376283325307000,
"line_mean": 39.4850746269,
"line_max": 148,
"alpha_frac": 0.5494515624,
"autogenerated": false,
"ratio": 3.2697408077154915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43191923701154916,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aclapes'
from os.path import isfile, isdir, exists, join, splitext, basename, dirname
from os import makedirs
import cPickle
import time
import numpy as np
from videodarwin import darwin
# Module-private constants (currently empty; kept for consistency with sibling modules).
INTERNAL_PARAMETERS = dict(
)
def darwin(fullfeatnames, st, num_videos, darwins_path):
    """
    Compute and cache videodarwin representations for every node of each video's tree.
    :param fullfeatnames: dict mapping feature type -> list of per-video feature file paths.
    :param st: unused here (kept for interface compatibility).
    :param num_videos: unused here (kept for interface compatibility).
    :param darwins_path: output directory; one .pkl per input file is written under
                         <darwins_path>/<feat_t>/.
    :return: None (results are persisted to disk).
    """
    # FIX: this function shadows the module-level `darwin` imported from
    # videodarwin, so the original code called *itself* with one argument
    # (TypeError). Import the module locally and call it explicitly.
    import videodarwin as _vd
    if not exists(darwins_path):
        makedirs(darwins_path)
    for feat_t in fullfeatnames:
        # node_darwins[feat_t] = dict()
        if not exists(join(darwins_path, feat_t)):
            makedirs(join(darwins_path, feat_t))
        for featname in fullfeatnames[feat_t]:
            output_filepath = join(darwins_path, feat_t, basename(featname))
            if isfile(output_filepath):
                print('%s -> OK' % (featname))
                continue
            start_time = time.time()
            with open(featname, 'rb') as f:
                data = cPickle.load(f)
            # compute VD: one darwin vector for the whole video (node 1 = root)
            # and one per tree node
            node_darwins = dict()
            node_darwins[1] = _vd.darwin(data['X'])
            for id, X in data['tree_perframe'].iteritems():
                node_darwins[id] = _vd.darwin(X)
            # construct a list of edge pairs for easy access
            with open(output_filepath, 'wb') as f:
                cPickle.dump(dict(node_darwins=node_darwins), f)
            elapsed_time = time.time() - start_time
            print('%s -> DONE (in %.2f secs)' % (output_filepath, elapsed_time))
    return None
# ==============================================================================
# Helper functions
# ============================================================================== | {
"repo_name": "aclapes/darwintree",
"path": "darwintree.py",
"copies": "1",
"size": "1616",
"license": "bsd-3-clause",
"hash": -6155086715518369000,
"line_mean": 28.4,
"line_max": 80,
"alpha_frac": 0.5241336634,
"autogenerated": false,
"ratio": 3.9223300970873787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9942610166063608,
"avg_score": 0.0007707188847539724,
"num_lines": 55
} |
__author__ = 'aclapes'
from os.path import isfile, isdir, join, splitext
from os import listdir, system, remove
import shutil
# TODO: change MANUALLY these parameters
# ----------------------------------------------------
INTERNAL_PARAMETERS = dict(
    home_path = '/Volumes/MacintoshHD/Users/aclapes/',  # machine-specific; edit before running
    datasets_path = 'Datasets/',  # relative to home_path
    dataset_dirname = 'ucf_sports_actions',
)
# ----------------------------------------------------
def get_video_from_images(images_path, videofile_path, prefix='', n_leadzeros=4, image_format='jpg', fps=10):
    ''' Use external program (ffmpeg) to convert a set of images to video (loseless) '''
    # NOTE(review): the command is assembled as a single shell string from the
    # arguments; paths containing spaces or shell metacharacters will break the
    # call. Consider subprocess with an argument list if inputs are untrusted.
    # NOTE: fps is currently ignored -- the '-r' flag is commented out below.
    parameters = ['-i ' + images_path + prefix + '%0' + str(n_leadzeros) + 'd.' + image_format,
                  '-vcodec libx264 -crf 20',  # alt: '-codec copy',
                  # '-r ' + str(fps),  # not working!
                  videofile_path]
    cmd = 'ffmpeg ' + parameters[0] + ' ' + parameters[1] + ' ' + parameters[2]
    system(cmd)
if __name__ == '__main__':
    # Walk <dataset>/<action>/<instance>/ and produce one <action>_<instance>.avi
    # per instance, preferring (1) loose JPGs, then (2) an existing .avi, then
    # (3) the cropped JPGs under jpeg/.
    parent_path = INTERNAL_PARAMETERS['home_path'] + INTERNAL_PARAMETERS['datasets_path'] + INTERNAL_PARAMETERS['dataset_dirname'] + '/'
    videos_dir = parent_path
    for i, element in enumerate(listdir(videos_dir)):
        if isdir(join(videos_dir, element)):
            action_dir = element
            for instance_dir in listdir(join(videos_dir, action_dir)):
                # NOTE(review): this condition tests `element` (the *action*
                # dir from the outer loop) and then overwrites the loop
                # variable `instance_dir` with it -- looks like a copy-paste
                # bug; presumably `instance_dir` was meant in both places.
                # `element` is also reused by the inner loops below. Verify
                # against the intended directory layout before relying on this.
                if isdir(join(videos_dir, action_dir, element)):
                    instance_dir = element
                    print 'Processing', join(videos_dir, action_dir, instance_dir), '...'
                    # check if there are JPGs (uncropped) to generate a video
                    contains_jpegs = False
                    for element in listdir(join(videos_dir, action_dir, instance_dir)):
                        if isfile(join(videos_dir, action_dir, instance_dir, element)) and splitext(element)[1] == '.jpg':
                            contains_jpegs = True
                            break
                    if contains_jpegs:
                        dir = join(videos_dir, action_dir, instance_dir) + '/'  # this is from where we'll get JPGs
                    else:
                        # check, as a secondary option, if there's already a video
                        contains_video = False
                        videoname = ''
                        for element in listdir(join(videos_dir, action_dir, instance_dir)):
                            if isfile(join(videos_dir, action_dir, instance_dir, element)) and splitext(element)[1] == '.avi':
                                contains_video = True
                                videoname = element
                                break
                        if contains_video:
                            # copy the existing video to <action>_<instance>.avi
                            shutil.copyfile(join(videos_dir, action_dir, instance_dir, element), \
                                            join(videos_dir, action_dir + '_' + instance_dir + '.avi'))
                            continue
                        else:
                            # no JPGs (uncropped) and no video? use the cropped JPGs (in jpeg/ subfolder)
                            dir = join(videos_dir, action_dir, instance_dir, 'jpeg/')
                    videoname = ''
                    temporary_files = []
                    frame_ctr = 1
                    # copy frames to sequential 0001.jpg, 0002.jpg, ... names so
                    # ffmpeg's %04d pattern can pick them up
                    for element in listdir(dir):
                        if isfile(join(dir, element)) and splitext(element)[1] == '.jpg':
                            if videoname == '':
                                videoname = splitext(element)[0]
                            new_file = str(frame_ctr).zfill(4) + '.jpg'
                            try:
                                shutil.copyfile(join(dir, element), \
                                                join(dir, new_file))
                            except shutil.Error:
                                print "Already existing file:", new_file, "(no need to copy)"
                                pass
                            temporary_files.append(join(dir, new_file))
                            frame_ctr += 1
                    # create my own video
                    get_video_from_images(dir, join(videos_dir, action_dir + '_' + instance_dir + '.avi'), \
                                          image_format='jpg', prefix='', n_leadzeros=4, fps=10)
                    # remove image files that I copied with proper name to generate the video using ffmpeg
                    for filepath in temporary_files:
                        try:
                            remove(filepath)
                        except OSError:
                            pass
                    print 'DONE.'
    print 'ALL videos DONE.'
"repo_name": "aclapes/darwintree",
"path": "fix_ucf_sports_dataset.py",
"copies": "1",
"size": "4747",
"license": "bsd-3-clause",
"hash": 2709360509529324000,
"line_mean": 47.4489795918,
"line_max": 136,
"alpha_frac": 0.4653465347,
"autogenerated": false,
"ratio": 4.7,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.56653465347,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aclapes'
import numpy as np
from sklearn.svm import LinearSVR
def rootSIFT(X):
    '''
    Signed square-rooting (element-wise): sign(x) * sqrt(|x|).
    Commonly applied to Fisher Vectors ("rootSIFT" power normalization).
    :param X: array-like of values to transform.
    :return: array of the same shape with the transform applied.
    '''
    signs = np.sign(X)
    magnitudes = np.sqrt(np.abs(X))
    return np.multiply(signs, magnitudes)
def normalizeL1(X):
    """
    Normalize the data using L1-norm.
    :param X: each row of X is an instance
    :return: the normalized data (np.matrix), each row summing to 1 in absolute value
    """
    X = np.matrix(X)
    # FIX: the L1 norm of a row is sum(|x|); the previous code divided by
    # sqrt(sum(|x|)), a copy-paste of the L2 variant's square root.
    return X / np.sum(np.abs(X), axis=1)
def normalizeL2(X):
    """
    Normalize the data using L2-norm.
    :param X: each row of X is an instance
    :return: the normalized data (np.matrix), each row with unit Euclidean length
    """
    M = np.matrix(X)
    row_norms = np.sqrt(np.multiply(M, M).sum(axis=1))
    return M / row_norms
def linearSVR(X, c_param, norm=2):
    """
    Fit a linear SVR that regresses the frame index (1..T) from the normalized
    rows of X and return its weight vector (the videodarwin direction).
    :param X: T-by-N matrix, one row per time instant.
    :param c_param: C regularization parameter of the SVR.
    :param norm: 1 for L1 row normalization, anything else for L2.
    :return: the fitted coefficient vector.
    """
    XX = normalizeL1(X) if norm == 1 else normalizeL2(X)
    T = X.shape[0]  # temporal length
    # epsilon is "-p" in C's liblinear and tol is "-e"
    regressor = LinearSVR(C=c_param, dual=False, loss='squared_epsilon_insensitive',
                          epsilon=0.1, tol=0.001, verbose=False)
    regressor.fit(XX, np.linspace(1, T, T))
    return regressor.coef_
def darwin(X, c_svm_param=1):
    """
    Return the full videodarwin representation of X: the forward and reverse
    darwin weight vectors concatenated into one vector.
    """
    forward, reverse = _darwin(X, c_svm_param=c_svm_param)
    return np.concatenate([forward, reverse])
def _darwin(X, c_svm_param=1):
    '''
    Computes the videodarwin representation of a multi-variate temporal series.
    :param X: a T-by-N matrix with T time instants (rows) and N features
              (columns) -- the code treats axis 0 as time.
    :param c_svm_param: the C regularization parameter of the linear SVM.
    :return: (w_fw, w_rv), the forward and reverse videodarwin weight vectors.
    '''
    T = X.shape[0] # temporal length
    one_to_T = np.linspace(1,T,T)
    one_to_T = one_to_T[:,np.newaxis]
    # running time-average of the features up to each instant (forward pass)
    V = np.cumsum(X,axis=0) / one_to_T
    w_fw = linearSVR(rootSIFT(V), c_svm_param, 2) # videodarwin
    # same on the time-reversed sequence
    V = np.cumsum(np.flipud(X),axis=0) / one_to_T # reverse videodarwin
    w_rv = linearSVR(rootSIFT(V), c_svm_param, 2)
    return w_fw, w_rv
"repo_name": "aclapes/darwintree",
"path": "videodarwin.py",
"copies": "1",
"size": "1964",
"license": "bsd-3-clause",
"hash": 1723413492944510500,
"line_mean": 28.328358209,
"line_max": 110,
"alpha_frac": 0.6227087576,
"autogenerated": false,
"ratio": 2.858806404657933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3981515162257933,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aclapes'
import numpy as np
import cPickle
from os.path import join
from os.path import isfile, exists
from os import makedirs
from sklearn import svm
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import StratifiedKFold
import sys
import itertools
from joblib import delayed, Parallel
from random import shuffle
import time
from sklearn import preprocessing
import videodarwin
from tracklet_representation import normalize
def compute_ATEP_kernels(feats_path, videonames, traintest_parts, feat_types, kernels_output_path, \
                         kernel_type='linear', norm='l2', power_norm=True, \
                         nt=4, use_disk=False, verbose=False):
    """
    Compute All Tree Node Branch Evolution Pairs.

    For each train/test partition and each feature type, builds (or loads from
    cache) the root and node kernel matrices over the edge-pair representation
    of every video's tree, and returns them grouped per partition.

    :param feats_path: directory with the per-video tree feature pickles,
                       organized as <feat_t>-<partition>/<videoname>.pkl.
    :param videonames: list of video identifiers.
    :param traintest_parts: list of partition vectors (<= 0 marks train, > 0 test).
    :param feat_types: feature modalities to process.
    :param kernels_output_path: cache directory for the computed kernel matrices.
    :param kernel_type: 'linear', 'intersection' or 'chirbf'.
    :param norm, power_norm: normalization applied when building edge pairs
                             (power_norm also affects the cache file names).
    :param nt: number of parallel jobs for the kernel computation.
    :param use_disk: NOTE(review): in this function the disk path just quit()s
                     (the streaming variants are commented out below) -- confirm
                     before enabling.
    :param verbose:
    :return: list with one dict per partition; each has 'train'/'test' ->
             feat_t -> {'root': (K,), 'nodes': (K,)} single-channel tuples.
    """
    kernels = []
    for k, part in enumerate(traintest_parts):
        train_inds, test_inds = np.where(part <= 0)[0], np.where(part > 0)[0]
        kernels_part = dict()
        total = len(videonames)
        for feat_t in feat_types:
            train_filepath = join(kernels_output_path, kernel_type + ('-p-' if power_norm else '-') + feat_t + '-train-' + str(k) + '.pkl')
            test_filepath = join(kernels_output_path, kernel_type + ('-p-' if power_norm else '-') + feat_t + '-test-' + str(k) + '.pkl')
            if isfile(train_filepath) and isfile(test_filepath):
                # both kernels cached from a previous run
                with open(train_filepath, 'rb') as f:
                    data = cPickle.load(f)
                    Kr_train, Kn_train = data['Kr_train'], data['Kn_train']
                with open(test_filepath, 'rb') as f:
                    data = cPickle.load(f)
                    Kr_test, Kn_test = data['Kr_test'], data['Kn_test']
            else:
                if not exists(kernels_output_path):
                    makedirs(kernels_output_path)
                # load data and compute kernels
                try:
                    with open(train_filepath, 'rb') as f:
                        data = cPickle.load(f)
                        Kr_train, Kn_train = data['Kr_train'], data['Kn_train']
                except IOError:
                    if use_disk:
                        quit()
                        # if kernel_type == 'linear':
                        #     Kr_train, Kn_train = linear_kernel(kernel_repr_path, videonames, train_inds, nt=nt)
                        # elif kernel_type == 'chirbf':
                        #     Kr_train, Kn_train = chisquare_kernel(kernel_repr_path, videonames, train_inds, nt=nt)
                    else:
                        # build the in-memory edge-pair representation of the training videos
                        D_train = dict()
                        for i, idx in enumerate(train_inds):
                            if verbose:
                                print('[compute_ATEP_kernels] Load train: %s (%d/%d).' % (videonames[idx], i, len(train_inds)))
                            try:
                                with open(join(feats_path, feat_t + '-' + str(k), videonames[idx] + '.pkl'), 'rb') as f:
                                    d = cPickle.load(f)
                            except IOError:
                                sys.stderr.write('[Error] Feats file not found: %s.\n' % (join(feats_path, feat_t + '-' + str(k), videonames[idx] + '.pkl')))
                                sys.stderr.flush()
                                quit()
                            root, nodes = _construct_edge_pairs(d, norm=norm, power_norm=power_norm)
                            D_train.setdefault('root',[]).append(root)
                            D_train.setdefault('nodes',[]).append(nodes)
                        st_kernel = time.time()
                        if verbose:
                            print("[compute_ATEP_kernels] Compute kernel matrix %s .." % (feat_t))
                        if kernel_type == 'intersection':
                            Kr_train, Kn_train = intersection_kernel(D_train, n_channels=1, nt=nt, verbose=verbose)
                        elif kernel_type == 'chirbf':
                            Kr_train, Kn_train = chisquare_kernel(D_train, n_channels=1, nt=nt, verbose=verbose)
                        else:
                            Kr_train, Kn_train = linear_kernel(D_train, n_channels=1, nt=nt, verbose=verbose)
                        if verbose:
                            print("[compute_ATEP_kernels] %s took %2.2f secs." % (feat_t, time.time()-st_kernel))
                    with open(train_filepath, 'wb') as f:
                        cPickle.dump(dict(Kr_train=Kr_train, Kn_train=Kn_train), f)
                try:
                    with open(test_filepath, 'rb') as f:
                        data = cPickle.load(f)
                        Kr_test, Kn_test = data['Kr_test'], data['Kn_test']
                except IOError:
                    if use_disk:
                        quit()
                        # if kernel_type == 'linear':
                        #     Kr_test, Kn_test = linear_kernel(kernel_repr_path, videonames, test_inds, Y=train_inds, nt=nt)
                        # elif kernel_type == 'chirbf':
                        #     Kr_test, Kn_test = chisquare_kernel(kernel_repr_path, videonames, test_inds, Y=train_inds, nt=nt)
                    else:
                        # re-load the training representation if the train kernel came from cache
                        if not 'D_train' in locals():
                            D_train = dict()
                            for i,idx in enumerate(train_inds):
                                if verbose:
                                    print('[ATEP kernel computation] Load train: %s (%d/%d).' % (videonames[idx], i, len(train_inds)))
                                try:
                                    with open(join(feats_path, feat_t + '-' + str(k), videonames[idx] + '.pkl'), 'rb') as f:
                                        d = cPickle.load(f)
                                except IOError:
                                    sys.stderr.write('[Error] Feats file not found: %s.\n' % (join(feats_path, feat_t + '-' + str(k), videonames[idx] + '.pkl')))
                                    sys.stderr.flush()
                                    quit()
                                root, nodes = _construct_edge_pairs(d, norm=norm, power_norm=power_norm)
                                D_train.setdefault('root',[]).append(root)
                                D_train.setdefault('nodes',[]).append(nodes)
                        D_test = dict()
                        for i,idx in enumerate(test_inds):
                            if verbose:
                                print('[compute_ATEP_kernels] Load test: %s (%d/%d).' % (videonames[idx], i, len(test_inds)))
                            try:
                                with open(join(feats_path, feat_t + '-' + str(k), videonames[idx] + '.pkl'), 'rb') as f:
                                    d = cPickle.load(f)
                            except IOError:
                                sys.stderr.write('[Error] Feats file not found: %s.\n' % (join(feats_path, feat_t + '-' + str(k), videonames[idx] + '.pkl')))
                                sys.stderr.flush()
                                quit()
                            root, nodes = _construct_edge_pairs(d, norm=norm, power_norm=power_norm)
                            D_test.setdefault('root',[]).append(root)
                            D_test.setdefault('nodes',[]).append(nodes)
                        st_kernel = time.time()
                        if verbose:
                            print("[compute_ATEP_kernels] Compute kernel matrix %s .." % (feat_t))
                        if kernel_type == 'intersection':
                            Kr_test, Kn_test = intersection_kernel(D_test, Y=D_train, n_channels=1, nt=nt, verbose=verbose)
                        elif kernel_type == 'chirbf':
                            Kr_test, Kn_test = chisquare_kernel(D_test, Y=D_train, n_channels=1, nt=nt, verbose=verbose)
                        else:
                            Kr_test, Kn_test = linear_kernel(D_test, Y=D_train, n_channels=1, nt=nt, verbose=verbose)
                        if verbose:
                            print("[compute_ATEP_kernels] %s took %2.2f secs." % (feat_t, time.time()-st_kernel))
                    with open(test_filepath, 'wb') as f:
                        cPickle.dump(dict(Kr_test=Kr_test, Kn_test=Kn_test), f)
            # Use also the parent
            kernels_part.setdefault('train',{}).setdefault(feat_t,{})['root'] = (Kr_train[0],)
            kernels_part['train'][feat_t]['nodes'] = (Kn_train[0],)
            kernels_part.setdefault('test',{}).setdefault(feat_t,{})['root'] = (Kr_test[0],)
            kernels_part['test'][feat_t]['nodes'] = (Kn_test[0],)
        kernels.append(kernels_part)
    return kernels
def compute_ATNBEP_kernels(feats_path, videonames, traintest_parts, feat_types, kernels_output_path, \
                           nt=-1, norm='l2', power_norm=True, use_disk=False, verbose=False):
    """
    Compute All Tree Node Branch Evolution Pairs.

    For each partition and feature type: (1) ensures the per-video branch-evolution
    representation exists on disk, (2) computes (or loads from cache) train and
    test linear kernels over it, and (3) returns the 2-channel (forward/reverse
    darwin) root and node kernels grouped per partition.

    :param feats_path: directory with the per-video tree feature pickles.
    :param videonames: list of video identifiers.
    :param traintest_parts: list of partition vectors (<= 0 marks train, > 0 test).
    :param feat_types: feature modalities to process.
    :param kernels_output_path: cache directory for representations and kernels.
    :param nt: number of parallel jobs.
    :param norm: unused here -- kept for interface compatibility with the ATEP variant.
    :param power_norm: only affects the cache file names in this function.
    :param use_disk: if True, compute kernels streaming the representations from disk.
    :param verbose:
    :return: list with one dict per partition; each has 'train'/'test' ->
             feat_t -> {'root': (K0, K1), 'nodes': (K0, K1)} two-channel tuples.
    """
    kernels = []
    for k, part in enumerate(traintest_parts):
        train_inds, test_inds = np.where(part <= 0)[0], np.where(part > 0)[0]
        kernels_part = dict()
        total = len(videonames)
        for feat_t in feat_types:
            train_filepath = join(kernels_output_path, 'linear' + ('-p-' if power_norm else '-') + feat_t + '-train-' + str(k) + '.pkl')
            test_filepath = join(kernels_output_path, 'linear' + ('-p-' if power_norm else '-') + feat_t + '-test-' + str(k) + '.pkl')
            if isfile(train_filepath) and isfile(test_filepath):
                # both kernels cached from a previous run
                with open(train_filepath, 'rb') as f:
                    data = cPickle.load(f)
                    Kr_train, Kn_train = data['Kr_train'], data['Kn_train']
                with open(test_filepath, 'rb') as f:
                    data = cPickle.load(f)
                    Kr_test, Kn_test = data['Kr_test'], data['Kn_test']
            else:
                # make sure every per-video branch-evolution representation is on disk
                kernel_repr_path = join(kernels_output_path, feat_t + '-' + str(k))
                if not exists(kernel_repr_path):
                    makedirs(kernel_repr_path)
                Parallel(n_jobs=nt, backend='threading')(delayed(construct_branch_evolutions)(join(feats_path, feat_t + '-' + str(k), videonames[i] + '.pkl'),
                                                                                              join(kernel_repr_path, videonames[i] + '.pkl'))
                                                         for i in xrange(total))
                try:
                    with open(train_filepath, 'rb') as f:
                        data = cPickle.load(f)
                        Kr_train, Kn_train = data['Kr_train'], data['Kn_train']
                except IOError:
                    if use_disk:
                        Kr_train, Kn_train = linear_kernel(kernel_repr_path, videonames, train_inds, n_channels=2, nt=nt)
                    else:
                        # load the training representation into memory
                        D_train = dict()
                        for i, idx in enumerate(train_inds):
                            if verbose:
                                print('[compute_ATNBEP_kernels] Load train: %s (%d/%d).' % (videonames[idx], i, len(train_inds)))
                            try:
                                with open(join(kernel_repr_path, videonames[idx] + '.pkl'), 'rb') as f:
                                    D_idx = cPickle.load(f)
                            except IOError:
                                sys.stderr.write(join(kernel_repr_path, videonames[idx] + '.pkl') + '\n')
                                sys.stderr.flush()
                                quit()
                            D_train.setdefault('root',[]).append(D_idx['root'])
                            D_train.setdefault('nodes',[]).append(D_idx['nodes'])
                        st_kernel = time.time()
                        if verbose:
                            print("[compute_ATNBEP_kernels] Compute kernel matrix %s .." % (feat_t))
                        Kr_train, Kn_train = linear_kernel(D_train, n_channels=2, nt=nt)
                        if verbose:
                            print("[compute_ATNBEP_kernels] %s took %2.2f secs." % (feat_t, time.time()-st_kernel))
                    with open(train_filepath, 'wb') as f:
                        cPickle.dump(dict(Kr_train=Kr_train, Kn_train=Kn_train), f)
                try:
                    with open(test_filepath, 'rb') as f:
                        data = cPickle.load(f)
                        Kr_test, Kn_test = data['Kr_test'], data['Kn_test']
                except IOError:
                    if use_disk:
                        Kr_test, Kn_test = linear_kernel(kernel_repr_path, videonames, test_inds, Y=train_inds, n_channels=2, nt=nt)
                    else:
                        # re-load the training representation if the train kernel came from cache
                        if not 'D_train' in locals():
                            D_train = dict()
                            for i, idx in enumerate(train_inds):
                                if verbose:
                                    print('[compute_ATNBEP_kernels] Load train: %s (%d/%d).' % (videonames[idx], i, len(train_inds)))
                                try:
                                    with open(join(kernel_repr_path, videonames[idx] + '.pkl'), 'rb') as f:
                                        D_idx = cPickle.load(f)
                                except IOError:
                                    sys.stderr.write(join(kernel_repr_path, videonames[idx] + '.pkl') + '\n')
                                    sys.stderr.flush()
                                    quit()
                                D_train.setdefault('root',[]).append(D_idx['root'])
                                D_train.setdefault('nodes',[]).append(D_idx['nodes'])
                        D_test = dict()
                        for i, idx in enumerate(test_inds):
                            if verbose:
                                print('[compute_ATNBEP_kernels] Load test: %s (%d/%d).' % (videonames[idx], i, len(test_inds)))
                            try:
                                with open(join(kernel_repr_path, videonames[idx] + '.pkl'), 'rb') as f:
                                    D_idx = cPickle.load(f)
                            except IOError:
                                sys.stderr.write(join(kernel_repr_path, videonames[idx] + '.pkl') + '\n')
                                sys.stderr.flush()
                                quit()
                            D_test.setdefault('root',[]).append(D_idx['root'])
                            D_test.setdefault('nodes',[]).append(D_idx['nodes'])
                        st_kernel = time.time()
                        if verbose:
                            print("[compute_ATNBEP_kernels] Compute kernel matrix %s .." % (feat_t))
                        Kr_test, Kn_test = linear_kernel(D_test, Y=D_train, n_channels=2, nt=nt)
                        if verbose:
                            print("[compute_ATNBEP_kernels] %s took %2.2f secs." % (feat_t, time.time()-st_kernel))
                    with open(test_filepath, 'wb') as f:
                        cPickle.dump(dict(Kr_test=Kr_test, Kn_test=Kn_test), f)
            # two channels per kernel: forward and reverse darwin evolutions
            kernels_part.setdefault('train',{}).setdefault(feat_t,{})['root'] = (Kr_train[0],Kr_train[1])
            kernels_part['train'][feat_t]['nodes'] = (Kn_train[0],Kn_train[1])
            # FIX: the test root tuple duplicated channel 0 (copy-paste); use
            # channel 1 as its second element, consistent with train and nodes.
            kernels_part.setdefault('test',{}).setdefault(feat_t,{})['root'] = (Kr_test[0],Kr_test[1])
            kernels_part['test'][feat_t]['nodes'] = (Kn_test[0],Kn_test[1])
        kernels.append(kernels_part)
    return kernels
# ==============================================================================
# Helper functions
# ==============================================================================
def construct_edge_pairs(feat_repr_filepath, output_filepath, norm='l2', power_norm=True, ):
    """
    Load a per-video tree representation and cache its (root, edge-pair)
    encoding at output_filepath. Does nothing if the output already exists;
    on a missing input file it reports the error and aborts the process.
    """
    if not exists(output_filepath):
        try:
            print feat_repr_filepath
            with open(feat_repr_filepath, 'rb') as f:
                data = cPickle.load(f)
            root, nodes = _construct_edge_pairs(data, norm=norm, power_norm=power_norm)
            with open(output_filepath, 'wb') as f:
                cPickle.dump(dict(root=root, nodes=nodes), f)
        except IOError:
            sys.stderr.write('# ERROR: missing training instance'
                             ' {}\n'.format(feat_repr_filepath))
            sys.stderr.flush()
            quit()
def _construct_edge_pairs(data, norm='l2', power_norm=True, dtype=np.float32):
    """
    A tree is a list of edges, with each edge as the concatenation of the repr.
    of parent and child nodes (node ids follow the heap convention: the parent
    of node id is id // 2, and id 1 is the root).
    :param data: dict with a 'tree' mapping of node id -> feature vector.
    :param norm: norm passed to sklearn's preprocessing.normalize.
    :param power_norm: if True, apply signed square-rooting before normalizing.
    :param dtype: dtype used for the root vector.
    :return root, edges:
    """
    root_vec = data['tree'][1].astype(dtype=dtype)
    if power_norm:
        root_vec = np.sign(root_vec) * np.sqrt(np.abs(root_vec))
    root_vec = preprocessing.normalize(root_vec[np.newaxis,:], norm=norm)
    root = [np.squeeze(root_vec)]

    edges = []
    for node_id in data['tree'].keys():
        if node_id <= 1:
            continue
        child = data['tree'][node_id].astype('float32')
        parent = data['tree'][int(node_id/2.)].astype('float32')
        pair = np.concatenate([child, parent])
        if power_norm:
            pair = np.sign(pair) * np.sqrt(np.abs(pair))
        pair = preprocessing.normalize(pair[np.newaxis,:], norm=norm)
        # flatten the 1-by-d normalized row back to a vector
        pair = np.hstack(pair)
        edges.append([np.squeeze(pair)])
    return root, edges
def construct_clusters(feat_repr_filepath, output_filepath, norm='l2', power_norm=True, ):
    """
    Load a per-video tree representation and cache its (root, clusters)
    encoding at output_filepath. Does nothing if the output already exists;
    on a missing input file it reports the error and aborts the process.
    """
    if not exists(output_filepath):
        try:
            print feat_repr_filepath
            with open(feat_repr_filepath, 'rb') as f:
                data = cPickle.load(f)
            root, clusters = _construct_clusters(data, norm=norm, power_norm=power_norm)
            with open(output_filepath, 'wb') as f:
                cPickle.dump(dict(root=root, clusters=clusters), f)
        except IOError:
            sys.stderr.write('# ERROR: missing training instance'
                             ' {}\n'.format(feat_repr_filepath))
            sys.stderr.flush()
            quit()
def _construct_clusters(data, norm='l2', power_norm=True, dtype=np.float32):
    """
    Encode a per-video tree as a normalized root vector plus one normalized
    vector per non-root node (cluster).
    :param data: dict with a 'tree' mapping of node id -> feature vector; id 1 is the root.
    :param norm: norm passed to sklearn's preprocessing.normalize.
    :param power_norm: if True, apply signed square-rooting before normalizing.
    :param dtype: dtype used for the root vector.
    :return root, clusters:
    """
    r = data['tree'][1].astype(dtype=dtype)
    if power_norm:
        r = np.sign(r) * np.sqrt(np.abs(r))
    # FIX: preprocessing.normalize requires 2-D input; pass the vector as a
    # single row and squeeze it back (consistent with _construct_edge_pairs).
    r = preprocessing.normalize(r[np.newaxis,:], norm=norm)
    root = [np.squeeze(r), ]
    clusters = []
    for id in data['tree'].keys():
        if id > 1:
            c = data['tree'][id].astype('float32')
            if power_norm:
                c = np.sign(c) * np.sqrt(np.abs(c))
            c = preprocessing.normalize(c[np.newaxis,:], norm=norm)
            clusters.append([np.squeeze(c),])
    return root, clusters
def construct_branch_evolutions(input_filepath, output_filepath):
    """
    Load a per-video tree representation and cache its (root, branch-evolution)
    encoding at output_filepath. Does nothing if the output already exists;
    on a missing input file it reports the error and aborts the process.
    """
    if not exists(output_filepath):
        try:
            with open(input_filepath, 'rb') as f:
                data = cPickle.load(f)
            root, nodes = _construct_branch_evolutions(data)
            with open(output_filepath, 'wb') as f:
                cPickle.dump(dict(root=root, nodes=nodes), f)
        except IOError:
            sys.stderr.write('# ERROR: missing training instance'
                             ' {}\n'.format(input_filepath))
            sys.stderr.flush()
            quit()
    return
def _construct_branch_evolutions(data, dtype=np.float32):
    """
    For each non-root node, stack the features along its root-ward path and
    compute the forward/reverse videodarwin pair of that path.
    :param data: dict with a 'tree' mapping of node id -> feature vector
                 (heap ids: parent of id is id // 2).
    :return root, branches: root is a 2-channel zero placeholder; branches is a
            list of [w_fw, w_rv] normalized darwin pairs, one per non-root node.
    """
    # placeholder root: one zero vector per channel (forward, reverse)
    root = [np.array([0],dtype=dtype), np.array([0],dtype=dtype)]
    branches = []
    # NOTE: Python-2-only constructs here -- dict.iteritems(), and `id_j /= 2`
    # relies on integer floor division of `/` (would become float under py3).
    for (id_i, x) in data['tree'].iteritems():
        if id_i > 1:
            # construct the path matrix
            X = []
            id_j = id_i
            while id_j > 0:
                X.append(data['tree'][id_j])
                id_j /= 2
            w_fw, w_rv = videodarwin._darwin(np.array(X))
            branches.append( [normalize(w_fw), normalize(w_rv)] )
    return root, branches
# NOTE(review): DEAD CODE -- this definition is immediately shadowed by the
# redefinition of intersection_kernel just below (different signature, in-memory
# variant), so this disk-based version is never reachable by name. Kept here
# unchanged; consider renaming (e.g. intersection_kernel_disk) or deleting.
def intersection_kernel(input_path, videonames, X, Y=None, n_channels=1, nt=1, verbose=False):
    """
    Disk-based histogram-intersection kernel over indexed video representations.
    :param X: indices of the rows; :param Y: indices of the columns (None makes
    the kernel symmetric over X). Returns the root and edge kernel matrices.
    """
    points = []
    if Y is None:
        # generate combinations
        points += [(i,i) for i in xrange(len(X))] # diagonal
        points += [p for p in itertools.combinations(np.arange(len(X)),2)] # upper-triangle combinations
        is_symmetric = True
        Y = X
    else:
        # generate product
        points += [ p for p in itertools.product(*[np.arange(len(X)),np.arange(len(Y))]) ]
        is_symmetric = False
    if verbose:
        print('Computing fast %dx%d intersection kernel ...\n' % (len(X),len(Y)))
    step = np.int(np.floor(len(points)/nt)+1)
    shuffle(points) # so all threads have similar workload
    ret = Parallel(n_jobs=nt, backend='threading')(delayed(_intersection_kernel)(input_path, videonames, X, Y, points[i*step:((i+1)*step if (i+1)*step < len(points) else len(points))],
                                                                                 n_channels=n_channels, tid=i, verbose=True)
                                                   for i in xrange(nt))
    # aggregate results of parallel computations
    Kr, Ke = ret[0][0], ret[0][1]
    for r in ret[1:]:
        Kr += r[0]
        Ke += r[1]
    # if symmetric, replicate upper to lower triangle matrix
    if is_symmetric:
        Kr += np.triu(Kr,1).T
        Ke += np.triu(Ke,1).T
    return Kr, Ke
def intersection_kernel(X, Y=None, n_channels=1, nt=-1, verbose=False):
    """Multi-channel histogram-intersection kernel between tree-structured videos.

    X (and Y, if given) are dicts with keys 'root' (per-video list of
    n_channels feature arrays) and 'nodes' (per-video list of tree nodes,
    each a list of n_channels arrays).

    WARNING: X['root']/X['nodes'] (and Y's) are overwritten in place with
    their absolute values, i.e. the caller's dicts are mutated.

    If Y is None the symmetric Gram matrix is computed (diagonal + upper
    triangle, then mirrored); nt=-1 lets joblib use all cores (threading
    backend). Returns (Kr, Ke) of shape (n_channels, len(X), len(Y)):
    root-root intersections and mean pairwise node intersections.
    """
    points = []
    # histogram intersection assumes non-negative bins
    X['root'] = [[np.abs(root[i]) for i in xrange(n_channels)] for root in X['root']]
    X['nodes'] = [[[np.abs(node[i]) for i in xrange(n_channels)] for node in tree] for tree in X['nodes']]
    if Y is None:
        # generate combinations
        points += [(i,i) for i in xrange(len(X['root']))] # diagonal
        points += [p for p in itertools.combinations(np.arange(len(X['root'])),2)] # upper-triangle combinations
        is_symmetric = True
        Y = X
    else:
        Y['root'] = [[np.abs(root[i]) for i in xrange(n_channels)] for root in Y['root']]
        Y['nodes'] = [[[np.abs(node[i]) for i in xrange(n_channels)] for node in tree] for tree in Y['nodes']]
        # generate product
        points += [ p for p in itertools.product(*[np.arange(len(X['root'])),np.arange(len(Y['root']))]) ]
        is_symmetric = False
    if verbose:
        print('[intersection_kernel] Computing fast %dx%d kernel ...\n' % (len(X['root']),len(Y['root'])))
    shuffle(points) # so all threads have similar workload
    # NOTE(review): `step` is an unused leftover from the batched version below
    step = np.int(np.floor(len(points)/nt)+1)
    # ---
    # ret = Parallel(n_jobs=nt, backend='threading')(delayed(_intersection_kernel_batch)(X, Y, points[i*step:((i+1)*step if (i+1)*step < len(points) else len(points))],
    #                                                                    n_channels=n_channels, job_id=i, verbose=True)
    #                                         for i in xrange(nt))
    # # aggregate results of parallel computations
    # Kr, Ke = ret[0][0], ret[0][1]
    # for r in ret[1:]:
    #     Kr += r[0]
    #     Ke += r[1]
    # ---
    # one job per (i,j) pair; job_id lets results be mapped back after shuffle
    ret = Parallel(n_jobs=nt, backend='threading')(delayed(_intersection_kernel)(X['root'][i], Y['root'][j], X['nodes'][i], Y['nodes'][j],
                                                                       n_channels=n_channels, job_id=job_id, verbose=verbose)
                                            for job_id,(i,j) in enumerate(points))
    Kr = np.zeros((n_channels,len(X['root']),len(Y['root'])), dtype=np.float64) # root kernel
    Ke = Kr.copy()
    # aggregate results of parallel computations
    for job_id, res in ret:
        i,j = points[job_id]
        for c in xrange(n_channels):
            Kr[c,i,j], Ke[c,i,j] = res[c,0], res[c,1]
    # ---
    # if symmetric, replicate upper to lower triangle matrix
    if is_symmetric:
        for i in xrange(n_channels):
            Kr[i] += np.triu(Kr[i],1).T
            Ke[i] += np.triu(Ke[i],1).T
    return Kr, Ke
def chisquare_kernel(X, Y=None, n_channels=1, nt=-1, verbose=False):
    """Multi-channel chi-square matrix between tree-structured videos.

    Same driver structure as intersection_kernel: X/Y are dicts with
    'root' and 'nodes' entries (see there). Each cell holds the chi-square
    statistic sum((x-y)^2 / (x+y)) computed by _chisquare_kernel —
    presumably turned into a kernel (e.g. exponentiated) by the caller;
    TODO confirm.

    WARNING: X (and Y) are overwritten in place with absolute values,
    i.e. the caller's dicts are mutated.

    Returns (Kr, Ke) of shape (n_channels, len(X), len(Y)).
    """
    points = []
    # chi-square assumes non-negative histogram bins
    X['root'] = [[np.abs(root[i]) for i in xrange(n_channels)] for root in X['root']]
    X['nodes'] = [[[np.abs(node[i]) for i in xrange(n_channels)] for node in tree] for tree in X['nodes']]
    if Y is None:
        # generate combinations
        points += [(i,i) for i in xrange(len(X['root']))] # diagonal
        points += [p for p in itertools.combinations(np.arange(len(X['root'])),2)] # upper-triangle combinations
        is_symmetric = True
        Y = X
    else:
        Y['root'] = [[np.abs(root[i]) for i in xrange(n_channels)] for root in Y['root']]
        Y['nodes'] = [[[np.abs(node[i]) for i in xrange(n_channels)] for node in tree] for tree in Y['nodes']]
        # generate product
        points += [ p for p in itertools.product(*[np.arange(len(X['root'])),np.arange(len(Y['root']))]) ]
        is_symmetric = False
    if verbose:
        print('[chisquare_kernel] Computing fast %dx%d kernel ...\n' % (len(X['root']),len(Y['root'])))
    shuffle(points) # so all threads have similar workload
    # NOTE(review): `step` is an unused leftover from the batched version
    step = np.int(np.floor(len(points)/nt)+1)
    # ---
    # ret = Parallel(n_jobs=nt, backend='threading')(delayed(_intersection_kernel_batch)(X, Y, points[i*step:((i+1)*step if (i+1)*step < len(points) else len(points))],
    #                                                                    n_channels=n_channels, job_id=i, verbose=True)
    #                                         for i in xrange(nt))
    # # aggregate results of parallel computations
    # Kr, Ke = ret[0][0], ret[0][1]
    # for r in ret[1:]:
    #     Kr += r[0]
    #     Ke += r[1]
    # ---
    # one job per (i,j) pair; job_id maps results back after the shuffle
    ret = Parallel(n_jobs=nt, backend='threading')(delayed(_chisquare_kernel)(X['root'][i], Y['root'][j], X['nodes'][i], Y['nodes'][j],
                                                                       n_channels=n_channels, job_id=job_id, verbose=verbose)
                                            for job_id,(i,j) in enumerate(points))
    Kr = np.zeros((n_channels,len(X['root']),len(Y['root'])), dtype=np.float64) # root kernel
    Ke = Kr.copy()
    # aggregate results of parallel computations
    for job_id, res in ret:
        i,j = points[job_id]
        for c in xrange(n_channels):
            Kr[c,i,j], Ke[c,i,j] = res[c,0], res[c,1]
    # ---
    # if symmetric, replicate upper to lower triangle matrix
    if is_symmetric:
        for i in xrange(n_channels):
            Kr[i] += np.triu(Kr[i],1).T
            Ke[i] += np.triu(Ke[i],1).T
    return Kr, Ke
def linear_kernel(X, Y=None, n_channels=1, nt=-1, verbose=False):
    """Multi-channel linear (dot-product) kernel between tree-structured videos.

    Same driver structure as intersection_kernel/chisquare_kernel, but the
    inputs are used as-is (no absolute value is taken). X/Y are dicts with
    'root' and 'nodes' entries. Returns (Kr, Ke) of shape
    (n_channels, len(X), len(Y)): root-root dot products and mean pairwise
    node-node dot products.
    """
    points = []
    if Y is None:
        # generate combinations
        points += [(i,i) for i in xrange(len(X['root']))] # diagonal
        points += [p for p in itertools.combinations(np.arange(len(X['root'])),2)] # upper-triangle combinations
        is_symmetric = True
        Y = X
    else:
        # generate product
        points += [ p for p in itertools.product(*[np.arange(len(X['root'])),np.arange(len(Y['root']))]) ]
        is_symmetric = False
    if verbose:
        print('[linear_kernel] Computing fast %dx%d kernel ...\n' % (len(X['root']),len(Y['root'])))
    shuffle(points) # so all threads have similar workload
    # NOTE(review): `step` is an unused leftover from the batched version
    step = np.int(np.floor(len(points)/nt)+1)
    # ---
    # ret = Parallel(n_jobs=nt, backend='threading')(delayed(_intersection_kernel_batch)(X, Y, points[i*step:((i+1)*step if (i+1)*step < len(points) else len(points))],
    #                                                                    n_channels=n_channels, job_id=i, verbose=True)
    #                                         for i in xrange(nt))
    # # aggregate results of parallel computations
    # Kr, Ke = ret[0][0], ret[0][1]
    # for r in ret[1:]:
    #     Kr += r[0]
    #     Ke += r[1]
    # ---
    # one job per (i,j) pair; job_id maps results back after the shuffle
    ret = Parallel(n_jobs=nt, backend='threading')(delayed(_linear_kernel)(X['root'][i], Y['root'][j], X['nodes'][i], Y['nodes'][j],
                                                                       n_channels=n_channels, job_id=job_id, verbose=verbose)
                                            for job_id,(i,j) in enumerate(points))
    Kr = np.zeros((n_channels,len(X['root']),len(Y['root'])), dtype=np.float64) # root kernel
    Ke = Kr.copy()
    # aggregate results of parallel computations
    for job_id, res in ret:
        i,j = points[job_id]
        for c in xrange(n_channels):
            Kr[c,i,j], Ke[c,i,j] = res[c,0], res[c,1]
    # ---
    # if symmetric, replicate upper to lower triangle matrix
    if is_symmetric:
        for i in xrange(n_channels):
            Kr[i] += np.triu(Kr[i],1).T
            Ke[i] += np.triu(Ke[i],1).T
    return Kr, Ke
def _intersection_kernel(input_path, videonames, X, Y, points, tid=None, verbose=False):
    """
    Compute the ATEP kernel.

    Disk-backed worker: loads per-video tree pickles from input_path and
    fills the (i,j) cells listed in `points`.

    NOTE(review): shadowed by the 4-positional-argument `_intersection_kernel`
    redefined later in this module, so this variant is dead at runtime.

    :param X:
    :param Y:
    :param points: pairs of distances to compute among (i,j)-indexed rows of X and Y respectively.
    :param tid: thread ID for verbosing purposes
    :param verbose:
    :return: (Kr, Kn) root- and node-level kernel matrices, non-zero only
        at the cells listed in `points`.
    """
    Kr = np.zeros((len(X),len(Y)), dtype=np.float64) # root kernel
    Kn = Kr.copy()
    # sorting by i makes consecutive points share the same i-th pickle,
    # which the prev_i check below exploits to avoid reloading it
    sorted_points = sorted(points) # sort set of points using the i-th index
    prev_i = -1
    for pid,(i,j) in enumerate(sorted_points):
        if verbose:
            print('[_intersection_kernel] Thread %d, progress = %.1f%%]' % (tid,100.*(pid+1)/len(points)))
        # i-th tree already loaded, do not reload
        if prev_i < i:
            with open(join(input_path, videonames[i] + '.pkl'), 'rb') as f:
                Di = cPickle.load(f)
                # intersection assumes non-negative bins
                # (np.abs over 'nodes' assumes the node arrays stack into a
                # regular ndarray — TODO confirm they are not ragged)
                Di['root'], Di['nodes'] = np.abs(Di['root']), np.abs(Di['nodes'])
            prev_i = i
        # always reload j-th tree
        with open(join(input_path, videonames[j] + '.pkl'), 'rb') as f:
            Dj = cPickle.load(f)
            Dj['root'], Dj['nodes'] = np.abs(Dj['root']), np.abs(Dj['nodes'])
        Kr[i,j] = np.minimum(Di['root'], Dj['root']).sum()
        # pair-wise intersection of edges' histograms
        sum_nodes = 0.
        for node_i in xrange(len(Di['nodes'])):
            for node_j in xrange(len(Dj['nodes'])):
                sum_nodes += np.minimum(Di['nodes'][node_i], Dj['nodes'][node_j]).sum()
        Kn[i,j] = sum_nodes / (len(Di['nodes']) * len(Dj['nodes']))
    return Kr, Kn
def _intersection_kernel_batch(X, Y, points, n_channels=1, job_id=None, verbose=False):
    """
    Compute the ATEP kernel.

    In-memory batched worker: fills every (i,j) cell listed in `points`
    with per-channel root and mean-node histogram intersections. Assumes
    X/Y 'root'/'nodes' entries are already non-negative (the drivers take
    np.abs beforehand).

    NOTE(review): only referenced from commented-out code in the drivers
    above — currently unused.

    :param X:
    :param Y:
    :param points: pairs of distances to compute among (i,j)-indexed rows of X and Y respectively.
    :param tid: thread ID for verbosing purposes
    :param verbose:
    :return: (Kr, Kn), each (n_channels, len(X['root']), len(Y['root'])).
    """
    Kr = np.zeros((n_channels,len(X['root']),len(Y['root'])), dtype=np.float64) # root kernel
    Kn = Kr.copy()
    # x = X['root'][0][0]  # an arbitrary feature vector
    # p = 1.  # normalization factor
    # if np.abs(1. - np.abs(x).sum()) <= 1e-6:
    #     p = 1./len(x)
    # elif np.abs(1. - np.sqrt(np.dot(x,x))) <= 1e-6:
    #     p = 1./np.sqrt(len(x))
    for pid,(i,j) in enumerate(points):
        if verbose:
            print('[_intersection_kernel_batch] Thread %d, progress = %.1f%%]' % (job_id,100.*(pid+1)/len(points)))
        for k in xrange(n_channels):
            Kr[k,i,j] = np.minimum(X['root'][i][k], Y['root'][j][k]).sum() # * p
        # pair-wise intersection of edges' histograms
        sum_nodes = np.zeros((n_channels,), dtype=np.float64)
        for node_i in xrange(len(X['nodes'][i])):
            for node_j in xrange(len(Y['nodes'][j])):
                for k in xrange(n_channels):
                    sum_nodes[k] += np.minimum(X['nodes'][i][node_i][k], Y['nodes'][j][node_j][k]).sum() # * p
        # average over all node pairs
        for k in xrange(n_channels):
            Kn[k,i,j] = sum_nodes[k] / (len(X['nodes'][i]) * len(Y['nodes'][j]))
    return Kr, Kn
def _intersection_kernel(Xr, Yr, Xn, Yn, n_channels=1, job_id=None, verbose=False):
K = np.zeros((n_channels,2), dtype=np.float64)
if verbose and (job_id % 10 == 0):
print('[_intersection_kernel] Job id %d, progress = ?]' % (job_id))
for k in xrange(n_channels):
K[k,0] = np.minimum(Xr[k], Yr[k]).sum() # * p
# pair-wise intersection of edges' histograms
for node_i in xrange(len(Xn)):
for node_j in xrange(len(Yn)):
for k in xrange(n_channels):
K[k,1] += np.minimum(Xn[node_i][k], Yn[node_j][k]).sum() # * p
for k in xrange(n_channels):
K[k,1] /= (len(Xn) * len(Yn))
return job_id, K
def _chisquare_kernel(Xr, Yr, Xn, Yn, gamma=1.0, n_channels=1, job_id=None, verbose=False):
'''
Data is assumed to be non-negative and L-normalized
:param Xr:
:param Yr:
:param Xn:
:param Yn:
:param gamma:
:param n_channels:
:param job_id:
:param verbose:
:return:
'''
K = np.zeros((n_channels,2), dtype=np.float64)
if verbose and (job_id % 10 == 0):
print('[_chisquare_kernel] Job id %d, progress = ?]' % (job_id))
for k in xrange(n_channels):
div = Xr[k] + Yr[k]
div[div < 1e-7] = 1.0
K[k,0] = np.sum(np.power(Xr[k] - Yr[k],2) / div)
# pair-wise intersection of edges' histograms
for node_i in xrange(len(Xn)):
for node_j in xrange(len(Yn)):
for k in xrange(n_channels):
div = Xn[node_i][k] + Yn[node_j][k]
div[div < 1e-7] = 1.0
K[k,1] += np.sum( np.power(Xn[node_i][k] - Yn[node_j][k],2) / div )
for k in xrange(n_channels):
K[k,1] /= (len(Xn) * len(Yn))
return job_id, K
def _linear_kernel(Xr, Yr, Xn, Yn, n_channels=1, job_id=None, verbose=False):
'''
:param Xr:
:param Yr:
:param Xn:
:param Yn:
:param gamma:
:param n_channels:
:param job_id:
:param verbose:
:return:
'''
K = np.zeros((n_channels,2), dtype=np.float64)
if verbose and (job_id % 10 == 0):
print('[_linear_kernel] Job id %d, progress = ?]' % (job_id))
for k in xrange(n_channels):
K[k,0] = np.dot(Xr[k],Yr[k])
# pair-wise intersection of edges' histograms
for node_i in xrange(len(Xn)):
for node_j in xrange(len(Yn)):
for k in xrange(n_channels):
K[k,1] += np.dot(Xn[node_i][k], Yn[node_j][k])
for k in xrange(n_channels):
K[k,1] /= (len(Xn) * len(Yn))
return job_id, K
| {
"repo_name": "aclapes/darwintree",
"path": "kernels.py",
"copies": "1",
"size": "35342",
"license": "bsd-3-clause",
"hash": 3865142625312993000,
"line_mean": 42.6320987654,
"line_max": 184,
"alpha_frac": 0.4926433139,
"autogenerated": false,
"ratio": 3.5967840423366577,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9561054760398429,
"avg_score": 0.0056745191676458185,
"num_lines": 810
} |
__author__ = 'aclapes'
import numpy as np
import cv2
import cv
import argparse
from os import listdir, rename
from os.path import splitext, join, isfile
import time
import random
import subprocess
from joblib import delayed, Parallel
from os.path import isfile, exists
from os import makedirs
import cPickle
# Global configuration for the clip-extraction pipeline.
PARAMETERS = dict(
    face_cascade='haarcascade_frontalface_alt.xml', # OpenCV Haar cascade model file (faces)
    eye_cascade='haarcascade_eye.xml', # OpenCV Haar cascade model file (eyes)
    bar_height=15, # (in pixels)
    fadding=0.15, # video and audio in-out fadding time in a segment (in seconds)
    seg_effective_duration=15, # segments' duration without fadding (in seconds)
    greetings_duration=0.5, # hello and goodbye segment duration (seconds)
    video_extension=['.mp4'], # file extensions accepted as input videos
    # face_displacement_threshold=0.75,
    #video_score_threshold=0.33, # 0-1 range
    segment_score_threshold=0.75, # 0-1 range
    cuts_per_video=6, # max number of clips cut per video
    time_gran_secs=1, # face-detection sampling granularity (seconds)
)
def process(external_parameters, nt=1):
    """Cut single-speaker clips from every video in the input directory.

    Creates the output tree (clips/ and clip_metadata/ under
    segments_dir_path) and runs process_file over all input files on a
    joblib thread pool.

    BUGFIX: the two subdirectories were previously created only when the
    output directory already existed (they sat in the `else` branch), so a
    fresh run crashed as soon as process_file tried to write the first
    clip. They are now created unconditionally.
    """
    output_path = external_parameters['segments_dir_path']
    if not exists(output_path):
        makedirs(output_path)
    if not exists(join(output_path, 'clips/')):
        makedirs(join(output_path, 'clips/'))
    if not exists(join(output_path, 'clip_metadata/')):
        makedirs(join(output_path, 'clip_metadata/'))
    # get a list of files to process
    dirlist = listdir(external_parameters['videos_dir_path'])
    # process all the files (threads: the work is I/O and native code)
    Parallel(n_jobs=nt, backend='threading')(delayed(process_file)(file, external_parameters)
                                             for file in dirlist)
def process_file(file, external_parameters):
    """Extract up to PARAMETERS['cuts_per_video'] single-speaker clips from one video.

    Skips files whose extension is not accepted and videos already
    processed (their metadata pickle exists). For each selected segment an
    ffmpeg cut is written under clips/ and segment metadata is pickled
    under clip_metadata/.
    """
    videos_path = external_parameters['videos_dir_path']
    output_path = external_parameters['segments_dir_path']
    # effective clip length includes the in and out faddings
    clip_duration = PARAMETERS['seg_effective_duration'] + 2*PARAMETERS['fadding']
    # only process recognized video extensions
    if splitext(file)[1] not in PARAMETERS['video_extension']:
        return
    input_videofile_path = join(videos_path, file)
    file_parts = splitext(file)
    output_metadata_path = join(output_path, 'clip_metadata/', file_parts[0] + '.pkl')
    if isfile(output_metadata_path):
        print('%s -> OK (already processed)' % file_parts[0])
        return
    print('Processing %s ..' % file_parts[0])
    video_cap = cv2.VideoCapture(input_videofile_path)
    fps = video_cap.get(cv.CV_CAP_PROP_FPS)
    st_total_time = time.time()
    try:
        st_sub_time = time.time()
        clips, remaining_segs = get_random_clips(video_cap, clip_duration, \
                                                 n=PARAMETERS['cuts_per_video'], \
                                                 time_gran_secs=PARAMETERS['time_gran_secs'])
        print('[Segment generation] Time took: %.2f' % (time.time() - st_sub_time))
    except cv2.error:
        print('%s -> ERROR' % input_videofile_path)
        return
    st_sub_time = time.time()
    for i, clip in enumerate(clips):
        output_cutfile_path = file_parts[0] + '.' + str(i).zfill(3) + file_parts[1]
        # BUGFIX: input_videofile_path already contains videos_path; the
        # previous join(videos_path, input_videofile_path) doubled the
        # prefix for relative paths (e.g. 'videos/videos/x.mp4').
        cut_videofile(input_videofile_path, \
                      join(output_path, 'clips/', output_cutfile_path), \
                      int(clip[0]/fps), clip_duration, \
                      fade_in=PARAMETERS['fadding'], fade_out=PARAMETERS['fadding'])
    print('[Encoding clips] Time took: %.2f secs' % (time.time() - st_sub_time))
    # Save beginning-end of segments (and run parameters) into pickle
    with open(output_metadata_path, 'w') as f:
        cPickle.dump(
            dict(
                clips=clips, \
                remaining_segs=remaining_segs, \
                fps=fps, \
                total_frames=video_cap.get(cv.CV_CAP_PROP_FRAME_COUNT), \
                parameters_dict=PARAMETERS
            ), f)
    print('%d clips generated -> DONE (Total time took: %.2f secs)' % (len(clips),time.time()-st_total_time))
def display_mosaic_and_ask_oracle(cap, counts, steps, nx=5, ny=5):
    """Show an nx-by-ny mosaic of sampled frames and ask the user to accept or reject.

    Each tile is a frame taken at position steps[idx]; its border is green
    when counts[idx] == 1 (exactly one face) and red otherwise. Blocks
    until the user presses 'q' (accept, returns 0) or 'r' (reject,
    returns -1).

    NOTE(review): `global small_img` keeps the last tile in a module-level
    variable — looks like a leftover; nothing here requires it.
    """
    global small_img
    _, frame_size, _ = get_capture_info(cap)
    side_size = max(frame_size)  # NOTE(review): unused
    # sample at most nx*ny tile indices, kept in ascending order
    if nx*ny < len(counts):
        sorted_sample = sorted(random.sample(range(len(counts)), nx*ny), key=lambda x:x)
    else:
        sorted_sample = range(len(counts))
    M = np.zeros((360,640,3), dtype=np.uint8)  # the mosaic canvas
    h = M.shape[0]/ny  # tile height (Py2 integer division)
    w = M.shape[1]/nx  # tile width
    for i in xrange(ny):
        for j in xrange(nx):
            # gray placeholder used when there is no sample for this cell
            small_img = np.zeros((h,w,3), dtype=np.uint8) + 127
            if i*nx+j < len(sorted_sample):
                idx = sorted_sample[i*nx+j]
                cap.set(cv.CV_CAP_PROP_POS_FRAMES, int(steps[idx]))
                # retrieve() without grab() — presumably decodes the frame at
                # the seeked position; TODO confirm it is not a stale frame
                ret, img = cap.retrieve()
                if ret:
                    small_img = cv2.resize(img, (w,h), interpolation = cv2.INTER_CUBIC)
                    thickness = 3
                    # green border = exactly one face, red otherwise
                    cv2.rectangle(small_img, \
                                  (int(thickness/2),int(thickness/2)), \
                                  (small_img.shape[1]-int(thickness/2)-1, small_img.shape[0]-int(thickness/2)-1), \
                                  (0, 255 if counts[idx] == 1 else 0, 0 if counts[idx] == 1 else 255), \
                                  thickness)
            M[i*h:(i+1)*h,j*w:(j+1)*w] = small_img
    ret = 0
    cv2.namedWindow('mosaic')
    cv2.imshow('mosaic', M)
    # block until the oracle answers: 'r' rejects (-1), 'q' accepts (0)
    while True:
        if cv2.waitKey(1) & 0xFF == ord('r'):
            ret = -1
            break
        elif cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyWindow('mosaic')
    return ret
def moving_average_filter_in_1d_vec(x, kernel_size=5):
    """Centered moving average over a 1-D sequence using a running sum.

    Only 'valid' positions (a full window fits) are filled; the
    kernel_size//2 borders on each side are left at 0. Returns a float32
    array the same length as x.

    BUGFIX: `kernel_size/2` relied on Python-2 integer division — under
    Python 3 it yields a float and breaks range(); `//` behaves identically
    on both.
    """
    half = kernel_size // 2
    v = np.zeros(len(x), dtype=np.float32)
    # running window sum, updated in O(1) per step
    acc = np.sum(x[:kernel_size], dtype=np.float32)
    for i in range(half, len(x) - half - 1):
        v[i] = acc / kernel_size
        acc += x[i + half + 1]  # slide window: add entering element
        acc -= x[i - half]      # ... and drop the leaving one
        v[i + 1] = acc / kernel_size
    return v
def get_random_clips(video_cap, duration, n=3, time_gran_secs=1.0, candidates=None):
    """
    Randomly pick up to n non-overlapping clip segments centered on
    positions where (mostly) exactly one face is visible.

    :param video_cap:
    :param duration: clip duration in seconds
    :param n: number of middle segment cuts per video
    :param time_gran_secs: time granularity in seconds. if 1, face detection is performed every second; if 2, every two seconds, if 0.5, every half second
    :param candidates: precomputed candidate center indices (in detection
        steps); if None they are computed from scratch.
    :return: (segs, candidates) — list of (start_frame, end_frame) pairs
        and the candidate centers left unused (can be fed back in).
    """
    fps = video_cap.get(cv.CV_CAP_PROP_FPS)
    step = seconds_to_num_frames(duration, fps)  # clip length in frames
    fskip = time_gran_secs*fps # that is performing face detection once per second of video
    # clip length expressed in detection steps
    segment_kernel = int(step/fskip)
    if candidates is None:
        counts, cuts, faces = count_speakers_in_video_cuts(video_cap, frameskip=fskip)
        # Filtering criteria
        # -------
        validness = np.ones((len(counts),), dtype=np.int32)
        # (1) one and only speaker
        validness[counts != 1] = 0
        # -------
        # not probable to find contiguious perfect segments, but good enough segments (as many as possible valid frames)
        validness_avg = moving_average_filter_in_1d_vec(validness,kernel_size=segment_kernel)
        candidates = np.where(validness_avg > PARAMETERS['segment_score_threshold'])[0]
    segs = []
    # draw random centers; after each draw remove every candidate whose
    # window would overlap the chosen one
    while len(segs) < n and len(candidates) >= segment_kernel:
        # center_seg = random.sample(candidates, 1)[0]
        idx = candidates[random.randint(0,len(candidates)-1)]
        # convert from detection-step indices back to frame numbers
        segs.append(((idx - int(segment_kernel/2)) * fskip, (idx + int(segment_kernel/2) + 1) * fskip))
        mask = (candidates > idx + int(segment_kernel/2)) | (candidates < idx - int(segment_kernel/2))
        candidates = candidates[np.where(mask)[0]] # update with the remaining candidates
    return segs, candidates
def count_speakers_in_video_cuts(cap, start=-1, end=-1, frameskip=1):
    """Count detected faces every `frameskip` frames in [start, end).

    Returns (counts, cuts, faces): per-sample face counts, the frame ids
    they were taken at (the last entry uses nframes as sentinel), and the
    raw face rectangles.
    """
    _, frame_size, nframes = get_capture_info(cap)
    height = frame_size[1]
    # default/degenerate bounds: whole video
    if start < 0:
        start = 0
    if start >= end:
        end = nframes
    counts = []
    cuts = []
    faces = []
    # detection downscale factor targeting ~240px height
    # (Py2 integer division in height/240 — TODO confirm intended rounding)
    r = 1.0 / (height/240)
    cap.set(cv.CV_CAP_PROP_POS_FRAMES, start)
    while (True if end == 0 else cap.get(cv.CV_CAP_PROP_POS_FRAMES) < end):
        fid = int(cap.get(cv.CV_CAP_PROP_POS_FRAMES))
        # retrieve() without grab() — presumably decodes the frame at the
        # seeked position; TODO confirm
        ret, img = cap.retrieve()
        if not ret:
            break
        faces_f, _ = detect_faces_and_contained_eyes(img, r=r)
        counts.append(len(faces_f))
        cuts.append(fid)
        faces.append(faces_f)
        cap.set(cv.CV_CAP_PROP_POS_FRAMES, fid + frameskip) # advance forward
    # closing sample: re-detect on the last decoded frame and anchor it at
    # the video's end so the samples cover the whole duration
    faces_f, _ = detect_faces_and_contained_eyes(img, r=r)
    counts.append(len(faces_f))
    cuts.append(nframes)
    faces.append(faces_f)
    return np.array(counts), np.array(cuts), faces
def detect_faces_and_contained_eyes(img, r=1.0):
    """
    Detect faces, discarding hypotheses that do not contain at least one eye
    (false-positive filter).

    Parameters
    ----------
    img : colored input image where the faces must be detected
    r : resizing factor of img (to speed up the detections)

    Return
    ------
    faces, eyes : the faces and eyes detected (and filtered); coordinates
        refer to the image downscaled by r.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    height, width = gray.shape[:2]
    gray = cv2.resize(gray,(int(r*width), int(r*height)), interpolation = cv2.INTER_CUBIC)
    # NOTE(review): min/max detection sizes are derived from the ORIGINAL
    # (pre-resize) dimensions while detection runs on the resized image —
    # TODO confirm this is intended.
    min_side = height if height < width else width
    # PERF: constructing a CascadeClassifier parses an XML model file; cache
    # both classifiers across calls instead of reloading them per frame.
    cascades = getattr(detect_faces_and_contained_eyes, '_cascades', None)
    if cascades is None:
        cascades = (cv2.CascadeClassifier(PARAMETERS['face_cascade']),
                    cv2.CascadeClassifier(PARAMETERS['eye_cascade']))
        detect_faces_and_contained_eyes._cascades = cascades
    face_cascade, eye_cascade = cascades
    # Get an initial set of face hypthesis
    faces_h = face_cascade.detectMultiScale(gray, scaleFactor=1.05, minNeighbors=4, \
                                            minSize=(int(min_side/10),int(min_side/10)), maxSize=(min_side,min_side), \
                                            flags = cv.CV_HAAR_SCALE_IMAGE) # naive detection
    # Confirm hypothesis detecting eyes within those faces
    faces = [] # final set of faces
    eyes = []
    for face in faces_h:
        x,y,w,h = face
        face_eyes = eye_cascade.detectMultiScale(gray[y:y+h,x:x+w])
        if len(face_eyes) > 0: # at least one eye detected
            faces.append(face)
            eyes.append(face_eyes)
    return faces, eyes
def play_video(cap, start=-1, end=-1, frameskip=1, repeat=True, detect_faces=True):
    """Play frames [start, end) in an OpenCV window, optionally overlaying
    face/eye detections and the per-frame detection-count bar.

    Loops forever when repeat is True; pressing 'q' stops playback.
    """
    if not cap.isOpened() or start > end:
        return
    fps, frame_size, nframes = get_capture_info(cap)
    height = frame_size[1]
    # default/degenerate bounds: whole video
    if start < 0:
        start = 0
    if start >= end:
        end = nframes
    first_play = True
    while first_play or repeat:
        if detect_faces:
            # -1 marks frames outside [start, end] (not being checked)
            detection_counts = np.zeros((nframes,), dtype=np.int32) # -1 means we are not checking it
            detection_counts[:start-1] = -1
            detection_counts[end+1:] = -1
        fid = start
        cap.set(cv.CV_CAP_PROP_POS_FRAMES, start)
        while cap.grab() and cap.get(cv.CV_CAP_PROP_POS_FRAMES) < end:
            ret, img = cap.retrieve()
            if detect_faces:
                # downscale factor targeting ~240px height (Py2 int division)
                r=1.0/(height/240)
                faces, eyes = detect_faces_and_contained_eyes(img, r=r)
                # skipped frames inherit the current detection count
                detection_counts[fid:fid+frameskip] = len(faces)
                draw_faces_and_eyes(faces, eyes, img, r=r)
                draw_detection_counts(fid, detection_counts, img)
            cv2.imshow('frame', img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                repeat = False
                break
            # -1 compensates the frame consumed by grab()
            cap.set(cv.CV_CAP_PROP_POS_FRAMES, cap.get(cv.CV_CAP_PROP_POS_FRAMES) + frameskip - 1)
            fid += frameskip
        first_play = False
    cv2.destroyAllWindows()
# def write_video(cap, filename, start=-1, end=-1, frameskip=1):
# if not cap.isOpened() or start > end:
# return
#
# fourcc = int(cap.get(cv.CV_CAP_PROP_FOURCC))
# fps = int(cap.get(cv.CV_CAP_PROP_FPS))
# frame_size = (int(cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)))
# writer = cv2.VideoWriter(filename, fourcc, fps, frame_size, isColor=True)
# if not writer.isOpened():
# return
#
# fps, frame_size, nframes = get_capture_info(cap)
# height = frame_size[1]
#
# if start < 0:
# start = 0
# if start >= end:
# end = nframes
#
# fid = start
# cap.set(cv.CV_CAP_PROP_POS_FRAMES, start)
# while cap.grab() and cap.get(cv.CV_CAP_PROP_POS_FRAMES) < end:
# ret, img = cap.retrieve()
#
# # write img to disk
# writer.write(img)
#
# cap.set(cv.CV_CAP_PROP_POS_FRAMES, cap.get(cv.CV_CAP_PROP_POS_FRAMES) + frameskip - 1)
# fid += frameskip
#
# writer.release()
# return
def cut_videofile(input_videofile_path, output_cutfile_path, start, duration, \
                  fade_in=-1, fade_out=-1, verbose=False):
    '''Cut a [start, start+duration] clip out of a video with ffmpeg.

    NOTE(review): fade_in/fade_out are accepted for interface compatibility
    but are currently NOT applied — the previous implementation built an
    ffmpeg command including '-vf'/'-af' fade filters and then immediately
    overwrote it with a plain cut command, so fades never took effect. That
    dead code has been removed; behavior is unchanged. Re-add the fade
    filter arguments to re-enable fading.

    :param input_videofile_path: source video file
    :param output_cutfile_path: destination clip file
    :param start: clip start (seconds)
    :param duration: clip duration (seconds)
    :param verbose: when False, ffmpeg runs with -loglevel quiet
    '''
    #ffmpeg -i movie.mp4 -ss 00:00:03 -t 00:00:08 -async 1 cut.mp4
    argsArray = ['ffmpeg',
                 '-i', input_videofile_path, \
                 '-ss', str(start), \
                 '-t', str(duration), \
                 '-async 1',
                 '-y', \
                 '' if verbose else '-loglevel quiet', \
                 output_cutfile_path]
    cmd = ' '.join(argsArray)
    p = subprocess.Popen(cmd, shell=True)
    p.communicate()
def draw_faces_and_eyes(faces, eyes, img, r=1.0):
    """
    Overlay face (blue) and eye (green) rectangles on img.

    Parameters
    ----------
    faces: faces returned by detect_faces_and_contained_eyes(...)
    eyes: eyes returned by detect_faces_and_contained_eyes(...)
    img: colored image where to draw the faces and eyes
    r: downscale factor the detections were computed at; every coordinate
       is divided by r to map back to full resolution
    """
    for face_idx, (x, y, w, h) in enumerate(faces):
        top_left = (int(x/r), int(y/r))
        bottom_right = (int((x+w)/r), int((y+h)/r))
        cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 2)
        # eye coordinates are relative to the face ROI
        face_roi = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
        for (ex, ey, ew, eh) in eyes[face_idx]:
            cv2.rectangle(face_roi,
                          (int(ex/r), int(ey/r)),
                          (int((ex+ew)/r), int((ey+eh)/r)),
                          (0, 255, 0), 2)
def draw_detection_counts(ptr, counts, img):
    """Render a per-frame status bar along the top of img.

    White = no face, green = exactly one face, red = several faces.
    `ptr` is unused (kept for interface compatibility).
    """
    height, width = img.shape[:2]
    strip = np.zeros((PARAMETERS['bar_height'], len(counts), 3), dtype=np.uint8)
    # one column per frame, colored by its detection count
    strip[:, counts == 0] = (255, 255, 255)
    strip[:, counts == 1] = (0, 192, 0)
    strip[:, counts > 1] = (0, 0, 192)
    # stretch to the image width and paste over the top rows
    img[:PARAMETERS['bar_height'], :] = cv2.resize(strip, (width, PARAMETERS['bar_height']))
def get_capture_info(cap):
    """Return (fps, (width, height), frame_count) of an opened VideoCapture."""
    fps = int(cap.get(cv.CV_CAP_PROP_FPS))
    width = int(cap.get(cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
    nframes = int(cap.get(cv.CV_CAP_PROP_FRAME_COUNT))
    return fps, (width, height), nframes
def seconds_to_num_frames(time_seconds, fps=24):
    """Convert a duration in seconds to the equivalent number of frames."""
    num_frames = fps * time_seconds
    return num_frames
if __name__ == "__main__":
    # parse the input arguments
    parser = argparse.ArgumentParser(description='Process the videos to see whether they contain speaking-while-facing-a-camera scenes.')
    parser.add_argument('--videos-dir-path', help='the directory where videos are downloaded.')
    parser.add_argument('--segments-dir-path', help='the directory where to output segments.')
    # BUGFIX: help text was copy-pasted from --segments-dir-path
    parser.add_argument('--num-threads', help='the number of worker threads to use.')
    args = parser.parse_args()
    # defaults, overridable from the command line
    external_parameters = dict()
    external_parameters['videos_dir_path'] ='videos/'
    if args.videos_dir_path:
        external_parameters['videos_dir_path'] = args.videos_dir_path
    external_parameters['segments_dir_path'] ='segments/'
    if args.segments_dir_path:
        external_parameters['segments_dir_path'] = args.segments_dir_path
    nt = 1
    if args.num_threads:
        nt = int(args.num_threads)
    # PROCEED extracting clips from the downloaded videos
    process(external_parameters=external_parameters, nt=nt)
| {
"repo_name": "aclapes/clip-extractor",
"path": "videocapture_test.py",
"copies": "1",
"size": "18376",
"license": "mit",
"hash": -1684113174936290000,
"line_mean": 35.9738430584,
"line_max": 154,
"alpha_frac": 0.5866891598,
"autogenerated": false,
"ratio": 3.3050359712230217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43917251310230215,
"avg_score": null,
"num_lines": null
} |
__author__ = 'aclapes'
import numpy as np
import subprocess
from sklearn.neighbors import KDTree
import time
import cPickle
from os.path import isfile, exists, join
from os import makedirs
import fileinput
from spectral_division import build_geom_neighbor_graph
import pyflann
from joblib import delayed, Parallel
import sys
# some hard-coded constants
FEATURE_EXTRACTOR_RELPATH = 'release/'  # working dir containing the DenseTrackStab binary
INTERNAL_PARAMETERS = dict(
    L = 15,  # trajectory length (frames)
    feats_dict = dict(  # dimensionality of each per-tracklet feature type
        obj = 10,
        trj = 2, # 2-by-L actually
        hog = 96,
        hof = 108,
        mbh = 192
    ),
    indices = dict(  # column indices of the tracklet's mean x/y position
        meanx = 1,
        meany = 2
    )
)
def extract(fullvideonames, videonames, feat_types, tracklets_path, verbose=False):
    """Extract improved-dense-trajectory features for every video (sequentially)."""
    all_indices = np.arange(len(videonames))
    _extract(fullvideonames, videonames, all_indices, feat_types, tracklets_path, verbose=verbose)
def extract_multiprocess(fullvideonames, videonames, st, num_videos, feat_types, tracklets_path, verbose=False):
    """Extract features for the contiguous chunk [st, st+num_videos) of videos.

    Intended for splitting the workload across separate processes.

    BUGFIX: np.arange yields the same index values as the previous
    np.linspace(st, st+num_videos-1, num_videos) but as integers — linspace
    produced floats, which are rejected as array indices by modern NumPy.
    """
    inds = np.arange(st, st + num_videos)
    _extract(fullvideonames, videonames, inds, feat_types, tracklets_path, verbose=verbose)
def extract_multithread(fullvideonames, videonames, feat_types, tracklets_path, nt=4, verbose=False):
    """Thread-parallel feature extraction: one joblib job per video."""
    video_inds = np.arange(len(videonames)).astype('int')
    Parallel(n_jobs=nt, backend='threading')(
        delayed(_extract)(fullvideonames, videonames, [i], feat_types, tracklets_path, verbose=verbose)
        for i in video_inds)
def _extract(fullvideonames, videonames, indices, feat_types, tracklets_path, verbose=False):
    """
    Extract features using Improved Dense Trajectories by Wang et. al.

    For each selected video: run the external extractor into a temporary
    .dat file (skipped if it exists), parse it into a float32 matrix, and
    pickle each feature-type column slice under its own subdirectory.
    Videos whose per-feature pickles all exist already are skipped.

    :param fullvideonames: full paths to the input videos
    :param videonames: basenames used to name the intermediate/output files
    :param indices: indices (into the name lists) of the videos to process
    :param feat_types: unused here — the feature set comes from INTERNAL_PARAMETERS
    :param tracklets_path: output root directory
    :return:
    """
    feats_beginend = get_features_beginend(INTERNAL_PARAMETERS['feats_dict'], INTERNAL_PARAMETERS['L'])
    # prepare output directories (OSError = already exists, ignored)
    try:
        makedirs(tracklets_path)
    except OSError:
        pass
    try:
        makedirs(join(tracklets_path, 'tmp'))
    except OSError:
        pass
    for feat_t in feats_beginend.keys():
        try:
            makedirs(join(tracklets_path, feat_t))
        except OSError:
            pass
    # process the videos
    total = len(fullvideonames)
    for i in indices:
        # skip if every feature type was already extracted for this video
        all_done = np.all([isfile(join(tracklets_path, feat_t, videonames[i] + '.pkl'))
                           for feat_t in feats_beginend.keys()])
        if all_done:
            if verbose:
                print('[_extract] %s -> OK' % fullvideonames[i])
            continue
        start_time = time.time()
        # extract the features into temporary file
        tracklets_filepath = join(tracklets_path, 'tmp/', videonames[i] + '.dat')
        if not isfile(tracklets_filepath):
            extract_wang_features(fullvideonames[i], INTERNAL_PARAMETERS['L'], tracklets_filepath)
        # read the temporary file to numpy array (one tab-separated row per tracklet)
        finput = fileinput.FileInput(tracklets_filepath)
        data = []
        for line in finput:
            row = np.array(line.strip().split('\t'), dtype=np.float32)
            data.append(row)
        finput.close()
        try:
            data = np.vstack(data)
        except ValueError:
            # empty row
            sys.stderr.write("[Error] Reading tracklets file: " + tracklets_filepath + '\n')
            sys.stderr.flush()
            continue
        # filter low density tracklets
        # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit
        try:
            inliers = filter_low_density(data)
        except:
            sys.stderr.write("[Error] Filtering low density: " + tracklets_filepath + '\n')
            sys.stderr.flush()
            continue
        # store feature types separately
        # NOTE(review): per the TODO below, `inliers` is computed but NOT
        # applied — all rows are stored, not just the density inliers
        for feat_t in feats_beginend.keys():
            with open(join(tracklets_path, feat_t, videonames[i] + '.pkl'),'wb') as f:
                cPickle.dump(data[:, feats_beginend[feat_t][0]:feats_beginend[feat_t][1]], f) # TODO: : -> inliners
        elapsed_time = time.time() - start_time
        if verbose:
            print('[_extract] %s -> DONE (in %.2f secs)' % (videonames[i], elapsed_time))
# ==============================================================================
# Helper functions
# ==============================================================================
def get_features_beginend(feats_dict, L):
    """Map each feature type to its (begin, end) column range in a tracklet row.

    The trajectory ('trj') block occupies trj-dims-per-frame times L columns;
    the ranges are laid out consecutively in the fixed order
    obj, trj, hog, hof, mbh.
    """
    ordered_dims = [('obj', feats_dict['obj']),
                    ('trj', feats_dict['trj'] * L),
                    ('hog', feats_dict['hog']),
                    ('hof', feats_dict['hof']),
                    ('mbh', feats_dict['mbh'])]
    feats_beginend = {}
    offset = 0
    for feat_name, dim in ordered_dims:
        feats_beginend[feat_name] = (offset, offset + dim)
        offset += dim
    return feats_beginend
# Version using precomputed optical flow (stored in .flo files)
def extract_wang_features(videofile_path, traj_length, output_features_path):
    ''' Use external program (DenseTrack) to extract the features.

    Runs ./DenseTrackStab from FEATURE_EXTRACTOR_RELPATH and redirects its
    stdout (the tracklet rows) into output_features_path.

    BUGFIXES: the output file is now opened via `with`, so the descriptor
    is released even if Popen raises (it previously leaked); the IOError
    message now reports the file that actually failed to open
    (output_features_path, not the input video).
    '''
    argsArray = ['./DenseTrackStab', videofile_path, \
                 '-L', str(traj_length)] # DenseTrackStab is not accepting parameters, hardcoded the L in there
    try:
        with open(output_features_path,'wb') as f:
            proc = subprocess.Popen(' '.join(argsArray), cwd=FEATURE_EXTRACTOR_RELPATH, shell=True, stdout=f)
            proc.communicate()
    except IOError:
        sys.stderr.write('[Error] Cannot open file for writing: %s\n' % output_features_path)
def filter_low_density(data, k=30, r=5):
    """
    Filter out low density tracklets from the sequence.

    For each tracklet, local sparsity is the mean distance to its k nearest
    neighbours in (mean-x, mean-y) space, searched only among tracklets whose
    column-0 value lies within +/- r of the current one (column 0 is presumably
    a frame index -- TODO confirm against the extractor output format).
    Tracklets whose sparsity exceeds mean + 1 std-dev over all tracklets are
    dropped.

    :param data: the tracklets, a T x num_features matrix.
    :param k: number of nearest neighbours used to estimate local sparsity.
    :param r: half-width of the column-0 window used to pick candidate neighbours.
    :return: indices (np.ndarray) of the rows of `data` kept as inliers.
    """
    # each tracklet's mean x and y position
    P = data[:,[INTERNAL_PARAMETERS['indices']['meanx'],INTERNAL_PARAMETERS['indices']['meany']]] # (these are index 1 and 2 of data)
    # One row per tracklet, one column per neighbour distance (NaN = missing).
    all_sparsities = np.zeros((P.shape[0],k), dtype=np.float32)
    subset_indices = [] # optimization. see (*) below
    for i in range(0, P.shape[0]):
        # Candidate neighbours: tracklets temporally close to tracklet i.
        new_subset_indices = np.where((data[:,0] >= data[i,0] - r) & (data[:,0] <= data[i,0] + r))[0]
        if len(new_subset_indices) == 1:
            # Only the tracklet itself in the window: no neighbours to measure.
            all_sparsities[i,:] = np.nan
        else:
            # (*) a new KDTree is constructed only if the subset of data changes
            if not np.array_equal(new_subset_indices, subset_indices):
                subset_indices = new_subset_indices
                tree = KDTree(P[subset_indices,:], leaf_size=1e3)
            p = P[i,:].reshape(1,-1) # query instance
            if k+1 <= len(subset_indices):
                # k+1 because the query point itself comes back as neighbour 0.
                dists, inds = tree.query(p, k=k+1)
                dists = dists[0,1:] # asked the neighbors of only 1 instance, returned in dists as 0-th element
            else: #len(subset_indices) > 1:
                # Fewer than k neighbours available: pad the row with NaNs so
                # nanmean below ignores the missing entries.
                dists, inds = tree.query(p, k=len(subset_indices))
                dists = np.concatenate([dists[0,1:], [np.nan]*(k-len(dists[0,1:]))])
            all_sparsities[i,:] = dists
    local_sparsities = np.nanmean(all_sparsities, axis=1)
    mean_sparsity = np.nanmean(all_sparsities)
    stddev_sparsity = np.nanstd(all_sparsities)
    # Keep tracklets whose sparsity is at most one std-dev above the mean.
    inliers = np.where(local_sparsities <= (mean_sparsity + stddev_sparsity))[0]
    return inliers
| {
"repo_name": "aclapes/darwintree",
"path": "tracklet_extraction.py",
"copies": "1",
"size": "8094",
"license": "bsd-3-clause",
"hash": -1983553602909210000,
"line_mean": 38.8719211823,
"line_max": 134,
"alpha_frac": 0.5678280208,
"autogenerated": false,
"ratio": 3.6005338078291813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9618582469766738,
"avg_score": 0.009955871772488753,
"num_lines": 203
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
class DataReader(object):
    '''
    Reads a paired input/expected-output dataset for the practice problems.
    @:parameter a string that is the file path to the input file
    @:parameter a string that is the file path to the output file
    It only supports the following file extensions:
        for input: .in .txt .csv
        for output: .out .txt .csv
    NOTE: the auto-generated dunder overrides that merely delegated to object
    were removed; the old __new__ override forwarded *more without cls, which
    made every instantiation raise TypeError.
    '''
    def __init__(self, input_file_path, output_file_path):
        self.__input_file_path = input_file_path
        self.__output_file_path = output_file_path
        # Remember each file's extension so read() knows how to parse it.
        self.__input_file_extension = self._detect_extension(
            input_file_path, ('.in', '.txt', '.csv'), "Invalid input file format!")
        self.__output_file_extension = self._detect_extension(
            output_file_path, ('.out', '.txt', '.csv'), "Invalid output file format!")
    @staticmethod
    def _detect_extension(path, allowed, error_message):
        # Return the first allowed extension that `path` ends with; raise otherwise.
        for extension in allowed:
            if path.endswith(extension):
                return extension
        raise Exception(error_message)
    @staticmethod
    def _read_int_arrays(path):
        # One whitespace-separated integer array per non-empty line.
        # BUG FIX: files were opened 'wb' (write mode truncated them!) and a
        # nonexistent file.readall() was called; open read-only and readlines.
        with open(path, 'r') as f:
            return [[int(token) for token in line.split()]
                    for line in f if line.strip()]
    @staticmethod
    def _read_freq_arrays(path):
        # One line per case, each a space-separated list of "value:count"
        # pairs; pairs are kept as (str, str) tuples as before.
        with open(path, 'r') as f:
            return [[(pair.split(":")[0], pair.split(":")[1]) for pair in line.split()]
                    for line in f if line.strip()]
    def read(self, case):
        """
        Parse the input and expected-output files for the given problem.
        :param case: one of "sorting", "freq" or "matmul"
        :return: tuple (parsed_input, parsed_expected_output)
        """
        # As before, only .in/.txt (input) and .out/.txt (output) are parsed;
        # .csv files silently yield empty results (kept for compatibility).
        text_input = self.__input_file_extension in ('.in', '.txt')
        text_output = self.__output_file_extension in ('.out', '.txt')
        if case == "sorting":
            input_arrays_list = []
            output_arrays_list = []
            if text_input:
                input_arrays_list = self._read_int_arrays(self.__input_file_path)
            if text_output:
                output_arrays_list = self._read_int_arrays(self.__output_file_path)
            return input_arrays_list, output_arrays_list
        if case == "freq":
            input_arrays_list = []
            output_arrays_frequencies = []
            if text_input:
                input_arrays_list = self._read_int_arrays(self.__input_file_path)
            if text_output:
                output_arrays_frequencies = self._read_freq_arrays(self.__output_file_path)
            return input_arrays_list, output_arrays_frequencies
        if case == "matmul":
            input_arrays_list = []
            output_array_results = []
            if text_input:
                with open(self.__input_file_path, 'r') as f:
                    n_cases = int(f.readline())
                    for _ in range(n_cases):
                        # BUG FIX: sizes are now converted to int (range() on a
                        # string crashed) and matrix_b_size uses B's row count
                        # (it was built from matrix_a's rows by copy-paste).
                        a_rows, a_cols, b_rows, b_cols = [int(x) for x in f.readline().split()]
                        matrix_a_size = (a_rows, a_cols)
                        matrix_b_size = (b_rows, b_cols)
                        matrix_a = [[int(x) for x in f.readline().split()]
                                    for _ in range(a_rows)]
                        f.readline()  # blank separator between the two matrices
                        matrix_b = [[int(x) for x in f.readline().split()]
                                    for _ in range(b_rows)]
                        input_arrays_list.append((matrix_a_size, matrix_b_size, matrix_a, matrix_b))
            if text_output:
                with open(self.__output_file_path, 'r') as f:
                    n_cases = int(f.readline())
                    for _ in range(n_cases):
                        row_size, column_size = [int(x) for x in f.readline().split()]
                        resulted_matrix = [[int(x) for x in f.readline().split()]
                                           for _ in range(row_size)]
                        output_array_results.append((row_size, column_size, resulted_matrix))
            return input_arrays_list, output_array_results
raise Exception("Invalid problem definition!") | {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/dev/server/datasets_processing/reader.py",
"copies": "1",
"size": "6317",
"license": "mit",
"hash": -3472676337261391000,
"line_mean": 41.12,
"line_max": 129,
"alpha_frac": 0.5394965965,
"autogenerated": false,
"ratio": 4.147734734077479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5187231330577479,
"avg_score": null,
"num_lines": null
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
import unittest
from dataset_generator._mat2 import Mat2
class Mat2Test(unittest.TestCase):
    """Unit tests for the Mat2 random matrix-pair generator."""
    def setUp(self):
        # Generator configuration: element bounds and fixed 3x3 shapes.
        self.__min = 1
        self.__max = 10
        self.__mat_a_size = (3, 3)
        self.__mat_b_size = (3, 3)
        self.__mat_2 = Mat2(self.__mat_a_size, self.__mat_b_size)
    def test_class_creation(self):
        self.assertIsNotNone(self.__mat_2)
    def test_matrix_creation(self):
        mat_a, mat_b = self.__mat_2.get_matrices(self.__min, self.__max)
        self.assertIsNotNone(mat_a)
        self.assertIsNotNone(mat_b)
    def test_matrix_size(self):
        mat_a, mat_b = self.__mat_2.get_matrices(self.__min, self.__max)
        self.assertEqual(self.__mat_2.get_matrix_a_size(), self.__mat_a_size)
        # BUG FIX: matrix B's size was compared against __mat_a_size (the two
        # happen to be equal here, which hid the copy-paste error).
        self.assertEqual(self.__mat_2.get_matrix_b_size(), self.__mat_b_size)
    def test_matrix_elements(self):
        # Every generated element must lie within the requested [min, max] range.
        mat_a, mat_b = self.__mat_2.get_matrices(self.__min, self.__max)
        for i in range(len(mat_a)):
            for j in range(len(mat_a[0])):
                self.assertTrue(self.__max >= mat_a[i][j] >= self.__min)
        for i in range(len(mat_b)):
            for j in range(len(mat_b[0])):
                self.assertTrue(self.__max >= mat_b[i][j] >= self.__min)
if __name__ == '__main__':
unittest.main() | {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/testing/dev_test/server_test/datasets_generator_test/_mat2_test.py",
"copies": "1",
"size": "1347",
"license": "mit",
"hash": -1957273684330746000,
"line_mean": 29.6363636364,
"line_max": 77,
"alpha_frac": 0.5590200445,
"autogenerated": false,
"ratio": 3.054421768707483,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9107460951963464,
"avg_score": 0.0011961722488038277,
"num_lines": 44
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
import unittest
from dataset_generator._mat2 import Mat2
class MatmulTest(unittest.TestCase):
    """Unit tests for Mat2 when used to generate matrix-multiplication pairs."""
    def setUp(self):
        # Generator configuration: element bounds and fixed 3x3 shapes.
        self.__min = 1
        self.__max = 10
        self.__mat_a_size = (3, 3)
        self.__mat_b_size = (3, 3)
        self.__mat_2 = Mat2(self.__mat_a_size, self.__mat_b_size)
    def test_class_creation(self):
        self.assertIsNotNone(self.__mat_2)
    def test_matrix_creation(self):
        mat_a, mat_b = self.__mat_2.get_matrices(self.__min, self.__max)
        self.assertIsNotNone(mat_a)
        self.assertIsNotNone(mat_b)
    def test_matrix_size(self):
        mat_a, mat_b = self.__mat_2.get_matrices(self.__min, self.__max)
        self.assertEqual(self.__mat_2.get_matrix_a_size(), self.__mat_a_size)
        # BUG FIX: matrix B's size was compared against __mat_a_size (the two
        # happen to be equal here, which hid the copy-paste error).
        self.assertEqual(self.__mat_2.get_matrix_b_size(), self.__mat_b_size)
    def test_matrix_elements(self):
        # Every generated element must lie within the requested [min, max] range.
        mat_a, mat_b = self.__mat_2.get_matrices(self.__min, self.__max)
        for i in range(len(mat_a)):
            for j in range(len(mat_a[0])):
                self.assertTrue(self.__max >= mat_a[i][j] >= self.__min)
        for i in range(len(mat_b)):
            for j in range(len(mat_b[0])):
                self.assertTrue(self.__max >= mat_b[i][j] >= self.__min)
    def test_mul_validation(self):
        # A's column count must equal B's row count for multiplication.
        mat_a, mat_b = self.__mat_2.get_matrices(self.__min, self.__max)
        self.assertEqual(len(mat_a[0]), len(mat_b))
if __name__ == '__main__':
unittest.main() | {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/testing/dev_test/server_test/datasets_generator_test/_matmul_test.py",
"copies": "1",
"size": "1511",
"license": "mit",
"hash": 261586976643963700,
"line_mean": 29.8571428571,
"line_max": 77,
"alpha_frac": 0.5625413633,
"autogenerated": false,
"ratio": 3.0402414486921527,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4102782811992153,
"avg_score": null,
"num_lines": null
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
class DataReader(object):
    def __init__(self, input_file_path, output_file_path):
        """
        Reads a paired input/expected-output dataset for the practice problems.
        @:parameter a string that is the file path to the input file
        @:parameter a string that is the file path to the output file
        It only supports the following file extensions:
            for input: .in .txt .csv
            for output: .out .txt .csv
        NOTE: the auto-generated dunder overrides that merely delegated to
        object were removed; the old __new__ override forwarded *more without
        cls, which made every instantiation raise TypeError.
        """
        self.__input_file_path = input_file_path
        self.__output_file_path = output_file_path
        # Remember each file's extension so read() knows how to parse it.
        self.__input_file_extension = self._detect_extension(
            input_file_path, ('.in', '.txt', '.csv'), "Invalid input file format!")
        self.__output_file_extension = self._detect_extension(
            output_file_path, ('.out', '.txt', '.csv'), "Invalid output file format!")
    @staticmethod
    def _detect_extension(path, allowed, error_message):
        # Return the first allowed extension that `path` ends with; raise otherwise.
        for extension in allowed:
            if path.endswith(extension):
                return extension
        raise Exception(error_message)
    @staticmethod
    def _read_int_arrays(path):
        # One whitespace-separated integer array per non-empty line.
        # BUG FIX: files were opened 'wb' (write mode truncated them!) and a
        # nonexistent file.readall() was called; open read-only and iterate.
        with open(path, 'r') as f:
            return [[int(token) for token in line.split()]
                    for line in f if line.strip()]
    @staticmethod
    def _read_freq_arrays(path):
        # One line per case, each a space-separated list of "value:count"
        # pairs; pairs are kept as (str, str) tuples as before.
        with open(path, 'r') as f:
            return [[(pair.split(":")[0], pair.split(":")[1]) for pair in line.split()]
                    for line in f if line.strip()]
    def read(self, case):
        """
        Parse the input and expected-output files for the given problem.
        :param case: one of "sorting", "freq" or "matmul"
        :return: tuple (parsed_input, parsed_expected_output)
        :raises Exception: for an unknown case name
        """
        # As before, only .in/.txt (input) and .out/.txt (output) are parsed;
        # .csv files silently yield empty results (kept for compatibility).
        text_input = self.__input_file_extension in ('.in', '.txt')
        text_output = self.__output_file_extension in ('.out', '.txt')
        if case == "sorting":
            input_arrays_list = []
            output_arrays_list = []
            if text_input:
                input_arrays_list = self._read_int_arrays(self.__input_file_path)
            if text_output:
                output_arrays_list = self._read_int_arrays(self.__output_file_path)
            return input_arrays_list, output_arrays_list
        if case == "freq":
            input_arrays_list = []
            output_arrays_frequencies = []
            if text_input:
                input_arrays_list = self._read_int_arrays(self.__input_file_path)
            if text_output:
                output_arrays_frequencies = self._read_freq_arrays(self.__output_file_path)
            return input_arrays_list, output_arrays_frequencies
        if case == "matmul":
            input_arrays_list = []
            output_array_results = []
            if text_input:
                with open(self.__input_file_path, 'r') as f:
                    n_cases = int(f.readline())
                    for _ in range(n_cases):
                        # BUG FIX: sizes are now converted to int (range() on a
                        # string crashed) and matrix_b_size uses B's row count
                        # (it was built from matrix_a's rows by copy-paste).
                        a_rows, a_cols, b_rows, b_cols = [int(x) for x in f.readline().split()]
                        matrix_a_size = (a_rows, a_cols)
                        matrix_b_size = (b_rows, b_cols)
                        matrix_a = [[int(x) for x in f.readline().split()]
                                    for _ in range(a_rows)]
                        f.readline()  # blank separator between the two matrices
                        matrix_b = [[int(x) for x in f.readline().split()]
                                    for _ in range(b_rows)]
                        input_arrays_list.append((matrix_a_size, matrix_b_size, matrix_a, matrix_b))
            if text_output:
                with open(self.__output_file_path, 'r') as f:
                    n_cases = int(f.readline())
                    for _ in range(n_cases):
                        row_size, column_size = [int(x) for x in f.readline().split()]
                        resulted_matrix = [[int(x) for x in f.readline().split()]
                                           for _ in range(row_size)]
                        output_array_results.append((row_size, column_size, resulted_matrix))
            return input_arrays_list, output_array_results
        raise Exception("Invalid problem definition!")
| {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/release/datasets_processing/reader.py",
"copies": "1",
"size": "6353",
"license": "mit",
"hash": -88091092449932850,
"line_mean": 39.4649681529,
"line_max": 129,
"alpha_frac": 0.5364394774,
"autogenerated": false,
"ratio": 4.165901639344263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5202341116744262,
"avg_score": null,
"num_lines": null
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
class DataWriter(object):
    # Writes the computed results for a problem ("sort" / "freq" / "matmul")
    # to a single output file.
    # NOTE(review): several issues to confirm/fix with the authors:
    #   - files are opened 'wb' but str objects are written (TypeError on Py3);
    #   - `case is "sort"` compares string identity, not equality;
    #   - no newline is written between records, so all output lands on one line;
    #   - DataReader matches the case name "sorting", this class matches "sort".
    def __init__(self, file_path):
        # path of the file to write
        self.__file_path = file_path
        # NOTE(review): '.txt' is chosen when the path ends with '.out' and
        # '.csv' otherwise -- looks inverted/odd; also the value is never read.
        self.__file_extension = '.txt' if self.__file_path.endswith('.out') else '.csv'
    def write_file(self, case, output, n_cases):
        # case: problem name; output: per-case results; n_cases: number of cases
        # (only used by the "matmul" branch).
        if case is "sort":
            with open(self.__file_path, "wb") as writer:
                # One space-separated sorted array per record.
                for array in output:
                    writer.write(' '.join(map(lambda u: str(u), array)))
        if case is "freq":
            with open(self.__file_path, "wb") as writer:
                # Each record: space-separated "element:count" pairs.
                for dictionary in output:
                    line = ""
                    for item in dictionary.items():
                        line += item[0] + ":" + item[1]
                        line += " "
                    writer.write(line)
        if case is "matmul":
            with open(self.__file_path, "wb") as writer:
                # Header: number of cases; then per case its size line and rows.
                writer.write(str(n_cases))
                for i in range(n_cases):
                    item = output[i]
                    row_size, col_size, result_matrix = item[0], item[1], item[2]
                    writer.write(str(row_size) + " " + str(col_size))
                    # NOTE(review): this inner loop reuses `i`, shadowing the
                    # case index above (harmless here, but confusing).
                    for i in range(row_size):
                        line = ""
                        for j in range(col_size):
                            if j < col_size - 1:
                                line += str(result_matrix[i][j]) + " "
                            else:
                                line += str(result_matrix[i][j])
writer.write(line) | {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/release/datasets_processing/writer.py",
"copies": "1",
"size": "1586",
"license": "mit",
"hash": -8214035324896304000,
"line_mean": 35.9069767442,
"line_max": 87,
"alpha_frac": 0.419924338,
"autogenerated": false,
"ratio": 4.140992167101827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011015879563809228,
"num_lines": 43
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/22/2016}
'''
class Validator(object):
    """Static validity checks for the generated practice datasets.

    NOTE: str(x).isdigit() is False for negative numbers and floats, so these
    checks accept non-negative integers only.
    """
    @staticmethod
    def validate_custom_sorting_dataset(arrays):
        """
        Check that every element of every array is a (non-negative) integer.
        :param arrays: (array of arrays) each entry is one array to sort
        :return: True if all elements are digits, False otherwise
        """
        # BUG FIX (docs): this variant returns False rather than raising, and
        # the inner loop no longer shadows the outer index variable.
        for idx in range(len(arrays)):
            current_array = arrays[idx]
            for j in range(len(current_array)):
                if not str(current_array[j]).isdigit():
                    return False
        return True
    @staticmethod
    def validate_custom_freq_dataset(arrays):
        """
        Check that every element of every array is a (non-negative) integer.
        :param arrays: (array of arrays) each entry is one array to count
        :return: True if valid
        :raises Exception: on the first non-digit element (message carries the
            element's index within its array)
        """
        for idx in range(len(arrays)):
            current_array = arrays[idx]
            for j in range(len(current_array)):
                if not str(current_array[j]).isdigit():
                    raise Exception("Array elements can't be characters at index: " + str(j))
        return True
    @staticmethod
    def validate_custom_matmul_dataset(matrices_list):
        """
        Check that each matrix pair is compatible for multiplication
        (A's column count equals B's row count).
        :param matrices_list: (array of tuples) each tuple is (matrix_a, matrix_b)
        :return: True if valid
        :raises Exception: for the first incompatible pair (message carries the
            pair's index in the list)
        """
        for idx, item in enumerate(matrices_list):
            matrix_a = item[0]
            matrix_b = item[1]
            if len(matrix_a[0]) != len(matrix_b):
                raise Exception("Matrices aren't compatible at index: " + str(idx))
        return True
| {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/dev/server/datasets_processing/validator.py",
"copies": "1",
"size": "1814",
"license": "mit",
"hash": 8940457357034902000,
"line_mean": 29.7457627119,
"line_max": 147,
"alpha_frac": 0.5738699008,
"autogenerated": false,
"ratio": 4.319047619047619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0174457596022688,
"num_lines": 59
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/24/2016}
'''
import sys
from datasets_processing.reader import DataReader
from datasets_processing.writer import DataWriter
from practical._sort import Sort
from practical._freq import Freq
from practical._matmul import Matmul
from submit import Submit
def main():
params = {'opt': "", "input_file": "", "output_file": ""}
def set_parameters():
if len(sys.argv) > 1:
print "Number of Arguments: ", len(sys.argv), "arguments"
params['opt'] = sys.argv[1]
params["input_file"] = sys.argv[2]
params["output_file"] = sys.argv[3]
else:
params['opt'] = "sort" # sort || freq || matmul
params["input_file"] = "./datasets/sorting/sort.in" # sorting || matmul || freq
params["output_file"] = "./datasets/sorting/sort.out" # sorting || matmul || freq
set_parameters()
data_reader = DataReader(params["input_file"], params["output_file"])
data = []
def get_data():
input_data, output = data_reader.read(params['opt'])
data.append((input_data, output))
get_data()
data_writer = DataWriter(params["output_file"])
if params['opt'] is "sort":
test_cases = data[0][0]
output = data[0][1]
for array in test_cases:
_sort = Sort(array)
sorted_array = _sort.bubble_sort()
# print(sorted_array)
output.append(sorted_array)
data_writer.write_file(params['opt'], output, len(output))
if params['opt'] is "freq":
test_cases = data[0][0]
output = data[0][1]
for array in test_cases:
_freq = Freq(array)
freq_array = _freq.get_frequency_array()
# print(freq_array)
output.append(freq_array)
data_writer.write_file(params['opt'], output, len(output))
if params['opt'] is "matmul":
test_cases = data[0][0]
output = data[0][1]
n_cases = len(test_cases)
for case in test_cases:
matrix_a_size, matrix_b_size, matrix_a, matrix_b = case[0], case[1], case[2], case[3]
_matmul = Matmul(matrix_a, matrix_b)
result_matrix = _matmul.matrix_multiplication()
# print(result_matrix)
result_matrix_size = (len(result_matrix), len(result_matrix[0]))
output.append((result_matrix_size[0], result_matrix[1], result_matrix))
data_writer.write_file(params['opt'], output, n_cases)
if params['opt'] is "submit":
sort_input_path = "./datasets/sorting/sort.in"
sort_output_path = "./datasets/sorting/sort.out"
freq_input_path = "./datasets/freq/freq.in"
freq_output_path = "./datasets/freq/freq.out"
matmul_input_path = "./datasets/matmul/matmul.in"
matmul_output_path = "./datasets/matmul/matmul.out"
project_dir = "."
submit = Submit(project_dir)
submit.send(sort_input_path, sort_output_path, freq_input_path, freq_output_path, matmul_input_path, matmul_output_path)
return 0
if __name__ == '__main__':
main() | {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/release/session_1.py",
"copies": "1",
"size": "3148",
"license": "mit",
"hash": -8701875299523500000,
"line_mean": 32.5,
"line_max": 128,
"alpha_frac": 0.581321474,
"autogenerated": false,
"ratio": 3.5251959686450167,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4606517442645017,
"avg_score": null,
"num_lines": null
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/24/2016}
'''
import time
class Matmul(object):
    """Multiplies two matrices given as lists of lists.

    The running time (in milliseconds) from construction until the end of
    matrix_multiplication() is recorded and exposed via get_running_time().
    """
    def __init__(self, matrix_a, matrix_b):
        self.__matrix_a = matrix_a
        self.__matrix_b = matrix_b
        self.__matrix_a_size = (len(matrix_a), len(matrix_a[0]))
        self.__matrix_b_size = (len(matrix_b), len(matrix_b[0]))
        self.__result_matrix = None
        self.__result_matrix_size = None
        # Start timestamp in ms; turned into a duration by matrix_multiplication().
        self.__running_time = round(time.time() * 1000)
    def get_matrix_a(self):
        return self.__matrix_a
    def get_matrix_b(self):
        return self.__matrix_b
    def get_matrix_a_size(self):
        return self.__matrix_a_size
    def get_matrix_b_size(self):
        return self.__matrix_b_size
    def get_result_matrix(self):
        if self.__result_matrix is None:
            raise Exception("The result matrix is empty, check your function implementation!")
        return self.__result_matrix
    def get_running_time(self):
        return self.__running_time
    def get_result_matrix_size(self):
        if self.__result_matrix_size is None:
            raise Exception("The result matrix is empty, check your function implementation!")
        return self.__result_matrix_size
    def matrix_multiplication(self):
        """
        Multiply matrix_a by matrix_b (classic O(n*m*p) algorithm).
        :return: (array of array [matrix]) the product matrix_a x matrix_b
        :raises Exception: if the inner dimensions are incompatible
        """
        # Implemented the former TODO: the method previously left
        # __result_matrix as None and crashed on len(None) below.
        rows_a, cols_a = self.__matrix_a_size
        rows_b, cols_b = self.__matrix_b_size
        if cols_a != rows_b:
            raise Exception("Matrices aren't compatible for multiplication!")
        self.__result_matrix = [[sum(self.__matrix_a[i][k] * self.__matrix_b[k][j]
                                     for k in range(cols_a))
                                 for j in range(cols_b)]
                                for i in range(rows_a)]
        self.__result_matrix_size = (len(self.__result_matrix), len(self.__result_matrix[0]))
        self.__running_time = round(time.time() * 1000) - self.__running_time
        return self.__result_matrix
| {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/release/practical/_matmul.py",
"copies": "1",
"size": "1686",
"license": "mit",
"hash": 4311011149814967300,
"line_mean": 27.1,
"line_max": 108,
"alpha_frac": 0.606168446,
"autogenerated": false,
"ratio": 3.7887640449438202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9798453461088736,
"avg_score": 0.019295805971016836,
"num_lines": 60
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/24/2016}
'''
import time
class Freq(object):
    """Counts how often each element occurs in an array.

    The running time (in milliseconds) from construction until the end of
    get_frequency() is recorded and exposed via get_running_time().
    """
    def __init__(self, array):
        self.__array = array
        self.__frequency_dict = {}
        self.__array_length = len(array)
        # Start timestamp in ms; turned into a duration by get_frequency().
        self.__running_time = round(time.time() * 1000)
    def get_original_array(self):
        return self.__array
    def get_array_length(self):
        return self.__array_length
    def get_frequency_array(self):
        # NOTE(review): __frequency_dict starts as {}, never None, so this
        # guard cannot fire; kept unchanged for interface compatibility.
        if self.__frequency_dict is None:
            raise Exception("The frequency array is empty, check your function implementation!")
        return self.__frequency_dict
    def get_running_time(self):
        return self.__running_time
    def get_frequency(self):
        """
        Count the occurrences of every element in the array.
        :return: (dictionary) key: element in array, value: frequency,
            populated in ascending key order (dicts preserve insertion order
            on Python 3.7+)
        """
        # Implemented the former TODO: the method previously returned the
        # empty dict untouched.
        counts = {}
        for value in self.__array:
            counts[value] = counts.get(value, 0) + 1
        self.__frequency_dict = dict(sorted(counts.items()))
        self.__running_time = round(time.time() * 1000) - self.__running_time
        return self.__frequency_dict
| {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/release/practical/_freq.py",
"copies": "1",
"size": "1076",
"license": "mit",
"hash": 3912211037073518600,
"line_mean": 24.023255814,
"line_max": 135,
"alpha_frac": 0.6059479554,
"autogenerated": false,
"ratio": 4.1226053639846745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228553319384674,
"avg_score": null,
"num_lines": null
} |
___author__ = 'acmASCIS'
'''
by ahani at {9/26/2016}
'''
import sys
from judge import Judge
def main():
    """Judge a submission: compare each user file against the server's reference.

    CLI: server.py <project_dir> <sort.in> <sort.out> <freq.in> <freq.out>
    <matmul.in> <matmul.out>
    """
    # BUG FIX: the function reads sys.argv[1..7], so require all of them up
    # front; the old `len(sys.argv) <= 1` check let shorter calls through and
    # they died later with IndexError.
    if len(sys.argv) < 8:
        raise Exception("Check your arguments please!")
    project_dir = sys.argv[1]  # NOTE(review): currently unused -- confirm intent
    user_dataset = []
    def get_user_dataset():
        # Positional arguments 2..7: user's sort/freq/matmul input+output files.
        sort_input_path = sys.argv[2]
        sort_output_path = sys.argv[3]
        freq_input_path = sys.argv[4]
        freq_output_path = sys.argv[5]
        matmul_input_path = sys.argv[6]
        matmul_output_path = sys.argv[7]
        user_dataset.append(sort_input_path)
        user_dataset.append(sort_output_path)
        user_dataset.append(freq_input_path)
        user_dataset.append(freq_output_path)
        user_dataset.append(matmul_input_path)
        user_dataset.append(matmul_output_path)
    get_user_dataset()
    server_dataset = []
    def get_server_dataset():
        # Reference datasets shipped with the server, in the same order as
        # the user's files above.
        server_dataset.append("./datasets/sorting/sort.in")
        server_dataset.append("./datasets/sorting/sort.out")
        server_dataset.append("./datasets/freq/freq.in")
        server_dataset.append("./datasets/freq/freq.out")
        server_dataset.append("./datasets/matmul/matmul.in")
        server_dataset.append("./datasets/matmul/matmul.out")
    get_server_dataset()
    if len(server_dataset) != len(user_dataset):
        raise Exception("User and Server datasets aren't equal size!")
    for i in range(len(server_dataset)):
        judge = Judge(server_dataset[i], user_dataset[i])
        if not judge.check():
            # BUG FIX: Exception was given several positional args, which
            # rendered the message as a tuple; build one string instead.
            raise Exception("Files %s and %s aren't matched!"
                            % (server_dataset[i], user_dataset[i]))
    # serialization
if __name__ == '__main__':
main() | {
"repo_name": "AhmedHani/acmASCIS-ML-Hack-2017",
"path": "Session_1/dev/server/server.py",
"copies": "1",
"size": "2013",
"license": "mit",
"hash": 8169862351076439000,
"line_mean": 31.4838709677,
"line_max": 99,
"alpha_frac": 0.6318926975,
"autogenerated": false,
"ratio": 3.452830188679245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4584722886179245,
"avg_score": null,
"num_lines": null
} |
# Utility file - adds some colormaps to matplotlib's built-in collection. Currently, they include:
# - Australian
# - dc_au_ClearObservations
# - dc_au_WaterObservations
# - dc_au_Confidence
# - dc_au_WaterSummary
import matplotlib
import matplotlib.colors
def htmlColorMap(html,step=False,name='customColorMap'):
    """Build and register a matplotlib colormap from (value, '#RRGGBB') pairs.

    html: a flat sequence alternating numeric stops and HTML hex colors,
          e.g. [0, '#000000', 10, '#ffffff']. Stops are normalized to [0, 1].
    step: False (default) for a smooth gradient; True for hard bands (each
          color runs FROM its stop, so the color at 100% is ignored).
    name: name under which the map is registered. Defaults to 'customColorMap'.

    Returns the LinearSegmentedColormap; a `levels` attribute holding the
    original (un-normalized) stop values is attached for callers.
    """
    # BUG FIX: this module only imports matplotlib/matplotlib.colors at the
    # top, so matplotlib.pyplot.register_cmap below raised AttributeError
    # unless pyplot happened to be imported elsewhere first.
    import matplotlib.pyplot
    stops = html[::2]
    cols = html[1::2]
    stop_min = float(min(stops))
    stop_max = float(max(stops))
    cdict = {'red':[], 'green':[], 'blue':[]}
    stops = [(float(s)-stop_min)/(stop_max-stop_min) for s in stops] # Normalize
    cols = [matplotlib.colors.hex2color(c) for c in cols] # Convert html to (r,g,b)
    # Smooth gradient
    if (step==False):
        for i, item in enumerate(stops):
            r, g, b = cols[i]
            cdict['red'].append([item,r,r])
            cdict['green'].append([item,g,g])
            cdict['blue'].append([item,b,b])
    else:
        # Solid bands (color is FROM the %, so color @ 100% is ignored)
        cols = [(0,0,0)]+cols
        for i, item in enumerate(stops):
            r1, g1, b1 = cols[i]
            r2, g2, b2 = cols[i+1]
            cdict['red'].append([item,r1,r2])
            cdict['green'].append([item,g1,g2])
            cdict['blue'].append([item,b1,b2])
    ret = matplotlib.colors.LinearSegmentedColormap(name,cdict)
    # NOTE(review): pyplot.register_cmap is deprecated in recent matplotlib
    # (use matplotlib.colormaps.register) -- kept for compatibility here.
    matplotlib.pyplot.register_cmap(cmap=ret)
    ret.levels = html[::2] # Add a levels property which retains the un-normalized threshold values
    return ret
# Clear-observation counts: warm colors for few observations ramping to
# greens for many; hard-banded variant first (step=True).
dc_au_ClearObservations_discrete = htmlColorMap([
0,'#FFFFFF',
10,'#B21800',
25,'#FF4400',
50,'#FF8000',
100,'#FFA200',
150,'#FFC000',
200,'#FFD500',
250,'#FFF300',
300,'#E6FF00',
350,'#BCFF00',
400,'#89FF00',
500,'#68C400',
600,'#44C400',
700,'#03B500',
800,'#039500',
1000,'#026900',
],True,'dc_au_ClearObservations_discrete')
# Smooth-gradient variant of the same palette (step=False).
dc_au_ClearObservations = htmlColorMap([
0,'#FFFFFF',
10,'#B21800',
25,'#FF4400',
50,'#FF8000',
100,'#FFA200',
150,'#FFC000',
200,'#FFD500',
250,'#FFF300',
300,'#E6FF00',
350,'#BCFF00',
400,'#89FF00',
500,'#68C400',
600,'#44C400',
700,'#03B500',
800,'#039500',
1000,'#026900',
],False,'dc_au_ClearObservations')
# Water-observation counts: reds for few ramping through greens to blues/purple
# for many; hard-banded variant first (step=True).
dc_au_WaterObservations_discrete = htmlColorMap([
0,'#FFFFFF',
2,'#890000',
5,'#990000',
10,'#E38400',
25,'#E3DF00',
50,'#A6E300',
100,'#00E32D',
150,'#00E3C8',
200,'#0097E3',
250,'#005FE3',
300,'#000FE3',
350,'#000EA9',
400,'#5700E3',
],True,'dc_au_WaterObservations_discrete')
# Smooth-gradient variant of the same palette (step=False).
dc_au_WaterObservations = htmlColorMap([
0,'#FFFFFF',
2,'#890000',
5,'#990000',
10,'#E38400',
25,'#E3DF00',
50,'#A6E300',
100,'#00E32D',
150,'#00E3C8',
200,'#0097E3',
250,'#005FE3',
300,'#000FE3',
350,'#000EA9',
400,'#5700E3',
],False,'dc_au_WaterObservations')
# Confidence percentages (0-100): black/red for low confidence ramping to
# green for high; hard-banded variant first (step=True).
dc_au_Confidence_discrete = htmlColorMap([
0,'#FFFFFF',
1,'#000000',
2,'#990000',
5,'#CF2200',
10,'#E38400',
25,'#E3DF00',
50,'#A6E300',
75,'#62E300',
100,'#00E32D',
],True,'dc_au_Confidence_discrete')
# Smooth-gradient variant of the same palette (step=False).
dc_au_Confidence = htmlColorMap([
0,'#FFFFFF',
1,'#000000',
2,'#990000',
5,'#CF2200',
10,'#E38400',
25,'#E3DF00',
50,'#A6E300',
75,'#62E300',
100,'#00E32D',
],False,'dc_au_Confidence')
# Water summary as a percentage (discrete map) -- note the duplicated final
# stop (100 twice), which anchors the last band right at the top of the range.
dc_au_WaterSummary_discrete = htmlColorMap([
0.2,'#FFFFFF',
0.5,'#8E0101',
1,'#CF2200',
2,'#E38400',
5,'#E3DF00',
10,'#A6E300',
20,'#62E300',
30,'#00E32D',
40,'#00E384',
50,'#00E3C8',
60,'#00C5E3',
70,'#0097E3',
80,'#005FE3',
90,'#000FE3',
100,'#5700E3',
100,'#5700E3'
],True,'dc_au_WaterSummary_discrete')
# Smooth variant with stops expressed as fractions (0.0-1.1) instead of
# percentages; the palette is the same.
dc_au_WaterSummary = htmlColorMap([
0.002,'#FFFFFF',
0.005,'#8E0101',
0.01,'#CF2200',
0.02,'#E38400',
0.05,'#E3DF00',
0.10,'#A6E300',
0.20,'#62E300',
0.30,'#00E32D',
0.40,'#00E384',
0.50,'#00E3C8',
0.60,'#00C5E3',
0.70,'#0097E3',
0.80,'#005FE3',
0.90,'#000FE3',
1.00,'#5700E3',
1.10,'#5700E3',
],False,'dc_au_WaterSummary')
| {
"repo_name": "ceos-seo/data_cube_notebooks",
"path": "notebooks/dc_au_colormaps.py",
"copies": "1",
"size": "4594",
"license": "apache-2.0",
"hash": 8813904192497508000,
"line_mean": 23.9673913043,
"line_max": 99,
"alpha_frac": 0.5709621245,
"autogenerated": false,
"ratio": 2.672484002326934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3743446126826934,
"avg_score": null,
"num_lines": null
} |
import sys
import time
import random
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from tkinter import *
from tkinter import ttk
import search
from utils import argmax
LARGE_FONT = ('Verdana', 12)
EXTRA_LARGE_FONT = ('Consolas', 36, 'bold')
canvas_width = 800
canvas_height = 600
black = '#000000'
white = '#ffffff'
p_blue = '#042533'
lp_blue = '#0c394c'
# genetic algorithm variables
# feel free to play around with these
target = 'Genetic Algorithm' # the phrase to be generated
max_population = 100 # number of samples in each population
mutation_rate = 0.1 # probability of mutation
f_thres = len(target) # fitness threshold
ngen = 1200 # max number of generations to run the genetic algorithm
generation = 0 # counter to keep track of generation number
u_case = [chr(x) for x in range(65, 91)] # list containing all uppercase characters
l_case = [chr(x) for x in range(97, 123)] # list containing all lowercase characters
punctuations1 = [chr(x) for x in range(33, 48)] # lists containing punctuation symbols
punctuations2 = [chr(x) for x in range(58, 65)]
punctuations3 = [chr(x) for x in range(91, 97)]
numerals = [chr(x) for x in range(48, 58)] # list containing numbers
# extend the gene pool with the required lists and append the space character
gene_pool = []
gene_pool.extend(u_case)
gene_pool.extend(l_case)
gene_pool.append(' ')
# callbacks to update global variables from the slider values
def update_max_population(slider_value):
global max_population
max_population = slider_value
def update_mutation_rate(slider_value):
global mutation_rate
mutation_rate = slider_value
def update_f_thres(slider_value):
global f_thres
f_thres = slider_value
def update_ngen(slider_value):
global ngen
ngen = slider_value
# fitness function
def fitness_fn(_list):
fitness = 0
# create string from list of characters
phrase = ''.join(_list)
# add 1 to fitness value for every matching character
for i in range(len(phrase)):
if target[i] == phrase[i]:
fitness += 1
return fitness
# function to bring a new frame on top
def raise_frame(frame, init=False, update_target=False, target_entry=None, f_thres_slider=None):
frame.tkraise()
global target
if update_target and target_entry is not None:
target = target_entry.get()
f_thres_slider.config(to=len(target))
if init:
population = search.init_population(max_population, gene_pool, len(target))
genetic_algorithm_stepwise(population)
# defining root and child frames
root = Tk()
f1 = Frame(root)
f2 = Frame(root)
# pack frames on top of one another
for frame in (f1, f2):
frame.grid(row=0, column=0, sticky='news')
# Home Screen (f1) widgets
target_entry = Entry(f1, font=('Consolas 46 bold'), exportselection=0, foreground=p_blue, justify=CENTER)
target_entry.insert(0, target)
target_entry.pack(expand=YES, side=TOP, fill=X, padx=50)
target_entry.focus_force()
max_population_slider = Scale(f1, from_=3, to=1000, orient=HORIZONTAL, label='Max population', command=lambda value: update_max_population(int(value)))
max_population_slider.set(max_population)
max_population_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
mutation_rate_slider = Scale(f1, from_=0, to=1, orient=HORIZONTAL, label='Mutation rate', resolution=0.0001, command=lambda value: update_mutation_rate(float(value)))
mutation_rate_slider.set(mutation_rate)
mutation_rate_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
f_thres_slider = Scale(f1, from_=0, to=len(target), orient=HORIZONTAL, label='Fitness threshold', command=lambda value: update_f_thres(int(value)))
f_thres_slider.set(f_thres)
f_thres_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
ngen_slider = Scale(f1, from_=1, to=5000, orient=HORIZONTAL, label='Max number of generations', command=lambda value: update_ngen(int(value)))
ngen_slider.set(ngen)
ngen_slider.pack(expand=YES, side=TOP, fill=X, padx=40)
button = ttk.Button(f1, text='RUN', command=lambda: raise_frame(f2, init=True, update_target=True, target_entry=target_entry, f_thres_slider=f_thres_slider)).pack(side=BOTTOM, pady=50)
# f2 widgets
canvas = Canvas(f2, width=canvas_width, height=canvas_height)
canvas.pack(expand=YES, fill=BOTH, padx=20, pady=15)
button = ttk.Button(f2, text='EXIT', command=lambda: raise_frame(f1)).pack(side=BOTTOM, pady=15)
# function to run the genetic algorithm and update text on the canvas
def genetic_algorithm_stepwise(population):
root.title('Genetic Algorithm')
for generation in range(ngen):
# generating new population after selecting, recombining and mutating the existing population
population = [search.mutate(search.recombine(*search.select(2, population, fitness_fn)), gene_pool, mutation_rate) for i in range(len(population))]
# genome with the highest fitness in the current generation
current_best = ''.join(argmax(population, key=fitness_fn))
# collecting first few examples from the current population
members = [''.join(x) for x in population][:48]
# clear the canvas
canvas.delete('all')
# displays current best on top of the screen
canvas.create_text(canvas_width / 2, 40, fill=p_blue, font='Consolas 46 bold', text=current_best)
# displaying a part of the population on the screen
for i in range(len(members) // 3):
canvas.create_text((canvas_width * .175), (canvas_height * .25 + (25 * i)), fill=lp_blue, font='Consolas 16', text=members[3 * i])
canvas.create_text((canvas_width * .500), (canvas_height * .25 + (25 * i)), fill=lp_blue, font='Consolas 16', text=members[3 * i + 1])
canvas.create_text((canvas_width * .825), (canvas_height * .25 + (25 * i)), fill=lp_blue, font='Consolas 16', text=members[3 * i + 2])
# displays current generation number
canvas.create_text((canvas_width * .5), (canvas_height * 0.95), fill=p_blue, font='Consolas 18 bold', text=f'Generation {generation}')
# displays blue bar that indicates current maximum fitness compared to maximum possible fitness
scaling_factor = fitness_fn(current_best) / len(target)
canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.9, 100, outline=p_blue)
canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.1 + scaling_factor * canvas_width * 0.8, 100, fill=lp_blue)
canvas.update()
# checks for completion
fittest_individual = search.fitness_threshold(fitness_fn, f_thres, population)
if fittest_individual:
break
raise_frame(f1)
root.mainloop() | {
"repo_name": "jo-tez/aima-python",
"path": "gui/genetic_algorithm_example.py",
"copies": "1",
"size": "6944",
"license": "mit",
"hash": 4739425559933267000,
"line_mean": 39.3779069767,
"line_max": 184,
"alpha_frac": 0.7363191244,
"autogenerated": false,
"ratio": 3.1780320366132724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44143511610132724,
"avg_score": null,
"num_lines": null
} |
from tkinter import *
from functools import partial
import time
import random
import numpy as np
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from search import astar_search, EightPuzzle
import utils
root = Tk()
state = [1, 2, 3, 4, 5, 6, 7, 8, 0]
puzzle = EightPuzzle(tuple(state))
solution = None
b = [None]*9
# TODO: refactor into OOP, remove global variables
def scramble():
""" Scrambles the puzzle starting from the goal state """
global state
global puzzle
possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT']
scramble = []
for _ in range(60):
scramble.append(random.choice(possible_actions))
for move in scramble:
if move in puzzle.actions(state):
state = list(puzzle.result(state, move))
puzzle = EightPuzzle(tuple(state))
create_buttons()
def solve():
""" Solves the puzzle using astar_search """
return astar_search(puzzle).solution()
def solve_steps():
""" Solves the puzzle step by step """
global puzzle
global solution
global state
solution = solve()
print(solution)
for move in solution:
state = puzzle.result(state, move)
create_buttons()
root.update()
root.after(1, time.sleep(0.75))
def exchange(index):
""" Interchanges the position of the selected tile with the zero tile under certain conditions """
global state
global solution
global puzzle
zero_ix = list(state).index(0)
actions = puzzle.actions(state)
current_action = ''
i_diff = index//3 - zero_ix//3
j_diff = index%3 - zero_ix%3
if i_diff == 1:
current_action += 'DOWN'
elif i_diff == -1:
current_action += 'UP'
if j_diff == 1:
current_action += 'RIGHT'
elif j_diff == -1:
current_action += 'LEFT'
if abs(i_diff) + abs(j_diff) != 1:
current_action = ''
if current_action in actions:
b[zero_ix].grid_forget()
b[zero_ix] = Button(root, text=f'{state[index]}', width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, zero_ix))
b[zero_ix].grid(row=zero_ix//3, column=zero_ix%3, ipady=40)
b[index].grid_forget()
b[index] = Button(root, text=None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, index))
b[index].grid(row=index//3, column=index%3, ipady=40)
state[zero_ix], state[index] = state[index], state[zero_ix]
puzzle = EightPuzzle(tuple(state))
def create_buttons():
""" Creates dynamic buttons """
# TODO: Find a way to use grid_forget() with a for loop for initialization
b[0] = Button(root, text=f'{state[0]}' if state[0] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 0))
b[0].grid(row=0, column=0, ipady=40)
b[1] = Button(root, text=f'{state[1]}' if state[1] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 1))
b[1].grid(row=0, column=1, ipady=40)
b[2] = Button(root, text=f'{state[2]}' if state[2] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 2))
b[2].grid(row=0, column=2, ipady=40)
b[3] = Button(root, text=f'{state[3]}' if state[3] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 3))
b[3].grid(row=1, column=0, ipady=40)
b[4] = Button(root, text=f'{state[4]}' if state[4] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 4))
b[4].grid(row=1, column=1, ipady=40)
b[5] = Button(root, text=f'{state[5]}' if state[5] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 5))
b[5].grid(row=1, column=2, ipady=40)
b[6] = Button(root, text=f'{state[6]}' if state[6] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 6))
b[6].grid(row=2, column=0, ipady=40)
b[7] = Button(root, text=f'{state[7]}' if state[7] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 7))
b[7].grid(row=2, column=1, ipady=40)
b[8] = Button(root, text=f'{state[8]}' if state[8] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, 8))
b[8].grid(row=2, column=2, ipady=40)
def create_static_buttons():
""" Creates scramble and solve buttons """
scramble_btn = Button(root, text='Scramble', font=('Helvetica', 30, 'bold'), width=8, command=partial(init))
scramble_btn.grid(row=3, column=0, ipady=10)
solve_btn = Button(root, text='Solve', font=('Helvetica', 30, 'bold'), width=8, command=partial(solve_steps))
solve_btn.grid(row=3, column=2, ipady=10)
def init():
""" Calls necessary functions """
global state
global solution
state = [1, 2, 3, 4, 5, 6, 7, 8, 0]
scramble()
create_buttons()
create_static_buttons()
init()
root.mainloop()
| {
"repo_name": "jo-tez/aima-python",
"path": "gui/eight_puzzle.py",
"copies": "1",
"size": "4618",
"license": "mit",
"hash": -5303499428820125000,
"line_mean": 32.4637681159,
"line_max": 138,
"alpha_frac": 0.6632741447,
"autogenerated": false,
"ratio": 2.6724537037037037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3835727848403704,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
import numpy as np
def parameter_learning_train_1(parent,child):
parent_uniq=np.unique(parent)
child_uniq=np.unique(child)
parent_size=parent_uniq.size
child_size=child_uniq.size
# buat cpd polos / make plain cpd table
cpd=np.zeros((parent_size*child_size,3))
tmp=0
for p in parent_uniq:
for c in child_uniq:
cpd[tmp][0]=p
cpd[tmp][1]=c
tmp+=1
# isi cpd / fill the cpd table
print("isi cpd")
for c in cpd:
temp2=0
temp=list(((parent[:]==c[0]) & (child[:]==c[1])))
temp2=temp.count(True)+1
c[2]=temp2
# copy matrix
cpd_mdl=cpd
tmp_zeros=np.zeros((cpd.shape[0],1))
cpd_mdl=np.concatenate((cpd_mdl, tmp_zeros), axis=1)
# isi probability / fill the probability to cpd
temp=len(child_uniq)
temp2=max(child)
for c in range(len(cpd_mdl)):
jum=0
if(cpd_mdl[c,1]==temp2):
jum=sum(cpd_mdl[c-(temp-1):c+1,2])
cpd_mdl[c-(temp-1):c+1,3]=jum
cpd[:,2]=cpd[:,2]/cpd_mdl[:,3]
return cpd,cpd_mdl
# example code with ess =1 , data with csv document
data=np.loadtxt('data.csv',int,delimiter=',');
parent=data[:,-1]
child=data[:,0]
h_v,m_h_v=parameter_learning_train_1(parent,child)
print("h_v")
print(h_v) # probability h given by v
print("m_h_v")
print(m_h_v) # probability h given by v for mdl(scoring funtion purpose)
| {
"repo_name": "wiraindrak/AmazingAI",
"path": "PGM/parameter_learning.py",
"copies": "1",
"size": "1438",
"license": "mit",
"hash": 934317870716432300,
"line_mean": 25.1454545455,
"line_max": 72,
"alpha_frac": 0.5890125174,
"autogenerated": false,
"ratio": 2.7030075187969924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37920200361969925,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
import sys
import os
def copy_some(obj_from, obj_to, names):
for n in names:
if hasattr(obj_from, n):
v = getattr(obj_from, n)
setattr(obj_to, n, v)
# Strip the advice and unchanged lines from a ndiff list
def make_comp_diff(delta_list):
comp_delta = dict()
i = 1
for j, v in enumerate(delta_list):
if v.startswith(" "):
i += 1
elif v.startswith("+ ") or v.startswith("- "):
if i not in comp_delta:
comp_delta[i] = list()
comp_delta[i].append(v)
return comp_delta
def get_exec_path():
if sys.platform.startswith("win"):
exeName = "Python.exe"
import win32api
# This usually points to PythonService.exe
# Go hunting like winserviceutil does for that executable
for path in [sys.prefix] + sys.path:
look = os.path.join(path, exeName)
if os.path.isfile(look):
return win32api.GetFullPathName(look)
# Try the global Path.
try:
return win32api.SearchPath(None, exeName)[0]
except win32api.error:
raise RuntimeError("Unable to locate python.exe")
else:
return sys.executable
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/useful.py",
"copies": "1",
"size": "1261",
"license": "mit",
"hash": -2876332495475552000,
"line_mean": 27.6590909091,
"line_max": 65,
"alpha_frac": 0.5701823949,
"autogenerated": false,
"ratio": 3.71976401179941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.478994640669941,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
"""
Credentials for mysql database access
"""
from sqlalchemy import create_engine
import xml.etree.ElementTree as ET
class Credentials(object):
"""
This is the parent class for the various credential classes which hold the login credentials for the mysql databases
Attributes:
db_host: String location of the host with the database
db_user: String username for accessing the database
db_name: String name of the database schema
db_password: String password for accessing the database
db_port: String port at which database is located
"""
def __init__(self):
self.db_host = ""
self.db_user = ''
self.db_name = ''
self.db_password = ''
self.db_port = ''
def load_credentials(self, credentials_file):
"""
Imports the database connection credentials from xml file
Args:
credentials_file: Path and filename to the credentials file to use
"""
credentials = ET.parse(credentials_file)
self.db_host = credentials.find('db_host').text
self.db_port = credentials.find('db_port').text
if self.db_port is not None:
self.db_port = int(self.db_port)
self.db_user = credentials.find('db_user').text
self.db_name = credentials.find('db_name').text
self.db_password = credentials.find('db_password').text
def host(self):
"""
Returns:
String location of host
"""
return self.db_host
def database(self):
"""
Returns:
String name of the database schema
"""
return self.db_name
def password(self):
"""
Returns:
String password for schema
"""
return self.db_password
def username(self):
"""
Returns:
String username for access
"""
return self.db_user
def port(self):
"""
Returns:
String identifying port to connect with
"""
if self.db_port is not None:
return self.db_port
def sql_alchemy_engine(self):
"""
Creates a mysql alchemy engine for connecting to the db with the format:
dialect+driver://username:password@host:port/database
"""
if self.db_port is not None:
engine_string = "mysql://%s:%s@%s:%s/%s" % (self.db_user, self.db_password, self.db_host, self.db_port, self.db_name)
else:
engine_string = "mysql://%s:%s@%s/%s" % (self.db_user, self.db_password, self.db_host, self.db_name)
return create_engine(engine_string)
class TestingCredentials(Credentials):
"""
These are dummy credentials for testing
"""
def __init__(self):
Credentials.__init__(self)
self.username = 'testusername'
self.db_name = 'testdbname'
self.password = 'testpassword'
self.port = 'testport'
| {
"repo_name": "PainNarrativesLab/TwitterMining",
"path": "DatabaseAccessObjects/SqlCredentials.py",
"copies": "1",
"size": "2995",
"license": "mit",
"hash": -4834472421508077000,
"line_mean": 28.362745098,
"line_max": 129,
"alpha_frac": 0.5859766277,
"autogenerated": false,
"ratio": 4.254261363636363,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5340237991336363,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
from datetime import datetime
from lxml import etree
import xmltodict
from pytrademonster.constants import TradeMonsterConstants
from pytrademonster.objects import AccountTransaction, Account
class AccountRequests(object):
def createAccountHistoryPayload(self, userId, numTransactions, accountId, transactionType, start=None, end=None ):
'''
start and end are in iso8601 YYYYMMDDThh:mm:ss format
'''
xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_ACCOUNT_HISTORY
xmlObj = xmltodict.parse(xmlStr)
xmlObj['getAccountHistory']['userId'] = userId
if start != None and end != None:
startDatetime = datetime.strptime(start, TradeMonsterConstants.ISO_TIME)
endDatetime = datetime.strptime(end, TradeMonsterConstants.ISO_TIME)
xmlObj['getAccountHistory']['startTime']['year'] = startDatetime.year
xmlObj['getAccountHistory']['startTime']['month'] = startDatetime.month
xmlObj['getAccountHistory']['startTime']['date'] = startDatetime.day
xmlObj['getAccountHistory']['startTime']['minutes'] = startDatetime.minute
xmlObj['getAccountHistory']['startTime']['seconds'] = startDatetime.second
xmlObj['getAccountHistory']['endTime']['year'] = endDatetime.year
xmlObj['getAccountHistory']['endTime']['month'] = endDatetime.month
xmlObj['getAccountHistory']['endTime']['date'] = endDatetime.day
xmlObj['getAccountHistory']['endTime']['minutes'] = endDatetime.minute
xmlObj['getAccountHistory']['endTime']['seconds'] = endDatetime.second
else:
xmlObj['getAccountHistory']['timeRange'] = 'TODAY'
xmlObj['getAccountHistory']['numOfTransactions'] = numTransactions
xmlObj['getAccountHistory']['accountIds'] = accountId
xmlObj['getAccountHistory']['transactionTypes'] = transactionType
return xmltodict.unparse(xmlObj)
def createAccountBalancePayload(self, accountNumber):
xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_CUST_WIDGET
xmlObj = xmltodict.parse(xmlStr)
xmlObj['getCustomerWidgetData']['accountNumber'] = accountNumber
return xmltodict.unparse(xmlObj)
def createDeleteGroupPayload(self, groupId):
xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_DELETE_GROUPS
xmlObj = xmltodict.parse(xmlStr)
xmlObj['deleteGroups']['groupIds'] = groupId
return xmltodict.unparse(xmlObj)
def createNewGroupPayload(self, groupNameToUse, accountIds):
createAcct = etree.Element(TradeMonsterConstants.AccountRequests.CREATE_ACCOUNT_GROUP)
groupName = etree.SubElement(createAcct, 'groupName')
groupName.text = groupNameToUse
groupId = etree.SubElement(createAcct, 'groupId')
for acct in accountIds:
accountIds = etree.SubElement(createAcct, 'accountIds')
accountIds.text = acct
def createAccountPerformancePayload(self, accountNumber,accountId, fromDate, toDate, category=TradeMonsterConstants.AccountRequests.PERFORMANCE_CATEGORY.SYMBOL ):
xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_ACCOUNT_PERFORMANCE
xmlObj = xmltodict.parse(xmlStr)
startDatetime = datetime.strptime(fromDate, TradeMonsterConstants.ISO_TIME)
endDatetime = datetime.strptime(toDate, TradeMonsterConstants.ISO_TIME)
xmlObj['getAccountPerformance']['fromDateVO']['dateObj'] = startDatetime.strftime(TradeMonsterConstants.PERFORMANCE_TIME)
xmlObj['getAccountPerformance']['fromDateVO']['date'] = startDatetime.day
xmlObj['getAccountPerformance']['fromDateVO']['hours'] = startDatetime.hour
xmlObj['getAccountPerformance']['fromDateVO']['year'] = startDatetime.year
xmlObj['getAccountPerformance']['fromDateVO']['seconds'] = startDatetime.second
xmlObj['getAccountPerformance']['fromDateVO']['month'] = startDatetime.month
xmlObj['getAccountPerformance']['fromDateVO']['minutes'] = startDatetime.minute
xmlObj['getAccountPerformance']['toDateVO']['dateObj'] = endDatetime.strftime(TradeMonsterConstants.PERFORMANCE_TIME)
xmlObj['getAccountPerformance']['toDateVO']['date'] = endDatetime.day
xmlObj['getAccountPerformance']['toDateVO']['hours'] = endDatetime.hour
xmlObj['getAccountPerformance']['toDateVO']['year'] = endDatetime.year
xmlObj['getAccountPerformance']['toDateVO']['seconds'] = endDatetime.second
xmlObj['getAccountPerformance']['toDateVO']['month'] = endDatetime.month
xmlObj['getAccountPerformance']['toDateVO']['minutes'] = endDatetime.minute
xmlObj['getAccountPerformance']['accountIds'] = accountId
xmlObj['getAccountPerformance']['accountNumber'] = accountNumber
xmlObj['getAccountPerformance']['category'] = category
xmlObj['getAccountPerformance']['toDate'] = int(endDatetime.strftime('%s'))
xmlObj['getAccountPerformance']['fromDate'] = int(startDatetime.strftime('%s'))
return xmltodict.unparse(xmlObj)
def createCashTransferPayload(self, accountId):
xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_CASH_TRANSFER
xmlObj = xmltodict.parse(xmlStr)
xmlObj['getCashTransfers']['accountId'] = accountId
return xmltodict.unparse(xmlObj)
class AccountServices(object):
'''
Class for invoking various account specific services
'''
def __init__(self, pyTradeMonster):
self.pyTradeMonster = pyTradeMonster
self.accountRequests = AccountRequests()
def getParsedAccountObjects(self):
'''
Populate a dictionary of Account objects
Key is AccountNumber - i.e., '5PDXXXXX
:return:
'''
xmlObj = self.getAccounts()
accountDict = {}
root = xmlObj[TradeMonsterConstants.ResponseRoots.RETRIEVE_ACCT_ROOT]
for acct in root['accountList']:
account = Account()
account.accountId = acct['accountId']
account.accountNumber = acct['accountNumber']
account.accountDisplayName = acct['accountDisplayName']
account.accountType = acct['accountType']
account.accountInceptionDate = acct['accountInceptionDate']
account.accountRegistrationType = acct['accountRegistrationType']
account.accountStatus = acct['accountStatus']
account.alertEmail = acct['alertEmail']
account.ownerFirstName = acct['primaryAccountHolderFirstName']
account.ownerLastName = acct['primaryAccountHolderLastName']
account.userId = root['userProfile']['userId']
accountDict[account.accountNumber] = account
return accountDict
def getParsedAccountHistory(self, account, numTransactions, transactionType, start=None, end=None):
'''
Return a list of account history objects
:param userId:
:param numTransactions:
:param accountId:
:param transactionType:
:param start:
:param end:
:return: trnasactionList
'''
xmlObj = self.getAccountHistory(account.userId, numTransactions, account.accountId, transactionType, start, end)
transactionList = []
root = xmlObj[TradeMonsterConstants.ResponseRoots.RETRIEVE_ACCT_HISTORY_ROOT]
for item in root['accountHistoryVO']:
transaction = AccountTransaction()
transaction.accountId = item['accountId']
transaction.currency = item['amount']['currency']
transaction.amount = item['amount']['amount']
transaction.transactionDescription = item['transactionDescription']
transaction.transactionDate = item['transactionDate']
transaction.transactionType = item['transactionType']
if 'acType' in item:
transaction.accountType = item['acType']
if 'fee' in item:
transaction.fee = item['fee']['amount']
if 'instrumentType' in item :
transaction.instrumentType = item['instrumentType']
if 'side' in item :
transaction.buyOrSell = item['side']
if 'quantity' in item :
transaction.quantity = item['quantity']
if 'status' in item :
transaction.status = item['status']
if 'symbol' in item :
transaction.symbol = item['symbol']
if 'commission' in item :
transaction.commissionAmount = item['commission']['amount']
transactionList.append(transaction)
return transactionList
def getAccounts(self):
url = TradeMonsterConstants.URLS.ACCOUNT_PERSON_SERVICE
payload = TradeMonsterConstants.AccountRequests.DEFAULT_FETCH_ACCOUNTS
return self.pyTradeMonster.doCall(url,payload)
def getAccountHistory(self,userId, numTransactions, accountId, transactionType, start=None, end=None):
url = TradeMonsterConstants.URLS.ACCOUNT_HISTORY_SERVICE
payload = self.accountRequests.createAccountHistoryPayload(userId,numTransactions,accountId, transactionType, start, end)
return self.pyTradeMonster.doCall(url,payload)
def getAllGroups(self):
url = TradeMonsterConstants.URLS.ACCOUNT_GROUP_SERVICE
payload = TradeMonsterConstants.AccountRequests.DEFAULT_FETCH_GROUPS
return self.pyTradeMonster.doCall(url,payload)
def getBalanceForAccount(self, accountNumber):
url = TradeMonsterConstants.URLS.ACCOUNT_BALANCES_SERVICE
payload = self.accountRequests.createAccountBalancePayload(accountNumber)
return self.pyTradeMonster.doCall(url,payload)
def getAccountPerformance(self, accountNumber, accountId, fromDate, toDate, category):
url = TradeMonsterConstants.URLS.ACCOUNT_PERFORMANCE_SERVICE
payload = self.accountRequests.createAccountPerformancePayload(accountNumber, accountId, fromDate, toDate, category)
return self.pyTradeMonster.doCall(url, payload)
def getCashTransfers(self, accountId):
url = TradeMonsterConstants.URLS.ACCOUNT_CASH_TRANSFER_SERVICE
payload = self.accountRequests.createCashTransferPayload(accountId)
return self.pyTradeMonster.doCall(url,payload)
def doCreateAccountGroup(self, groupNameToUse, accountIds):
url = TradeMonsterConstants.URLS.GROUP_SERVICE
payload =self.accountRequests.createNewGroupPayload(groupNameToUse, accountIds)
self.pyTradeMonster.doCall(url,payload)
| {
"repo_name": "femtotrader/pytrademonster",
"path": "pytrademonster/services/accountService.py",
"copies": "2",
"size": "10630",
"license": "mit",
"hash": 8440252944786226000,
"line_mean": 48.212962963,
"line_max": 166,
"alpha_frac": 0.6895578551,
"autogenerated": false,
"ratio": 4.28111155859847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008633390764365706,
"num_lines": 216
} |
__author__ = 'adam'
from datetime import datetime
import pandas as pd
from ggplot import *
import os.path, sys
# used to set path to be root
parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parent)
#from pyTradeMonster import log, PyTradeMonster
from pytrademonster.services import AccountServices
from pytrademonster.constants import TradeMonsterConstants
from pytrademonster import PyTradeMonster, log
"""
Methods for plotting useful account information
Right now only plots a view on PNL
"""
#global for testing
MAX_TRANSACTIONS = 5000
def plotAccountPnl(pyTradeMonster, transactionType, accountNumber, start, end, filterTicker = None):
'''
Retrieve all live accounts' activity and plot cumulative pnl for a given transaction type and account
Filter ticker filters out PNL by an individual ticker, i.e. 'GOOG'
'''
accountService = AccountServices(pyTradeMonster)
accounts = accountService.getParsedAccountObjects()
graphFrame = pd.DataFrame()
for account in accounts.itervalues():
if account.accountNumber == accountNumber and account.accountType != TradeMonsterConstants.AccountRequests.ACCOUNT_TYPES.OPTION:
log.info('Processing account: {0}'.format(account))
accountHistory = accountService.getParsedAccountHistory(account, MAX_TRANSACTIONS, transactionType, start, end )
historyList = [{key:value for key, value in x.__dict__.items() if not key.startswith('__') and not callable(key)} for x in accountHistory]
historyFrame = pd.DataFrame(historyList)
historyFrame = historyFrame.reindex(index=historyFrame.index[::-1]) #make dataframe sorted in ascending chronological order
historyFrame.transactionDate = historyFrame.transactionDate.str[:-6]
if filterTicker:
historyFrame = historyFrame[historyFrame['symbol'].str.contains(filterTicker)]
historyFrame['date'] = historyFrame.transactionDate.apply(lambda d : datetime.strptime(d,TradeMonsterConstants.TRANSACTION_TIME))
historyFrame['cumPnl'] = historyFrame.amount.astype(float).cumsum()
graphFrame = graphFrame.append(historyFrame)
plot = ggplot(aes(x='date',y='cumPnl'), data=graphFrame) + geom_line()
print plot
def main():
"""
Sample use case
REPLACE ACCOUNT NUMBER WITH YOUR ACTUAL ACCOUNT NUMBER!
:return:
"""
pyTradeMonster = PyTradeMonster()
accountNumber = 'xxxxxx'
plotAccountPnl(pyTradeMonster, TradeMonsterConstants.AccountRequests.TRANSACTION_TYPES.TRADE, accountNumber, '20110701T00:00:00', '20150830T00:00:00', 'SPY')
if __name__ == '__main__':
main() | {
"repo_name": "NunoEdgarGub1/pytrademonster",
"path": "pytrademonster/visualizer.py",
"copies": "3",
"size": "2699",
"license": "mit",
"hash": 5088652055461910000,
"line_mean": 38.1304347826,
"line_max": 161,
"alpha_frac": 0.7232308262,
"autogenerated": false,
"ratio": 3.8447293447293447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6067960170929344,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
from datetime import datetime
import xmltodict
from pytrademonster.constants import TradeMonsterConstants
from pytrademonster.objects import AccountTransaction, Account
class AccountRequests(object):
    '''
    Builds the XML payloads (via xmltodict round-tripping) consumed by the
    TradeMonster account web services. Each create*Payload method parses a
    template string from TradeMonsterConstants, fills in the request fields
    and returns the re-serialized XML string.
    '''

    def createAccountHistoryPayload(self, userId, numTransactions, accountId, transactionType, start=None, end=None):
        '''
        Build a getAccountHistory payload.
        start and end are in iso8601 YYYYMMDDThh:mm:ss format; when either is
        omitted the payload requests the service-side 'TODAY' time range.
        '''
        xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_ACCOUNT_HISTORY
        xmlObj = xmltodict.parse(xmlStr)
        xmlObj['getAccountHistory']['userId'] = userId
        if start is not None and end is not None:
            startDatetime = datetime.strptime(start, TradeMonsterConstants.ISO_TIME)
            endDatetime = datetime.strptime(end, TradeMonsterConstants.ISO_TIME)
            # NOTE(review): unlike createAccountPerformancePayload below, the
            # 'hours' component is not populated here -- confirm whether the
            # history service expects one.
            xmlObj['getAccountHistory']['startTime']['year'] = startDatetime.year
            xmlObj['getAccountHistory']['startTime']['month'] = startDatetime.month
            xmlObj['getAccountHistory']['startTime']['date'] = startDatetime.day
            xmlObj['getAccountHistory']['startTime']['minutes'] = startDatetime.minute
            xmlObj['getAccountHistory']['startTime']['seconds'] = startDatetime.second
            xmlObj['getAccountHistory']['endTime']['year'] = endDatetime.year
            xmlObj['getAccountHistory']['endTime']['month'] = endDatetime.month
            xmlObj['getAccountHistory']['endTime']['date'] = endDatetime.day
            xmlObj['getAccountHistory']['endTime']['minutes'] = endDatetime.minute
            xmlObj['getAccountHistory']['endTime']['seconds'] = endDatetime.second
        else:
            xmlObj['getAccountHistory']['timeRange'] = 'TODAY'
        xmlObj['getAccountHistory']['numOfTransactions'] = numTransactions
        xmlObj['getAccountHistory']['accountIds'] = accountId
        xmlObj['getAccountHistory']['transactionTypes'] = transactionType
        return xmltodict.unparse(xmlObj)

    def createAccountBalancePayload(self, accountNumber):
        '''Build a getCustomerWidgetData payload for one account number.'''
        xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_CUST_WIDGET
        xmlObj = xmltodict.parse(xmlStr)
        xmlObj['getCustomerWidgetData']['accountNumber'] = accountNumber
        return xmltodict.unparse(xmlObj)

    def createDeleteGroupPayload(self, groupId):
        '''Build a deleteGroups payload for the given group id.'''
        xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_DELETE_GROUPS
        xmlObj = xmltodict.parse(xmlStr)
        xmlObj['deleteGroups']['groupIds'] = groupId
        return xmltodict.unparse(xmlObj)

    def createNewGroupPayload(self, groupNameToUse, accountIds):
        '''Build a createAccountGroup payload; groupId is left empty because
        the group does not exist yet.'''
        xmlStr = TradeMonsterConstants.AccountRequests.CREATE_ACCOUNT_GROUP
        xmlObj = xmltodict.parse(xmlStr)
        xmlObj['createAccountGroup']['groupName'] = groupNameToUse
        xmlObj['createAccountGroup']['groupId'] = ''
        xmlObj['createAccountGroup']['accountIds'] = accountIds
        return xmltodict.unparse(xmlObj)

    def createAccountPerformancePayload(self, accountNumber, accountId, fromDate, toDate, category=TradeMonsterConstants.AccountRequests.PERFORMANCE_CATEGORY.SYMBOL):
        '''
        Build a getAccountPerformance payload.
        fromDate and toDate are in iso8601 YYYYMMDDThh:mm:ss format.
        '''
        xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_ACCOUNT_PERFORMANCE
        xmlObj = xmltodict.parse(xmlStr)
        startDatetime = datetime.strptime(fromDate, TradeMonsterConstants.ISO_TIME)
        endDatetime = datetime.strptime(toDate, TradeMonsterConstants.ISO_TIME)
        xmlObj['getAccountPerformance']['fromDateVO']['dateObj'] = startDatetime.strftime(TradeMonsterConstants.PERFORMANCE_TIME)
        xmlObj['getAccountPerformance']['fromDateVO']['date'] = startDatetime.day
        xmlObj['getAccountPerformance']['fromDateVO']['hours'] = startDatetime.hour
        xmlObj['getAccountPerformance']['fromDateVO']['year'] = startDatetime.year
        xmlObj['getAccountPerformance']['fromDateVO']['seconds'] = startDatetime.second
        xmlObj['getAccountPerformance']['fromDateVO']['month'] = startDatetime.month
        xmlObj['getAccountPerformance']['fromDateVO']['minutes'] = startDatetime.minute
        xmlObj['getAccountPerformance']['toDateVO']['dateObj'] = endDatetime.strftime(TradeMonsterConstants.PERFORMANCE_TIME)
        xmlObj['getAccountPerformance']['toDateVO']['date'] = endDatetime.day
        xmlObj['getAccountPerformance']['toDateVO']['hours'] = endDatetime.hour
        xmlObj['getAccountPerformance']['toDateVO']['year'] = endDatetime.year
        xmlObj['getAccountPerformance']['toDateVO']['seconds'] = endDatetime.second
        xmlObj['getAccountPerformance']['toDateVO']['month'] = endDatetime.month
        xmlObj['getAccountPerformance']['toDateVO']['minutes'] = endDatetime.minute
        xmlObj['getAccountPerformance']['accountIds'] = accountId
        xmlObj['getAccountPerformance']['accountNumber'] = accountNumber
        xmlObj['getAccountPerformance']['category'] = category
        # NOTE(review): strftime('%s') (epoch seconds) is a platform-specific
        # extension -- confirm it behaves on the deployment platform.
        xmlObj['getAccountPerformance']['toDate'] = int(endDatetime.strftime('%s'))
        xmlObj['getAccountPerformance']['fromDate'] = int(startDatetime.strftime('%s'))
        return xmltodict.unparse(xmlObj)

    def createCashTransferPayload(self, accountId):
        '''Build a getCashTransfers payload for the given account id.'''
        xmlStr = TradeMonsterConstants.AccountRequests.DEFAULT_CASH_TRANSFER
        xmlObj = xmltodict.parse(xmlStr)
        xmlObj['getCashTransfers']['accountId'] = accountId
        return xmltodict.unparse(xmlObj)
class AccountServices(object):
    '''
    Invokes the various account-specific TradeMonster services and parses
    selected XML responses into plain result objects (Account,
    AccountTransaction).
    '''

    def __init__(self, pyTradeMonster):
        # pyTradeMonster performs the authenticated HTTP calls via doCall().
        self.pyTradeMonster = pyTradeMonster
        self.accountRequests = AccountRequests()

    def getParsedAccountObjects(self):
        '''
        Populate a dictionary of Account objects.
        Key is the account number, e.g. '5PDXXXXX'.
        :return: dict mapping accountNumber -> Account
        '''
        xmlObj = self.getAccounts()
        accountDict = {}
        root = xmlObj[TradeMonsterConstants.ResponseRoots.RETRIEVE_ACCT_ROOT]
        # NOTE(review): assumes 'accountList' parses to a list of dicts;
        # xmltodict yields a single dict when only one child element exists
        # -- confirm the single-account case is handled upstream.
        for acct in root['accountList']:
            account = Account()
            account.accountId = acct['accountId']
            account.accountNumber = acct['accountNumber']
            account.accountDisplayName = acct['accountDisplayName']
            account.accountType = acct['accountType']
            account.accountInceptionDate = acct['accountInceptionDate']
            account.accountRegistrationType = acct['accountRegistrationType']
            account.accountStatus = acct['accountStatus']
            account.alertEmail = acct['alertEmail']
            account.ownerFirstName = acct['primaryAccountHolderFirstName']
            account.ownerLastName = acct['primaryAccountHolderLastName']
            # the user id comes from the shared profile node, not the account
            account.userId = root['userProfile']['userId']
            accountDict[account.accountNumber] = account
        return accountDict

    def getParsedAccountHistory(self, account, numTransactions, transactionType, start=None, end=None):
        '''
        Return a list of AccountTransaction objects for the given account.
        :param account: Account object (supplies userId and accountId)
        :param numTransactions: maximum number of transactions to fetch
        :param transactionType: transaction type filter
        :param start: optional iso8601 start time
        :param end: optional iso8601 end time
        :return: transactionList
        '''
        xmlObj = self.getAccountHistory(account.userId, numTransactions, account.accountId, transactionType, start, end)
        transactionList = []
        root = xmlObj[TradeMonsterConstants.ResponseRoots.RETRIEVE_ACCT_HISTORY_ROOT]
        for item in root['accountHistoryVO']:
            transaction = AccountTransaction()
            transaction.accountId = item['accountId']
            transaction.currency = item['amount']['currency']
            transaction.amount = item['amount']['amount']
            transaction.transactionDescription = item['transactionDescription']
            transaction.transactionDate = item['transactionDate']
            transaction.transactionType = item['transactionType']
            # The remaining fields are optional in the response payload.
            if 'acType' in item:
                transaction.accountType = item['acType']
            if 'fee' in item:
                transaction.fee = item['fee']['amount']
            if 'instrumentType' in item:
                transaction.instrumentType = item['instrumentType']
            if 'side' in item:
                transaction.buyOrSell = item['side']
            if 'quantity' in item:
                transaction.quantity = item['quantity']
            if 'status' in item:
                transaction.status = item['status']
            if 'symbol' in item:
                transaction.symbol = item['symbol']
            if 'commission' in item:
                transaction.commissionAmount = item['commission']['amount']
            transactionList.append(transaction)
        return transactionList

    def getAccounts(self):
        # Raw XML response listing the user's accounts.
        url = TradeMonsterConstants.URLS.ACCOUNT_PERSON_SERVICE
        payload = TradeMonsterConstants.AccountRequests.DEFAULT_FETCH_ACCOUNTS
        return self.pyTradeMonster.doCall(url, payload)

    def getAccountHistory(self, userId, numTransactions, accountId, transactionType, start=None, end=None):
        # Raw XML account history (see getParsedAccountHistory for parsing).
        url = TradeMonsterConstants.URLS.ACCOUNT_HISTORY_SERVICE
        payload = self.accountRequests.createAccountHistoryPayload(userId, numTransactions, accountId, transactionType, start, end)
        return self.pyTradeMonster.doCall(url, payload)

    def getAllGroups(self):
        url = TradeMonsterConstants.URLS.ACCOUNT_GROUP_SERVICE
        payload = TradeMonsterConstants.AccountRequests.DEFAULT_FETCH_GROUPS
        return self.pyTradeMonster.doCall(url, payload)

    def getBalanceForAccount(self, accountNumber):
        url = TradeMonsterConstants.URLS.ACCOUNT_BALANCES_SERVICE
        payload = self.accountRequests.createAccountBalancePayload(accountNumber)
        return self.pyTradeMonster.doCall(url, payload)

    def getAccountPerformance(self, accountNumber, accountId, fromDate, toDate, category):
        url = TradeMonsterConstants.URLS.ACCOUNT_PERFORMANCE_SERVICE
        payload = self.accountRequests.createAccountPerformancePayload(accountNumber, accountId, fromDate, toDate, category)
        return self.pyTradeMonster.doCall(url, payload)

    def getCashTransfers(self, accountId):
        url = TradeMonsterConstants.URLS.ACCOUNT_CASH_TRANSFER_SERVICE
        payload = self.accountRequests.createCashTransferPayload(accountId)
        return self.pyTradeMonster.doCall(url, payload)

    def doCreateAccountGroup(self, groupNameToUse, accountIds):
        '''
        Create a new group with a list of accountIds.
        :param groupNameToUse: display name for the new group
        :param accountIds: list of account ids
        :return: None
        '''
        # NOTE(review): unlike the other service calls, the doCall result is
        # discarded here -- confirm callers never need the response.
        url = TradeMonsterConstants.URLS.GROUP_SERVICE
        payload = self.accountRequests.createNewGroupPayload(groupNameToUse, accountIds)
        self.pyTradeMonster.doCall(url, payload)
| {
"repo_name": "adamsherman/pytrademonster",
"path": "pytrademonster/services/accountService.py",
"copies": "1",
"size": "10732",
"license": "mit",
"hash": 3699817696934770700,
"line_mean": 47.5610859729,
"line_max": 166,
"alpha_frac": 0.6869176295,
"autogenerated": false,
"ratio": 4.284231536926148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5471149166426148,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.python import log
from twisted.protocols import basic
import json
from fsprocedures import FileSystemProcedures
import os
class SlaveClient(basic.LineReceiver):
    """Protocol run inside a per-user slave process.

    Identifies itself to the service on connect, then forwards JSON
    commands to FileSystemProcedures and sends results (and subscribed
    task events) back as JSON lines.
    """

    json_command_handler = None

    def __init__(self):
        pass

    def connectionMade(self):
        log.msg("Made connection to server")
        identification = {"service_username": self.factory.username}
        self.sendLine(json.dumps(identification))
        log.msg("Sent user identification")
        handler = FileSystemProcedures(self.factory.username,
                                       self.factory.user_dir)
        # push "Tasks" events straight down the wire
        handler.subscribe("Tasks", self.sendObject)
        self.json_command_handler = handler

    def lineReceived(self, sdata):
        try:
            data = json.loads(sdata)
        except ValueError:
            log.msg("Unable to decode JSON data: %s" % sdata)
            return
        # handle cmd
        log.msg("Handling: %s" % sdata)
        self.sendObject(self.json_command_handler(data))

    def sendObject(self, obj):
        return_str = json.dumps(obj)
        log.msg("Responding: %s" % return_str)
        self.sendLine(return_str)

    def connectionLost(self, reason=protocol.connectionDone):
        log.msg("Lost connection to the server")
class SlaveFactory(protocol.ClientFactory):
    """Client factory for the slave; `username` and `user_dir` are attached
    by main() before connecting."""

    protocol = SlaveClient

    def clientConnectionFailed(self, connector, reason):
        # Without a service connection the slave has no purpose; shut down.
        log.msg("Connection failed, exiting")
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        log.msg("Lost connection, exiting")
        reactor.stop()
def main(username, user_dir, service_port):
log.startLogging(open('./log/slave_%s.out.txt' % username, 'w'))
print username, user_dir, service_port
f = SlaveFactory()
f.username = username
f.user_dir = user_dir
os.environ["USERNAME"] = os.environ["USER"] = username
os.environ["HOME"] = os.environ["USERPROFILE"] = user_dir
reactor.connectTCP("localhost", service_port, f)
reactor.run()
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/slave.py",
"copies": "1",
"size": "2217",
"license": "mit",
"hash": 7330565596078343000,
"line_mean": 30.2253521127,
"line_max": 79,
"alpha_frac": 0.6414073072,
"autogenerated": false,
"ratio": 4.009041591320073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 71
} |
__author__ = 'Adam'
from twisted.internet import reactor
from twisted.python import log
import win32serviceutil
import win32service
import os
import sys
class AppServerSvc(win32serviceutil.ServiceFramework):
    """Windows service wrapper hosting the Twisted-based web service."""

    _svc_name_ = "FileService"
    _svc_display_name_ = "File Service"
    # Debug modes are requested via extra command-line arguments (see main()).
    remote_debug = False
    local_debug = False

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)

    def SvcStop(self):
        # Stop the Twisted reactor; SvcDoRun then finishes and reports
        # SERVICE_STOPPED below.
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        reactor.stop()

    def SvcDoRun(self):
        self.remote_debug = "rdebug" in sys.argv
        self.local_debug = "debug" in sys.argv
        if self.remote_debug:
            # Attach to a waiting PyCharm remote debugger on localhost.
            sys.path.append(os.path.join(sys.prefix, "pycharm-debug.egg"))
            import pydevd
            pydevd.settrace('localhost', port=43234, stdoutToServer=True,
                            stderrToServer=True)
        # Change to our dir (the project root, one level above this file)
        main_dir = os.path.dirname(os.path.realpath(__file__)) + "/.."
        os.chdir(main_dir)
        sys.path.append(main_dir)
        # Log to stdout when debugging, to a file when running as a service.
        if self.remote_debug or self.local_debug:
            log.startLogging(sys.stdout)
        else:
            log.startLogging(open('./log/service.out.txt', 'w'))
        # Imported late so it picks up the adjusted cwd and sys.path.
        import webservice
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        # NOTE(review): presumably blocks until SvcStop() stops the
        # reactor -- confirm webservice.main() runs the reactor.
        webservice.main()
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
def main(rdebug=False):
args = sys.argv
if args[1].lower() == "rdebug":
args.append("rdebug")
args[1] = "restart"
elif args[1].lower() == "debug":
args.append("debug")
print args
win32serviceutil.HandleCommandLine(
AppServerSvc, os.path.realpath(__file__)[:-3] + ".AppServerSvc", args
)
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/service/winservice.py",
"copies": "1",
"size": "1776",
"license": "mit",
"hash": -6139824421357308000,
"line_mean": 26.75,
"line_max": 77,
"alpha_frac": 0.6255630631,
"autogenerated": false,
"ratio": 3.6319018404907975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4757464903590798,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
from twisted.protocols import basic
from twisted.internet import protocol
from twisted.python import log
from twisted.internet import reactor
import userprocess
import subject
import json
import os
import useful
# Raise Twisted's default line-length cap so large JSON command payloads
# exchanged with slaves are not rejected as over-long lines.
basic.LineReceiver.MAX_LENGTH = 100000
class SlaveConnectionHandler(basic.LineReceiver):
    """
    Server-side protocol for one slave connection.

    The first JSON object received must carry 'service_username'; after a
    successful registration, messages from the slave are published on
    "out.<username>" and messages published on "in.<username>" are sent
    back down the wire.
    """

    def __init__(self):
        self.username = None

    def connectionMade(self):
        log.msg("Connected to a slave")

    def connectionLost(self, reason=protocol.connectionDone):
        if self.username is not None:
            log.msg("Lost connection to %s's slave" % self.username)
        else:
            log.msg("Lost connection to unknown slave")

    def disconnect(self):
        log.err("Disconnecting %s's slave connection" % (
            self.username or "UNKNOWN"
        ))
        self.transport.loseConnection()

    def lineReceived(self, message):
        # Guard only the JSON decode: previously objectReceived ran inside
        # this try, so a ValueError raised while *handling* a valid message
        # was misreported as non-JSON input and killed the connection.
        try:
            data = json.loads(message)
        except ValueError:
            log.err("Received non-JSON data on service connection")
            self.disconnect()
        else:
            self.objectReceived(data)

    def objectReceived(self, data):
        if self.username is None:
            # First message must identify the slave's user.
            if "service_username" in data:
                self.username = data["service_username"]
                if not self.factory.register_slave(self.username):
                    self.disconnect()
                else:
                    # Route queued/in-bound commands to this connection.
                    self.factory.subscribe("in." + self.username,
                                           self.sendObject)
                    log.msg("%s's slave connected" % self.username)
            else:
                self.disconnect()
        else:
            log.msg("Notifying handlers from %s's slave" % self.username)
            self.factory.notify("out." + self.username, data)

    def sendObject(self, obj):
        json_data = json.dumps(obj)
        log.msg(
            "Forwarding data to %s's slave: %s" % (self.username, json_data))
        self.sendLine(json_data)
class SlaveHandler(protocol.ServerFactory, subject.EventRetainer):
    """
    Accepts connections from per-user slave processes and routes commands.

    Commands for a user are published on the "in.<username>" channel; data
    coming back from a slave is published on "out.<username>".
    """

    def __init__(self):
        subject.EventRetainer.__init__(self)
        self.protocol = SlaveConnectionHandler
        # Bind an ephemeral port; new slaves are told this port on launch.
        tcp_listener = reactor.listenTCP(0, self)
        self.service_port = tcp_listener.getHost().port
        self.exec_path = useful.get_exec_path()
        self.script_path = os.path.abspath(
            os.path.dirname(os.path.realpath(__file__)))
        # usernames whose slave was spawned but has not connected back yet
        self.launching = []

    def register_slave(self, username):
        """Accept a slave's registration; True only for an expected,
        not-yet-connected user."""
        if username not in self.launching:
            log.err(
                "Was not expecting a connection from %s's slave" % username
            )
            return False
        if self.count("in." + username):
            log.err(
                "There is already an active slave connection for %s" % username
            )
            return False
        self.launching.remove(username)
        return True

    def launch_slave_process(self, user_auth):
        """Spawn a slave process running as the authenticated user."""
        log.msg("Launching %s %s %s %s %s" % (
            self.exec_path, self.script_path, "slave", user_auth.home_dir,
            self.service_port
        ))
        slave_argv = [
            self.exec_path,
            self.script_path,
            "slave",
            user_auth.username,
            user_auth.home_dir,
            str(self.service_port),
        ]
        userprocess.create_process_as(user_auth, slave_argv)
        self.launching.append(user_auth.username)

    def dispatch_command(self, user_auth, cmd_obj):
        """Deliver a command to the user's slave, spawning one on demand."""
        delivered = self.notify("in." + user_auth.username, cmd_obj)
        if delivered == 0 and user_auth.username not in self.launching:
            self.launch_slave_process(user_auth)
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/slaveservice.py",
"copies": "1",
"size": "3665",
"license": "mit",
"hash": -6847050988904950000,
"line_mean": 30.8695652174,
"line_max": 79,
"alpha_frac": 0.5822646658,
"autogenerated": false,
"ratio": 4.183789954337899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5266054620137899,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
import json
import requests
from pytrademonster.constants import TradeMonsterConstants
def lookupOption(strike, underlier, expiryDate=None, rowsPerPage=10000, optionType=None):
    '''
    Look up valid option symbols for an underlier via TradeMonster's service.

    :param strike: None for all strikes, otherwise a specific strike, i.e. 200
    :param underlier: underlying equity
    :param expiryDate: MM/DD/YYYY format, i.e. 08/12/2016 (None for all expiries)
    :param rowsPerPage: maximum number of result rows per page
    :param optionType: None for both puts and calls, or PUT or CALL
    :return: parsed JSON response with valid option symbols, i.e. 'SPYS1715C191500'
    '''
    strike = float(strike) if strike is not None else None
    request = {
        'remoteClassName': 'com.om.dh.sm.vo.DetailedLookUpReqVO',
        'expirationDate': expiryDate,
        'optionType': optionType,
        'strikePrice': strike,
        'pagination': {
            'sortBy': None,
            'startRow': 0,
            'totalPages': 0,
            'rowsPerPage': rowsPerPage,
            'currentPage': 1,
            'results': [],
            'endRow': 0,
            'totalRows': 0,
            'remoteClassName': 'com.om.dh.dao.pagination.PaginationResult',
            'previousPage': False,
            'nextPage': False,
        },
        'underlier': underlier,
    }
    # The endpoint expects a form-encoded body of the shape
    # "jsonObject=<json>"; build it directly instead of string-slicing a
    # dumped wrapper dict (the previous approach was brittle).
    body = 'jsonObject=' + json.dumps(request)
    postedResult = requests.post(TradeMonsterConstants.OPTION_LOOKUP_SERIVCE,
                                 data=body,
                                 headers={'Content-type': 'application/x-www-form-urlencoded'})
    return json.loads(postedResult.text)
| {
"repo_name": "femtotrader/pytrademonster",
"path": "pytrademonster/services/lookupService.py",
"copies": "3",
"size": "1820",
"license": "mit",
"hash": 4432692690475954000,
"line_mean": 42.3333333333,
"line_max": 158,
"alpha_frac": 0.5758241758,
"autogenerated": false,
"ratio": 4.008810572687224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6084634748487224,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
import logging
log = logging.getLogger('whether.parsing')
# Indices into the 8-field station tuples built by parse_stations(), so the
# rest of the module can address tuple fields by name.
# stations
STATION_ID = 0  # 'station_id'
WBAN_ID = 1  # 'wban_id'
STATION_NAME = 2  # 'station_name'
COUNTRY = 3  # 'country'
STATE = 4  # 'state'
LATITUDE = 5  # 'latitude'
LONGITUDE = 6  # 'longitude'
ELEVATION = 7  # 'elevation'
def parse_stations(filename):
    """
    Parse the fixed-width station history file into a dict keyed by
    (station_id, wban_id). Duplicate records for the same key are merged
    field-by-field, then the whole set is deduplicated by lat/lon proximity.
    """
    stations = {}
    found_data_start = False
    # 'with' guarantees the file is closed even if parsing raises
    # (the original leaked the handle on exceptions).
    with open(filename) as stations_file:
        # prime the loop; note the very first line is never examined
        # (presumably a preamble line before the 'USAF' column header)
        line = stations_file.readline()
        while line != '':
            line = stations_file.readline()
            if not found_data_start and line[:4] == 'USAF':
                found_data_start = True
                # skip the line immediately after the column header
                line = stations_file.readline()
                continue
            elif not found_data_start:
                continue
            # break the fixed-width record apart
            station_id_str = line[:7]
            wban_id_str = line[7:13]
            station_name = line[13:43].strip()
            country = line[43:48].strip()
            state = line[48:51].strip()
            lat_str = line[51:59].strip()
            lon_str = line[59:68].strip()
            elev_str = line[68:76].strip()

            # normalise empty text fields to None
            station_name = None if station_name == '' else station_name
            country = None if country == '' else country
            state = None if state == '' else state

            # parse the id fields; skip malformed records entirely
            try:
                station_id = int(station_id_str)
                wban_id = int(wban_id_str)
            except ValueError:
                continue
            # sentinel values mean "unknown" in the source data
            latitude = float(lat_str) if (lat_str != '+00.000' and lat_str != '') else None
            longitude = float(lon_str) if (lon_str != '+000.000' and lon_str != '') else None
            elevation = float(elev_str) if (elev_str != '-0999.0' and elev_str != '') else None

            key = (station_id, wban_id)
            this_station = (station_id, wban_id, station_name, country, state, latitude, longitude, elevation)
            if key not in stations:
                stations[key] = this_station
            else:
                stations[key] = merge_stations(stations[key], this_station)

    # find duplicate stations by lat/lon
    stations = filter_and_merge_stations_by_location(stations)
    return stations
def merge_stations(first, second):
    """
    Merge two records for the same (station_id, wban_id), preferring the
    first record's fields and falling back to the second's when the first
    is missing a value.
    """
    def pick(a, b):
        # parse_stations() stores missing fields as None (text fields are
        # normalised from '' to None), so both count as "missing" here.
        # The original compared against '' only, so the fallback never
        # actually fired for text fields.
        return a if a not in (None, '') else b

    station_id = first[STATION_ID]
    wban_id = first[WBAN_ID]
    station_name = pick(first[STATION_NAME], second[STATION_NAME])
    # BUG FIX: the original fell back to second's *name* for the country.
    country = pick(first[COUNTRY], second[COUNTRY])
    state = pick(first[STATE], second[STATE])
    latitude = pick(first[LATITUDE], second[LATITUDE])
    longitude = pick(first[LONGITUDE], second[LONGITUDE])
    elevation = pick(first[ELEVATION], second[ELEVATION])
    return (station_id, wban_id, station_name, country, state, latitude,
            longitude, elevation)
def filter_and_merge_stations_by_location(stations):
    """
    Collapse stations whose coordinates are within ~0.02 degrees of each
    other into one merged record. Returns a dict mapping every original
    (station_id, wban_id) key -- including the removed duplicates' keys --
    to its merged station tuple, so old ids still resolve.
    """
    log.info("Number of stations initially: %d", len(stations))
    # Sort by longitude so nearby stations end up adjacent; unknown
    # longitudes sort to the end via the 200 sentinel.
    stations = sorted(list(stations.values()),
                      key=lambda station: station[LONGITUDE] if station[LONGITUDE] is not None else 200)
    deduped_stations = {}
    removed_stations = 0
    while len(stations) > 0:
        base_station = stations[0]
        del stations[0]
        key = (base_station[0], base_station[1])
        other_found = []
        for i in range(0, len(stations)):
            other_station = stations[i]
            if equal_latitude_longitude(base_station[LATITUDE], base_station[LONGITUDE],
                                        other_station[LATITUDE], other_station[LONGITUDE]):
                other_found.append(i)
                # fold the duplicate's fields into the surviving record
                base_station = merge_stations(base_station, other_station)
            else:
                if None in (base_station[LATITUDE], other_station[LATITUDE],
                            base_station[LONGITUDE], other_station[LONGITUDE]):
                    continue
                elif abs(base_station[LATITUDE] - other_station[LATITUDE]) > 0.021 and abs(
                        base_station[LONGITUDE] - other_station[LONGITUDE]) > 0.021:
                    # NOTE(review): requiring *both* deltas to exceed the
                    # threshold before breaking looks suspicious given the
                    # list is sorted by longitude only -- confirm intent.
                    break
        # for each duplicate, merge it into the first one found and remove from station list
        for i in sorted(other_found, reverse=True):
            alt_key = (stations[i][0], stations[i][1])
            # maintain list of duplicate id replacements
            deduped_stations[alt_key] = base_station
            del stations[i]
            removed_stations += 1
        deduped_stations[key] = base_station
    log.info("Done removing stations! %d removed.", removed_stations)
    return deduped_stations
def equal_latitude_longitude(first_lat, first_lon, second_lat, second_lon):
    """Return True when both coordinate pairs are floats and lie within
    0.02 degrees of each other on each axis; False otherwise (including
    when any coordinate is missing or not a float)."""
    coords = (first_lat, first_lon, second_lat, second_lon)
    if not all(isinstance(c, float) for c in coords):
        return False
    return (abs(first_lat - second_lat) <= 0.02
            and abs(first_lon - second_lon) <= 0.02)
def parse_summary(file_path, stations):
    """
    Parse a fixed-width daily-summary file into tab-separated rows.

    `stations` maps (station_id, wban_id) -- including duplicates' ids --
    to the canonical merged station tuple (see
    filter_and_merge_stations_by_location); rows whose station is unknown
    are skipped. Returns a list of utf-8 encoded TSV lines.
    """
    with open(file_path, 'r') as f:
        summaries = []
        # skip the first line (column header)
        for line in f.readlines()[1:]:
            # split into relevant fields
            station_id = int(line[0:6].strip())
            wban_id = int(line[7:12].strip())
            # now we need to check the map of duplicate stations to get the id that goes to the db
            this_station = stations.get((station_id, wban_id))
            if this_station is None:
                continue
            station_id, wban_id = this_station[0:2]
            # YYYY-MM-DD
            date = line[14:18] + '-' + line[18:20] + '-' + line[20:22]
            # Sentinel values (9999.9 / 999.9 / 99.99) mean "missing".
            mean_temp_str = line[24:30]
            mean_temp = mean_temp_str if mean_temp_str != '9999.9' else None
            mean_temp_obs = line[31:33]
            dew_point_str = line[35:41]
            dew_point = dew_point_str if dew_point_str != '9999.9' else None
            dew_point_obs = line[42:44]
            sea_pressure_str = line[46:52]
            sea_pressure = sea_pressure_str if sea_pressure_str != '9999.9' else None
            sea_pressure_obs = line[53:55]
            station_pressure_str = line[57:63]
            station_pressure = station_pressure_str if station_pressure_str != '9999.9' else None
            station_pressure_obs = line[64:66]
            visibility_str = line[68:73]
            visibility = visibility_str if visibility_str != '999.9' else None
            visibility_obs = line[74:76]
            mean_wind_spd_str = line[78:83]
            mean_wind_spd = mean_wind_spd_str if mean_wind_spd_str != '999.9' else None
            mean_wind_spd_obs = line[84:86]
            max_wind_spd_str = line[88:93]
            max_wind_spd = max_wind_spd_str if max_wind_spd_str != '999.9' else None
            max_wind_gust_str = line[95:100]
            max_wind_gust = max_wind_gust_str if max_wind_gust_str != '999.9' else None
            max_temp_str = line[102:108]
            max_temp = max_temp_str if max_temp_str != '9999.9' else None
            # '*' marker presumably flags a value derived from hourly data
            max_temp_hourly = line[108] == '*'
            min_temp_str = line[110:116]
            min_temp = min_temp_str if min_temp_str != '9999.9' else None
            min_temp_hourly = line[116] == '*'
            precip_str = line[118:123]
            precip_flag = line[123]
            # flags 'H' and 'I' are treated as "no usable precip value"
            precip = precip_str if (
                precip_str != '99.99' and precip_flag != 'H' and precip_flag != 'I') else None
            snow_depth_str = line[125:130]
            snow_depth = snow_depth_str if snow_depth_str != '999.9' else None
            # weather-phenomenon indicator block: one '0'/'1' char each
            fog = line[132] == '1'
            rain = line[133] == '1'
            snow = line[134] == '1'
            hail = line[135] == '1'
            thunder = line[136] == '1'
            tornado = line[137] == '1'
            # missing values intentionally serialise as the string 'None'
            summary = '\t'.join([
                str(station_id),
                str(wban_id),
                date,
                str(mean_temp),
                str(mean_temp_obs),
                str(max_temp),
                str(max_temp_hourly),
                str(min_temp),
                str(min_temp_hourly),
                str(dew_point),
                str(dew_point_obs),
                str(sea_pressure),
                str(sea_pressure_obs),
                str(station_pressure),
                str(station_pressure_obs),
                str(visibility),
                str(visibility_obs),
                str(mean_wind_spd),
                str(mean_wind_spd_obs),
                str(max_wind_spd),
                str(max_wind_gust),
                str(precip),
                precip_flag,
                str(snow_depth),
                str(fog),
                str(rain),
                str(snow),
                str(hail),
                str(thunder),
                str(tornado)
            ]) + '\n'
            summary = summary.encode('utf-8')
            summaries.append(summary)
        return summaries
| {
"repo_name": "dikaiosune/whether",
"path": "py_whether/parsing.py",
"copies": "1",
"size": "9160",
"license": "mit",
"hash": -4105282344639931400,
"line_mean": 35.64,
"line_max": 118,
"alpha_frac": 0.5515283843,
"autogenerated": false,
"ratio": 3.586530931871574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4638059316171574,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
import pygame as pg
import Colors
from Blocks import BlockShape
class Game():
    """Owns the falling-block game state plus per-frame update/draw logic."""

    # Class-level defaults kept for backward compatibility; __init__ now
    # sets per-instance copies because the mutable class attribute
    # `blocks = []` was shared across every Game instance.
    screen = None
    blocks = []
    currentBlock = None
    lanes = (75, 100, 125, 150, 175, 200, 225, 250, 275, 300)
    done = False

    def __init__(self, screen):
        self.screen = screen
        self.blocks = []          # settled shapes piled at the bottom
        self.currentBlock = None  # the shape currently falling
        self.done = False
        self.makeNewCurrent()

    def isDone(self):
        """Return True once the player has asked to quit."""
        return self.done

    def handleEvents(self):
        """Poll pygame's event queue and apply player input."""
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.done = True
            elif event.type == pg.KEYUP:
                # releasing the down key cancels the soft drop
                if event.key == pg.K_DOWN:
                    self.currentBlock.boost(False)
            elif event.type == pg.KEYDOWN:
                if event.key == pg.K_ESCAPE:
                    self.done = True
                elif event.key == pg.K_LEFT:
                    self.currentBlock.shift(-1)
                elif event.key == pg.K_RIGHT:
                    self.currentBlock.shift(1)
                elif event.key == pg.K_DOWN:
                    self.currentBlock.boost(True)
                elif event.key == pg.K_UP:
                    self.currentBlock.rotate()

    def makeNewCurrent(self):
        """Freeze the current block (if any) into the pile and spawn a new
        block in lane 5."""
        print(self.currentBlock)
        settled = None
        if isinstance(self.currentBlock, BlockShape):
            settled = self.currentBlock.copy()
            settled.speed = 0  # the settled copy no longer falls
        self.currentBlock = BlockShape(self.lanes, 5, 0)
        if settled is not None:
            self.blocks.append(settled)
        print(self.currentBlock)

    def update(self):
        """Advance one frame: redraw, move the block, settle on collision."""
        self.drawEnviroment()
        self.currentBlock.update()
        # Settle when touching the pile or reaching the floor (y == 650).
        if pg.sprite.spritecollide(self.currentBlock, self.blocks, False) or self.currentBlock.rect.bottom >= 650:
            self.currentBlock.stop()
            self.makeNewCurrent()
        for block in self.blocks:
            block.draw(self.screen)
        self.currentBlock.draw(self.screen)

    def drawEnviroment(self):
        """Draw the static playfield: background, side walls and floor."""
        self.screen.fill(Colors.WHITE)
        pg.draw.line(self.screen, Colors.BLACK, (75, 0), (75, 650))
        pg.draw.line(self.screen, Colors.BLACK, (325, 0), (325, 650))
        pg.draw.line(self.screen, Colors.BLACK, (75, 650), (325, 650))
"repo_name": "artog/intro-prog",
"path": "Tetris/Game.py",
"copies": "1",
"size": "2430",
"license": "mit",
"hash": 5847560025204340000,
"line_mean": 27.6,
"line_max": 121,
"alpha_frac": 0.5534979424,
"autogenerated": false,
"ratio": 3.610698365527489,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4664196307927489,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
import sys
import subprocess
"""
Description:
Create a process as another user.
Basically popen with a prefixed authentication parameter.
On windows this requires the calling process/user to have impersonate,
create objects, and change process token privileges (AKA Local Service
user)
On Linux sudo is used requiring the user has the correct permissions
in the sudoers file.
Usage:
create_process_as( Authentication, args, **kwargs)
Return:
Result of subprocess.popen
Arguments:
Authentication instance with valid username credential (windows requires
the win32_token attribute to be set)
List/String of arguments forwarded to popen
dict of args also forwarded to popen
>>> auth = Authentication("test","test")
>>> popen_obj = create_process_as(auth, "whoami")
>>> popen_obj.wait()
>>> # etc...
Note: I can't seem to get popen's env argument working with this
"""
if sys.platform.startswith("win"):
    import win32process
    import useful

    def __convert_startup_info(old_info):
        # Translate between subprocess.STARTUPINFO and the win32process
        # equivalent, copying only the fields CreateProcess cares about.
        new_info = None
        if old_info.__class__ is subprocess.STARTUPINFO:
            new_info = win32process.STARTUPINFO()
        elif type(old_info) is type(win32process.STARTUPINFO()):
            new_info = subprocess.STARTUPINFO()
        useful.copy_some(old_info, new_info,
                         ["dwFlags", "hStdInput", "hStdOutput", "hStdErr",
                          "wShowWindow"])
        return new_info

    # Monkey-patch subprocess' low-level CreateProcess so that Popen calls
    # whose STARTUPINFO carries a `token` attribute are routed through
    # CreateProcessAsUser instead.
    __builtin_CreateProcess = subprocess._subprocess.CreateProcess

    def __create_process(*args):
        # args[8] is the startupinfo argument of CreateProcess.
        if hasattr(args[8], "token"):
            arg_list = list(args)
            arg_list[8] = __convert_startup_info(arg_list[8])
            return win32process.CreateProcessAsUser(args[8].token,
                                                    *tuple(arg_list))
        else:
            return __builtin_CreateProcess(*args)

    subprocess._subprocess.CreateProcess = __create_process

    def create_process_as(user_auth, args=list(), **kwargs):
        """Popen as another user via the token on user_auth (see module
        docstring for usage)."""
        # BUG FIX: list("cmd") splits a command string into characters;
        # wrap a bare string in a one-element argument list instead.
        if isinstance(args, str):
            args = [args]
        if "startupinfo" not in kwargs:
            kwargs["startupinfo"] = subprocess.STARTUPINFO()
        kwargs["startupinfo"].token = user_auth.win32_token
        return subprocess.Popen(args, **kwargs)

elif sys.platform in ["linux2", "darwin"]:
    import subprocess

    def create_process_as(user_auth, args=list(), **kwargs):
        """Popen as another user by prefixing sudo (see module docstring;
        requires a suitable sudoers entry)."""
        # BUG FIX: see the Windows branch -- wrap a bare command string.
        if isinstance(args, str):
            args = [args]
        return subprocess.Popen(["sudo", "-nu", user_auth.username] + args,
                                **kwargs)
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/userprocess.py",
"copies": "1",
"size": "2751",
"license": "mit",
"hash": 4016351410452346000,
"line_mean": 30.2613636364,
"line_max": 76,
"alpha_frac": 0.6277717194,
"autogenerated": false,
"ratio": 4.180851063829787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5308622783229787,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
import sys
'''
Description:
Use available methods to authenticate supplied system credentials.
On windows the native LogonUser is used.
On Linux, PAM is required and used by default.
If specified, the passwd and shadow files are used - note this
requires the calling user to be part of the shadow group.
Usage:
Authentication(username, password, use_PAM = True)
Return:
Authentication instance
Authentication attributes:
username
home_dir
[win32_token]
Arguments:
Username
Password
[use_PAM] - If False /etc/passwd and /etc/shadow are used
Example:
# Use PAM
auth = Authentication("test","test")
print "Authenticated %s" % auth.username
# Use passwd and shadow
# Calling process/user must be part of shadow group
auth = Authentication("test","test",False)
Throws:
LoginError - Base of all exceptions
LoginNoPasswordError - No password has been set for the user
LoginLockedError - User account has been locked
LoginNoUser - No such user
LoginExpiredError - The user account password has expired
LoginInvalid - Invalid credentials supplied
'''
class LoginError(Exception):
    """Base class for every authentication failure raised by this module."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        # Same rendering as before: the repr of the stored message.
        return "%r" % (self.msg,)
class LoginExpiredError(LoginError):
    """Raised when the user's password has expired."""

    def __init__(self, user):
        super(LoginExpiredError, self).__init__(
            "Password for user '%s' has expired" % user)
class LoginNoPasswordError(LoginError):
    """Raised when the account has no password set."""

    def __init__(self, user):
        super(LoginNoPasswordError, self).__init__(
            "No password set for user '%s'" % user)
class LoginLockedError(LoginError):
    """Raised when the account is locked."""

    def __init__(self, user):
        super(LoginLockedError, self).__init__(
            "Account for user '%s' is locked" % user)
class LoginNoUser(LoginError):
    """Raised when no account exists for the given username."""

    def __init__(self, user):
        super(LoginNoUser, self).__init__(
            "No such user '%s'" % user)
class LoginInvalid(LoginError):
    """Raised when the supplied credentials are wrong."""

    def __init__(self, user):
        super(LoginInvalid, self).__init__(
            "Invalid login for '%s'" % user)
if sys.platform.startswith("win"):
    import win32security
    import win32profile

    class Authentication:
        """Validates Windows credentials via LogonUser and keeps the
        resulting token (win32_token) for later impersonation."""

        def __init__(self, username, password, use_pam=None):
            # use_pam is accepted only for signature parity with the POSIX
            # implementation; it is ignored on Windows.
            try:
                self.win32_token = win32security.LogonUser(
                    username, None, password,
                    win32security.LOGON32_LOGON_INTERACTIVE,
                    win32security.LOGON32_PROVIDER_DEFAULT,
                )
            except win32security.error as e:
                # Map well-known Win32 logon error codes onto the module's
                # exception hierarchy; anything else becomes a LoginError.
                raise {
                    # 1317: LoginNoUser(username),
                    1326: LoginInvalid(username),
                    1327: LoginLockedError(username),
                    1328: LoginLockedError(username),
                    1330: LoginExpiredError(username),
                    1331: LoginLockedError(username),
                }.get(
                    e[0],
                    LoginError("Failed to log in as '%s': %i %s" % (
                        username, e[0], e[2]
                    ))
                )
            self.username = username
            self.home_dir = win32profile.GetUserProfileDirectory(
                self.win32_token
            )
            self.validated = True
elif sys.platform in ["linux2", "darwin"]:
    import crypt
    import pwd
    import spwd

    class Authentication:
        """
        Use PAM if the process's owner does not have access to /etc/shadow
        Access is usually with root or being a member of the shadow group
        Don't use root
        """

        def __init__(self, username, password, use_pam=True):
            try:
                pwd_entry = pwd.getpwnam(username)
                if use_pam:
                    import PAM

                    def pam_conv(_auth, _query_list, _userData):
                        # PAM conversation callback: answer every prompt
                        # with the supplied password.
                        return [(password, 0)]

                    try:
                        p = PAM.pam()
                        p.start("passwd")
                        p.set_item(PAM.PAM_USER, username)
                        p.set_item(PAM.PAM_CONV, pam_conv)
                        p.authenticate()
                        p.acct_mgmt()
                    except PAM.error, p_error:
                        print "Auth:", p_error
                        raise LoginInvalid(username)
                else:
                    enc_pw = pwd_entry[1]
                    if enc_pw in ["*", "x"]:
                        # Hash lives in /etc/shadow; reading it requires
                        # privilege (root or the shadow group).
                        try:
                            shadow_entry = spwd.getspnam(username)
                            enc_pw = shadow_entry[1]
                            # Special shadow values indicate unusable accounts.
                            if enc_pw in ["NP", "!", "", None]:
                                raise LoginNoPasswordError(username)
                            elif enc_pw in ["LK", "*"]:
                                raise LoginLockedError(username)
                            elif enc_pw == "!!":
                                raise LoginNoPasswordError(username)
                        except KeyError:
                            raise LoginError("Unable to access shadow file")
                    # crypt() re-hashes the candidate using the stored hash
                    # as the salt specification.
                    if crypt.crypt(password, enc_pw) != enc_pw:
                        raise LoginInvalid(username)
                self.home_dir = pwd_entry[5]
            except KeyError:
                # NOTE(review): an unknown user is reported as an invalid
                # login -- presumably to avoid revealing which usernames
                # exist; confirm before changing.
                # raise LoginNoUser(username)
                raise LoginInvalid(username)
            self.username = username
            self.validated = True
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/authentication.py",
"copies": "1",
"size": "5611",
"license": "mit",
"hash": 6648019423967747000,
"line_mean": 29.3297297297,
"line_max": 78,
"alpha_frac": 0.5065050793,
"autogenerated": false,
"ratio": 4.703269069572507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5709774148872507,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
import weakref
from functools import partial
class WeakMethodInvalid(Exception):
    """Raised when a WeakMethod is invoked after its target object died."""

    def __str__(self):
        return "A weak method was invoked on a dead object"
class WeakCallable:
    """Rebinds a plain function to an object, mimicking a bound method.

    Calling the instance invokes ``func(obj, *args, **kws)``.  The
    ``im_self``/``im_func`` attributes mirror the Python 2 bound-method
    interface that WeakMethod relies on.
    """

    def __init__(self, obj, func):
        self._obj = obj
        self._method = func

    def __call__(self, *args, **kws):
        return self._method(self._obj, *args, **kws)

    def __getattr__(self, attr):
        if attr == 'im_self':
            return self._obj
        if attr == 'im_func':
            # BUG FIX: previously returned self._meth -- an attribute
            # that does not exist -- so accessing im_func always raised
            # AttributeError('_meth') instead of returning the function.
            return self._method
        raise AttributeError(attr)
class WeakMethod:
    """Holds a callable without keeping its receiver alive.

    Plain functions are stored strongly.  Bound methods (detected via
    the Python 2 ``im_self``/``im_func`` interface) are split into a
    weak reference to the instance plus the underlying function.
    """

    def __init__(self, fn):
        if hasattr(fn, "im_self"):
            # Bound method: keep only a weak reference to the receiver.
            self._obj = weakref.ref(fn.im_self)
            self._method = fn.im_func
        else:
            self._obj = None
            self._method = fn

    def get(self):
        """Return something callable, or None if the receiver died."""
        if self._obj is None:
            return self._method
        target = self._obj()
        if target is not None:
            return WeakCallable(target, self._method)
        return None

    def dead(self):
        """True when the weakly-held receiver has been collected."""
        return self._obj is not None and self._obj() is None

    def __call__(self, *args, **kwargs):
        bound = self.get()
        if bound is None:
            raise WeakMethodInvalid()
        return bound(*args, **kwargs)
class EventSubject:
    """
    Offers observer/subscriber pattern support.

    Callbacks are wrapped in WeakMethod, so subscribing a bound method
    does not keep its instance alive; dead entries are pruned lazily
    by cleanup().
    """

    def __init__(self):
        # event name -> list of WeakMethod wrappers
        self.subscribers = {}

    def cleanup(self, name=None):
        """Remove dead weak subscribers.

        With name=None, every event is pruned and the total removed is
        returned.  For a known event name, the number removed for that
        event is returned; for an unknown name, None is returned
        (count() and notify() rely on that sentinel).
        """
        if name is None:
            removed = 0
            for event_name in self.subscribers:
                removed += self.cleanup(event_name)
            return removed
        elif name in self.subscribers:
            # FIX: removed a stray Python-2 debug statement
            # (`print "Removed ", fn`) that made the module unimportable
            # on Python 3 and spammed stdout on every cleanup.
            dead = [fn for fn in self.subscribers[name]
                    if isinstance(fn, WeakMethod) and fn.dead()]
            for fn in dead:
                self.subscribers[name].remove(fn)
            return len(dead)
        else:
            return None

    def count(self, name):
        """Live-subscriber count for `name`; 0 for unknown events."""
        return 0 if self.cleanup(name) is None else len(self.subscribers[name])

    def subscribe(self, name, func):
        """Register `func` (any callable) for event `name`."""
        if not callable(func):
            raise TypeError("Expecting callable type")
        if name not in self.subscribers:
            self.subscribers[name] = []
        self.subscribers[name].append(WeakMethod(func))

    def notify(self, name, *args):
        """Call every live subscriber of `name`; return how many ran."""
        fired = 0
        if self.cleanup(name) is not None:
            for fn in self.subscribers[name]:
                fn(*args)
                fired += 1
        return fired
class EventRetainer(EventSubject):
    """
    EventSubject variant that stores events nobody received and
    re-fires them to the first subscriber of that event name.
    """

    def __init__(self):
        EventSubject.__init__(self)
        # event name -> list of retained argument tuples
        self.retained = {}

    def subscribe(self, name, func):
        EventSubject.subscribe(self, name, func)
        # Replay anything that was fired before a subscriber existed.
        if name in self.retained:
            for saved_args in self.retained[name]:
                EventSubject.notify(self, name, *saved_args)
            self.retained.pop(name, None)

    def notify(self, name, *args):
        delivered = EventSubject.notify(self, name, *args)
        if delivered == 0:
            # No listener yet: keep the event for later replay.
            self.retained.setdefault(name, []).append(args)
        return delivered
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/subject.py",
"copies": "1",
"size": "3421",
"license": "mit",
"hash": -7275027185959374000,
"line_mean": 26.8130081301,
"line_max": 79,
"alpha_frac": 0.5399006139,
"autogenerated": false,
"ratio": 4.126658624849216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5166559238749215,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
import xml.etree.ElementTree as ET
from rdflib import RDF, Graph, BNode, URIRef, Literal
class Builder(object):
    """Converts an XML document into RDF triples on an rdflib Graph.

    Each XML element becomes a blank node typed by a URI derived from
    its tag; containment, attributes, and text content become triples.
    """

    URI_XML_SCHEMA_ROOT = 'http://xml-convert.com/schema#'
    CONTAINMENT = URIRef(URI_XML_SCHEMA_ROOT + 'contains')
    TEXT = URIRef(URI_XML_SCHEMA_ROOT + 'text')
    FILE = URIRef(URI_XML_SCHEMA_ROOT + 'file')
    FILENAME = URIRef(URI_XML_SCHEMA_ROOT + 'filename')
    FROM_FILE = URIRef(URI_XML_SCHEMA_ROOT + 'from_file')
    URI_FILE_SCHEMA_ROOT = 'http://this-file.com/schema#'

    def __init__(self, g=None):
        # BUG FIX: the original used `if g:`, but an rdflib Graph with
        # no triples is falsy (its __len__ is 0), so a caller-supplied
        # empty graph was silently discarded.  Test identity instead.
        if g is not None:
            self.g = g
        else:
            self.g = Graph()

    def parse(self, xml_file):
        """Parse `xml_file` into self.g and return the graph."""
        tree = ET.parse(xml_file)
        root = tree.getroot()
        # Make a node for this file
        file_bnode = BNode()
        self.g.add((file_bnode, RDF.type, self.FILE))
        self.g.add((file_bnode, self.FILENAME, Literal(xml_file)))
        self.__parse_node__(root, file_bnode=file_bnode)
        return self.g

    def __parse_node__(self,
                       node,
                       parent_uri=None,
                       file_bnode=None):
        uri_concept = URIRef(self.URI_FILE_SCHEMA_ROOT + node.tag)
        bnode = BNode()
        self.g.add((bnode, RDF.type, uri_concept))
        # Link to file (only the root element is passed file_bnode).
        if file_bnode:
            self.g.add((bnode, self.FROM_FILE, file_bnode))
        # Link to parent
        if parent_uri:
            self.g.add((parent_uri, self.CONTAINMENT, bnode))
        # Process attributes.  FIX: use .items() -- works on Python 2
        # and 3 -- instead of the Python-2-only dict.iteritems().
        for k, v in node.attrib.items():
            uri_attrib_concept = URIRef('{0}{1}_{2}'.format(self.URI_FILE_SCHEMA_ROOT,
                                                            node.tag,
                                                            k))
            self.g.add((bnode, uri_attrib_concept, Literal(v)))
        # Does it have non-whitespace content? If so, save it.
        if node.text is not None and not str(node.text).isspace():
            self.g.add((bnode, self.TEXT, Literal(node.text)))
        # Recurse into children; this node becomes their parent_uri.
        for child in node.findall('./'):
            self.__parse_node__(child, bnode)
| {
"repo_name": "adamnagel/xml2rdf",
"path": "xml2rdf/builder.py",
"copies": "1",
"size": "2173",
"license": "mit",
"hash": 8383171920062575000,
"line_mean": 31.9242424242,
"line_max": 86,
"alpha_frac": 0.535204786,
"autogenerated": false,
"ratio": 3.568144499178982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9602478508446135,
"avg_score": 0.00017415534656913968,
"num_lines": 66
} |
__author__ = 'adam'
import xmltodict
from pytrademonster.constants import TradeMonsterConstants
from pytrademonster.objects import PositionItem
class PositionRequests(object):
    '''
    Builds the XML request payload strings for the position service.
    '''

    @staticmethod
    def _build(template, root, assignments):
        """Parse an XML template, apply (key, value) assignments under
        the `root` element in order, and serialize back to XML."""
        payload = xmltodict.parse(template)
        for key, value in assignments:
            payload[root][key] = value
        return xmltodict.unparse(payload)

    def createPositionsBasicPayload(self, symbol, underlier):
        return self._build(
            TradeMonsterConstants.PositionRequests.DEFAULT_POSITIONS_BASIC,
            'getBasicPositionDetails',
            [('symbol', symbol), ('underlyer', underlier)])

    def createPositionsDetailPayload(self, accountId):
        return self._build(
            TradeMonsterConstants.PositionRequests.DEFAULT_POSITIONS_DETAIL,
            'getPositionsDetailNew',
            [('accountIds', accountId), ('accountId', accountId)])

    def createPositionsSelectedPayload(self, accountId, symbol, instrumentType):
        return self._build(
            TradeMonsterConstants.PositionRequests.DEFAULT_POSITIONS_SELECTED,
            'getSelectedPosition',
            [('accountIds', accountId), ('accountId', accountId),
             ('symbol', symbol), ('instrumentType', instrumentType)])

    def createPositionsUnderliersPayload(self, accountId):
        return self._build(
            TradeMonsterConstants.PositionRequests.DEFAULT_POSITIONS_UNDERLIERS,
            'getHeldUnderlyers',
            [('accountId', accountId)])

    def createTransactionHistoryPayload(self, accountId, positionType, symbol,
                                        instrumentType, userId):
        return self._build(
            TradeMonsterConstants.PositionRequests.DEFAULT_POSITIONS_TRANSACTIONS,
            'getTxHistoryForInstrument',
            [('positionType', positionType), ('accountId', accountId),
             ('symbol', symbol), ('instrumentType', instrumentType),
             ('userId', userId)])
class PositionServices(object):
    '''
    Invokes the various position-specific services and parses responses.
    '''

    # Per-position field names copied verbatim from a single position
    # entry of the service response; order matches the original
    # hand-written assignments in parseSinglePositionQuote.
    _POSITION_FIELDS = (
        'accountId', 'costOpen', 'costTotal', 'day', 'dayCostOpen',
        'dayCostTotal', 'daysToExpiry', 'description', 'exerciseStyle',
        'expirationDate', 'holdingType', 'instrumentId', 'instrumentType',
        'month', 'mtdCostOpen', 'mtdCostTotal', 'opraCode', 'optionType',
        'positionId', 'positionType', 'quantity', 'shortDescription',
        'strategyName', 'strikePrice', 'symbol', 'symbolLongName',
        'valueMultiplier', 'year', 'ytdCostOpen', 'ytdCostTotal',
    )

    def __init__(self, pyTradeMonster):
        self.pyTradeMonster = pyTradeMonster
        self.positionRequests = PositionRequests()
        self.url = TradeMonsterConstants.URLS.POSITION_SERVICE

    def getPositionsDetail(self, accountId):
        """Raw detailed-positions call for one account."""
        payload = self.positionRequests.createPositionsDetailPayload(accountId)
        return self.pyTradeMonster.doCall(self.url, payload)

    def getPositionsBasic(self, symbol, underlyer):
        """Raw basic-position call for one symbol/underlier pair."""
        payload = self.positionRequests.createPositionsBasicPayload(symbol, underlyer)
        return self.pyTradeMonster.doCall(self.url, payload)

    def getPositionsSelected(self, accountId, symbol, insturmentType):
        # NOTE: parameter name 'insturmentType' (sic) kept for caller
        # compatibility.
        payload = self.positionRequests.createPositionsSelectedPayload(accountId, symbol, insturmentType)
        return self.pyTradeMonster.doCall(self.url, payload)

    def getPositionsUnderliers(self, accountId):
        """Raw held-underliers call for one account."""
        payload = self.positionRequests.createPositionsUnderliersPayload(accountId)
        return self.pyTradeMonster.doCall(self.url, payload)

    def getPositionsTransactions(self, accountId, positionType, symbol, instrumentType, userId):
        """Raw transaction-history call for one instrument."""
        payload = self.positionRequests.createTransactionHistoryPayload(accountId, positionType, symbol, instrumentType, userId)
        return self.pyTradeMonster.doCall(self.url, payload)

    def getParsedPositionsDetail(self, accountId):
        '''
        Populate a list of PositionItem from the detailed-positions call.
        :return: list of all the positions
        '''
        positionDetailedResponse = self.getPositionsDetail(accountId)
        items = positionDetailedResponse[TradeMonsterConstants.ResponseRoots.RETRIEVE_POSITIONS_DETAILED_ROOT]['item']
        positions = []
        # xmltodict yields a list for repeated elements, a single dict
        # otherwise; handle both shapes with one shared builder.
        if isinstance(items, list):
            for item in items:
                # NOTE(review): if 'positions' parses to a dict rather
                # than a list, this iterates its keys -- confirm against
                # the actual response schema (behavior kept as-is).
                for xmlPosition in item['positions']:
                    positions.append(self._buildPosition(item, xmlPosition))
        else:
            positions.append(self._buildPosition(items, items['positions']))
        return positions

    def _buildPosition(self, underlier, xmlPosition):
        """Create one PositionItem from its underlier entry and position
        data (deduplicates the two identical branches the original had)."""
        position = PositionItem()
        position.UnderlierBeta = underlier['beta']
        position.UnderlierDescription = underlier['description']
        position.UnderlierInstrumentId = underlier['instrumentId']
        position.UnderlierInstrumentType = underlier['instrumentType']
        position.UnderlierMargin = underlier['margin']
        position.UnderlierPmMargin = underlier['pmMargin']
        position.UnderlierSymbol = underlier['symbol']
        self.parseSinglePositionQuote(position, xmlPosition)
        return position

    def parseSinglePositionQuote(self, position, xmlPosition):
        '''
        Copy the per-position fields from a parsed response entry onto
        the PositionItem.
        :param position: PositionItem to populate
        :param xmlPosition: dict for a single position entry
        '''
        for field in self._POSITION_FIELDS:
            setattr(position, field, xmlPosition[field])
"repo_name": "NunoEdgarGub1/pytrademonster",
"path": "pytrademonster/services/positionService.py",
"copies": "2",
"size": "7532",
"license": "mit",
"hash": 6123800398609525000,
"line_mean": 47.6,
"line_max": 128,
"alpha_frac": 0.6899893787,
"autogenerated": false,
"ratio": 4.4700296735905045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003633503794398772,
"num_lines": 155
} |
__author__ = 'adam'
class ProcessingError(Exception):
    """
    Base exception class for text and tweet processing.

    Subclasses set ``kind`` and ``identifier_type`` so __repr__ can
    describe what failed.
    TODO: Add logging library
    """

    def __init__(self, identifier):
        self.identifier = identifier

    def __repr__(self):
        details = (self.kind, self.identifier_type, self.identifier)
        return "%s went bad on %s : %s" % details
class TweetProcessingError(ProcessingError):
    """Processing failure tied to a specific tweet ID."""

    def __init__(self, tweetID):
        self.kind = 'TweetProcessing'
        self.identifier_type = 'tweetID'
        super(TweetProcessingError, self).__init__(tweetID)
class StringProcessingError(ProcessingError):
    """Processing failure tied to the string being processed."""

    def __init__(self, string_processed):
        self.kind = 'StringProcessing'
        self.identifier_type = 'String content'
        super(StringProcessingError, self).__init__(string_processed)
class NgramError(ProcessingError):
    """N-gram processing failure.

    Arguments:
        :param processing_step: String description of where error arose
    """

    def __init__(self, processing_step):
        self.kind = 'NgramProcessing'
        self.identifier_type = 'String content'
        super(NgramError, self).__init__(processing_step)
"repo_name": "PainNarrativesLab/TwitterDataAnalysis",
"path": "deprecated/TextAnalysis/AnalysisErrors.py",
"copies": "2",
"size": "1144",
"license": "mit",
"hash": -1878404729878744000,
"line_mean": 28.358974359,
"line_max": 92,
"alpha_frac": 0.6354895105,
"autogenerated": false,
"ratio": 4.22140221402214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.585689172452214,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
"""
Container objects for an account
"""
class Account(object):
    '''
    Container for a single brokerage account's descriptive data.
    All fields start as None and are filled in by the account services.
    '''

    # Attribute names initialised to None on construction.
    _FIELDS = (
        'accountNumber', 'accountStatus', 'accountType',
        'accountDisplayName', 'accountInceptionDate',
        'accountRegistrationType', 'accountId', 'ownerFirstName',
        'ownerLastName', 'alertEmail', 'userId',
    )

    def __init__(self):
        for field in self._FIELDS:
            setattr(self, field, None)

    def __repr__(self):
        return 'AccountName: {0}, AccountNumber: {1}, AccountType: {2}'.format(
            self.accountDisplayName, self.accountNumber, self.accountType)
class AccountTransaction(object):
    '''
    Container for a single account-history (transaction) record.
    Monetary fields default to 0; descriptive fields default to None.
    '''

    def __init__(self):
        # Identity / classification.
        self.accountId = None
        self.accountType = None
        self.currency = None
        # Monetary amounts.
        self.amount = 0
        self.commissionAmount = 0
        self.fee = 0
        # Trade details.
        self.instrumentType = None
        self.quantity = None
        self.buyOrSell = None
        self.status = None
        self.symbol = None
        # Description / timing.
        self.transactionDescription = None
        self.transactionDate = None
        self.transactionType = None

    def __repr__(self):
        return 'AccountId: {0} \t Date: {1} \t Amount: {2} \t Type: {3} \t Description: {4}'.format(
            self.accountId, self.transactionDate, self.amount,
            self.transactionType, self.transactionDescription)
| {
"repo_name": "femtotrader/pytrademonster",
"path": "pytrademonster/objects/accountObjects.py",
"copies": "3",
"size": "1494",
"license": "mit",
"hash": -4922021614496775000,
"line_mean": 28.88,
"line_max": 200,
"alpha_frac": 0.6211512718,
"autogenerated": false,
"ratio": 4.115702479338843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021352258135778093,
"num_lines": 50
} |
__author__ = 'adam'
"""
Objects to represent an options' quote chain
"""
class QuoteChain(object):
    """One expiry of an option chain plus its per-strike options."""

    def __init__(self):
        # Indicative expiry attributes, unknown until populated.
        for field in ('rootTicker', 'daysToExpire', 'deliveryType',
                      'exerciseStyle', 'expirationType',
                      'expiryDeliverable', 'expiryLabel', 'expiryType'):
            setattr(self, field, None)
        # strike and side is the key, value is a QuoteOptionItem
        self.options = {}
class QuoteOptionItem(object):
    '''
    Representation of a single strike of an options.
    Contains indicative information.
    '''

    def __init__(self):
        # All indicative fields are unknown until populated.
        for field in ('strike', 'exchange', 'side', 'minTickValue',
                      'multiplier', 'opraRoot', 'instrumentId',
                      'reutersCode', 'sharesPerContract', 'symbol',
                      'expiryYear', 'expiryMonth', 'expiryDay',
                      'tradeable'):
            setattr(self, field, None)
class QuoteItem(object):
    '''
    Representation of an actual price quote for any asset Trademonster
    trades.  Numeric fields default to 0; descriptive fields to None.
    '''

    _ZERO_FIELDS = (
        'askPrice', 'bidPrice', 'askSize', 'bidSize', 'closingMark',
        'dividend', 'dividendDate', 'highPrice', 'impliedVolatility',
        'lastPrice', 'lastTradedSize', 'lastTradedTimeMs', 'lowPrice',
        'openInterest', 'openPrice', 'previousClosePrice', 'volume',
        'yearHigh', 'yearLow',
    )
    _NONE_FIELDS = ('currency', 'dividendType', 'instrumentType', 'symbol')

    def __init__(self):
        for field in self._ZERO_FIELDS:
            setattr(self, field, 0)
        for field in self._NONE_FIELDS:
            setattr(self, field, None)
"repo_name": "adamsherman/pytrademonster",
"path": "pytrademonster/objects/quoteObjects.py",
"copies": "1",
"size": "1872",
"license": "mit",
"hash": 6406100858678355000,
"line_mean": 25.0138888889,
"line_max": 82,
"alpha_frac": 0.5892094017,
"autogenerated": false,
"ratio": 4.043196544276458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013614043989578382,
"num_lines": 72
} |
__author__ = 'Adam'
"""
Show how to fire bullets.
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/PpdJjaiLX6A
"""
import pygame
import random
import math
# RGB color constants used throughout the game.
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
RED = ( 255, 0, 0)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
# Window dimensions in pixels.
screen_width = 700
screen_height = 400
class Block(pygame.sprite.Sprite):
    """ This class represents the block. """
    # Per-frame velocity in pixels; individual instances override these
    # (see makeBlock below, which sets them per spawn edge).
    xMotion = 0
    yMotion = 0

    def __init__(self, color):
        # Call the parent class (Sprite) constructor
        pygame.sprite.Sprite.__init__(self)
        # 20x15 solid-color rectangle.
        self.image = pygame.Surface([20, 15])
        self.image.fill(color)
        self.rect = self.image.get_rect()

    def update(self):
        # Advance by the current velocity, then wrap at each screen edge.
        self.rect.x += self.xMotion
        self.rect.y += self.yMotion
        if self.rect.x > screen_width:
            self.rect.x = 0
        if self.rect.x < 0:
            self.rect.x = screen_width
        if self.rect.y > screen_height:
            self.rect.y = 0
        if self.rect.y < 0:
            self.rect.y = screen_height
class Player(pygame.sprite.Sprite):
    # Mouse-aimed, WASD-driven player sprite.
    # -1/0/1 forward-backward motion flag set by the key handlers.
    moving = False
    lat_moving = False  # NOTE(review): never updated; lat_move is used instead
    # Facing angle in degrees, derived from the mouse position.
    angle = 0
    # -1/0/1 strafe motion flag.
    lat_move = 0
    position = [screen_width/2,screen_height/2]
    shooting = False
    # Tick timestamp of the last shot, used by the fire-rate limiter.
    previousShotTime = 0

    def __init__(self):
        """ Set up the player on creation. """
        # Call the parent class (Sprite) constructor
        pygame.sprite.Sprite.__init__(self)
        # self.source_image = pygame.Surface([20, 20])
        self.source_image = pygame.image.load('Assets/player.png')
        # NOTE(review): this fill paints the freshly loaded PNG solid
        # red, discarding the artwork -- looks like a leftover from the
        # commented-out Surface version above; confirm intent.
        self.source_image.fill(RED)
        self.rect = self.source_image.get_rect()

    def update(self):
        # Rotate from the pristine source image each frame to avoid
        # cumulative rotation distortion.
        rotated_image = pygame.transform.rotate(self.source_image, self.angle)
        new_rect = rotated_image.get_rect()
        # Advance along the facing direction, plus any strafe component
        # (angle-90 is perpendicular to the facing direction).
        self.position[0] += self.moving*2*math.sin(math.radians(self.angle))
        self.position[1] += self.moving*2*math.cos(math.radians(self.angle))
        self.position[0] += self.lat_move*2*math.sin(math.radians(self.angle-90))
        self.position[1] += self.lat_move*2*math.cos(math.radians(self.angle-90))
        new_rect.centerx = self.position[0]
        new_rect.centery = self.position[1]
        self.rect = new_rect
        self.image = rotated_image

    def shoot(self,bullet_list,all_sprites_list):
        # Spawn a Bullet at the player's centre, facing self.angle.
        bullet = Bullet(self.angle)
        # Set the bullet so it is where the player is
        bullet.positionX = self.rect.centerx
        bullet.positionY = self.rect.centery
        # Add the bullet to the lists
        all_sprites_list.add(bullet)
        bullet_list.add(bullet)
class Bullet(pygame.sprite.Sprite):
    """A projectile travelling along `angle`, wrapping at screen edges."""

    angle = 0
    positionX, positionY = 0.0, 0.0

    def __init__(self, angle=0):
        # Initialise the Sprite machinery and the 4x4 black square image.
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([4, 4])
        self.image.fill(BLACK)
        self.rect = self.image.get_rect()
        self.angle = angle

    def update(self):
        # Step 5px along the facing direction (screen y grows downward,
        # hence both components are subtracted).
        theta = math.radians(self.angle)
        self.positionX -= 5 * math.sin(theta)
        self.positionY -= 5 * math.cos(theta)
        # Wrap off-screen positions to the opposite edge.
        if self.positionX < 0:
            self.positionX = screen_width
        elif self.positionX > screen_width:
            self.positionX = 0
        if self.positionY < 0:
            self.positionY = screen_height
        elif self.positionY > screen_height:
            self.positionY = 0
        self.rect.x = self.positionX
        self.rect.y = self.positionY
# Initialise pygame and create the game window.
pygame.init()
screen = pygame.display.set_mode([screen_width, screen_height])
# This is a list of every sprite. All blocks and the player block as well.
all_sprites_list = pygame.sprite.Group()
# List of each block in the game
block_list = pygame.sprite.Group()
# List of each bullet
bullet_list = pygame.sprite.Group()
# --- Create the sprites
def makeBlock():
    # Choose which screen edge (0-3) the new block spawns from.
    i = random.randrange(4)
    # This represents a block.
    # NOTE(review): int(i/5) is always 0 for i in 0..3, so every block
    # gets color (0, 100, 0) -- presumably leftover from a larger range.
    block = Block((0,100+50*int(i/5),0))
    # Set a random location for the block; the edge-specific branches
    # below then pin one coordinate to the chosen spawn edge and set
    # the velocity pointing into the screen.
    block.rect.x = random.randrange(screen_width)
    block.rect.y = random.randrange(350)
    if int(i) == 0:
        block.rect.x = 0
        block.xMotion = 1
    if int(i) == 1:
        block.rect.x = screen_width
        block.xMotion = -1
    if int(i) == 2:
        block.rect.y = 0
        block.yMotion = 1
    if int(i) == 3:
        block.rect.y = screen_height
        block.yMotion = -1
    # Add the block to the list of objects
    block_list.add(block)
    all_sprites_list.add(block)
# Spawn the initial wave of blocks.
for i in range(20):
    makeBlock()
# Create a red player block
player = Player()
all_sprites_list.add(player)
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
score = 0
# Start the player in the middle of the screen.
player.rect.y = screen_height/2
player.rect.x = screen_width/2
gameOver = False
# HUD font for the score / block counters.
font = pygame.font.SysFont('Verdana',14)
# Main game loop: alternates between the game-over screen and live play.
while not done:
    # Clear the screen
    screen.fill(WHITE)
    # Player touching any block ends the game.
    if len(pygame.sprite.spritecollide(player,block_list,False)) > 0 or gameOver:
        # --- Game-over screen: render title, help text and final score
        # centred on the window.
        goFont = pygame.font.SysFont('Verdana',44)
        goHelpTextFont = pygame.font.SysFont('Verdana',18)
        gameOverTextSurface = goFont.render("GAME OVER",True,BLACK)
        gameOverHelpTextSurface = goHelpTextFont.render("Press return to start again or escape to quit",True,BLACK)
        gameOverScoreTextSurface = goHelpTextFont.render("Score: "+str(score),True,BLACK)
        gameOverFontSize = goFont.size("GAME OVER")
        gameOverHelpTextFontSize = goHelpTextFont.size("Press return to start again or escape to quit")
        gameOverScoreTextFontSize = goHelpTextFont.size("Score: "+str(score))
        screen.blit(
            gameOverTextSurface,
            (
                screen_width/2 - gameOverFontSize[0]/2,
                screen_height/2 - gameOverFontSize[1]/2
            )
        )
        screen.blit(
            gameOverHelpTextSurface,
            (
                screen_width/2 - gameOverHelpTextFontSize[0]/2,
                screen_height/2 - gameOverHelpTextFontSize[1]/2 + 35
            )
        )
        screen.blit(
            gameOverScoreTextSurface,
            (
                screen_width / 2 - gameOverScoreTextFontSize[0]/2,
                screen_height / 2 - gameOverScoreTextFontSize[1]/2 -40
            )
        )
        # Game-over input: quit, or reset all state and respawn blocks.
        for event in pygame.event.get():
            if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
                done = True
            if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                gameOver = False
                score = 0
                # Iterate over a copy so removal is safe.
                tmp = block_list.copy()
                for b in tmp:
                    block_list.remove(b)
                    all_sprites_list.remove(b)
                for i in range(20):
                    makeBlock()
    else:
        # --- Live play: aim the player at the mouse cursor.
        pos = pygame.mouse.get_pos()
        # vec = pygame.math.Vector2(
        # )
        angleRad = math.atan2(
            -(pos[0]-player.rect.centerx),
            -(pos[1]-player.rect.centery)
            # -vec.x,
            # -vec.y
        )
        angle = math.degrees(angleRad)
        player.angle = angle
        # --- Event Processing
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
            elif event.type == pygame.MOUSEBUTTONDOWN:
                player.shooting = True
            elif event.type == pygame.MOUSEBUTTONUP:
                player.shooting = False
            elif event.type == pygame.KEYUP:
                # Stop the relevant motion axis on key release.
                if event.key == pygame.K_a:
                    player.lat_move = 0
                if event.key == pygame.K_d:
                    player.lat_move = 0
                if event.key == pygame.K_w:
                    player.moving = 0
                if event.key == pygame.K_s:
                    player.moving = 0
            elif event.type == pygame.KEYDOWN:
                # WASD movement flags (see Player.update).
                if event.key == pygame.K_a:
                    player.lat_move = 1
                if event.key == pygame.K_d:
                    player.lat_move = -1
                if event.key == pygame.K_w:
                    player.moving = -1
                if event.key == pygame.K_s:
                    player.moving = 1
        # --- Game logic
        # Rate-limit shooting to one bullet per 150ms.
        if player.shooting and player.previousShotTime+150 < pygame.time.get_ticks():
            player.shoot(bullet_list,all_sprites_list)
            player.previousShotTime = pygame.time.get_ticks()
        # Call the update() method on all the sprites
        all_sprites_list.update()
        # Calculate mechanics for each bullet
        for bullet in bullet_list:
            # See if it hit a block (True = destroy hit blocks)
            block_hit_list = pygame.sprite.spritecollide(bullet, block_list, True)
            # For each block hit, remove the bullet and add to the score
            for block in block_hit_list:
                bullet_list.remove(bullet)
                all_sprites_list.remove(bullet)
                score += 1
                # Respawn a block per kill; bonus block every 10 points.
                makeBlock()
                if score % 10 == 0:
                    makeBlock()
            # Remove the bullet if it flies up off the screen
            if bullet.rect.y < -10:
                bullet_list.remove(bullet)
                all_sprites_list.remove(bullet)
        # HUD: current score and live block count.
        scoreSurface = font.render("Score: "+str(score),True,BLACK)
        numBlocksSurface = font.render("Blocks: "+str(len(block_list)),True,BLACK)
        screen.blit(scoreSurface,(10, 10))
        screen.blit(numBlocksSurface,(screen_width-150,10))
    # Draw all the sprites
    all_sprites_list.draw(screen)
    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()
    # --- Limit to 60 frames per second
    clock.tick(60)
pygame.quit()
| {
"repo_name": "artog/intro-prog",
"path": "Shorevival/Main.py",
"copies": "1",
"size": "10009",
"license": "mit",
"hash": -3930783542119679000,
"line_mean": 26.5730027548,
"line_max": 115,
"alpha_frac": 0.5713857528,
"autogenerated": false,
"ratio": 3.6542533771449435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9706895137914962,
"avg_score": 0.003748798405996341,
"num_lines": 363
} |
__author__ = 'adam'
# DDL: weather-station master table, keyed by the NOAA station/WBAN pair.
station_create_table_statement = """
CREATE TABLE stations (
station_id INTEGER NOT NULL,
wban_id INTEGER NOT NULL,
station_name VARCHAR,
country_id VARCHAR,
us_state VARCHAR,
latitude NUMERIC(6,3),
longitude NUMERIC(6,3),
elevation NUMERIC(5,1),
CONSTRAINT pk_station_ids PRIMARY KEY(station_id,wban_id)
);
"""
# DDL: one row per station per day of observations, referencing stations.
summary_create_table_statement = """
CREATE TABLE daily_summaries (
station_id INTEGER NOT NULL,
wban_id INTEGER NOT NULL,
summary_date DATE NOT NULL,
mean_temp_fahr NUMERIC(5,1),
num_mean_temp_obs INTEGER,
max_temp_fahr NUMERIC(5,1),
max_temp_from_hourly BOOLEAN,
min_temp_fahr NUMERIC(5,1),
min_temp_from_hourly BOOLEAN,
dew_point_fahr NUMERIC(5,1),
num_dew_point_obs INTEGER,
sea_pressure_millibar NUMERIC(5,1),
num_sea_pressure_obs INTEGER,
station_pressure_millibar NUMERIC(5,1),
num_stat_pressure_obs INTEGER,
visibility_miles NUMERIC(4,1),
num_visibility_obs INTEGER,
mean_wind_speed_knots NUMERIC(4,1),
num_mean_wind_spd_obs INTEGER,
max_wind_speed_knots NUMERIC(4,1),
max_wind_gust_knots NUMERIC(4,1),
precip_inches NUMERIC(4,2),
precip_report_flag CHARACTER(1),
snow_depth_inches NUMERIC(4,1),
fog_reported BOOLEAN,
rain_reported BOOLEAN,
snow_reported BOOLEAN,
hail_reported BOOLEAN,
thunder_reported BOOLEAN,
tornado_reported BOOLEAN,
CONSTRAINT station_pk FOREIGN KEY(station_id,wban_id) REFERENCES stations(station_id,wban_id)
);
"""
# Parameterised insert for one station row.
# NOTE(review): $1..$8 placeholders are the py-postgresql prepared-
# statement style -- confirm the driver in use.
station_insert_statement = """
insert into stations
(
station_id,
wban_id,
station_name,
country_id,
us_state,
latitude,
longitude,
elevation
)
values ( $1, $2, $3, $4, $5, $6, $7, $8 )
"""
# Bulk COPY load for summaries; 'None' in the input is treated as NULL.
summary_copy_statement = "copy daily_summaries from STDIN WITH (NULL 'None')"
# Covering index for the common station+date lookup, then refresh stats.
index_statement = 'CREATE INDEX station_summary_index ON daily_summaries (station_id,wban_id,summary_date)'
analyze_statement = 'analyze'
| {
"repo_name": "dikaiosune/whether",
"path": "py_whether/db.py",
"copies": "1",
"size": "2275",
"license": "mit",
"hash": 7095951479292631000,
"line_mean": 24.8522727273,
"line_max": 107,
"alpha_frac": 0.5938461538,
"autogenerated": false,
"ratio": 3.441754916792738,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4535601070592738,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Adam'
# TODO: import watchdog for monitoring file system events
import os
import stat
import sys
import getpass
from enum import Enum
import shutil
# Enum of the file-system entity categories this module distinguishes.
types = Enum('FSEntityType', 'file directory other')
# Forward declarations -- presumably rebound to the concrete classes by
# sibling modules at import time (TODO confirm); used by
# FileSystemEntity.get_type_instance() and friends below.
FileSystemFile = None
FileSystemDirectory = None
class EntityException(Exception):
    """Base error for file-system entity operations.

    Pairs the affected path with a human-readable error description.
    """

    def __init__(self, path, err):
        self.path = path
        self.error = err

    def __str__(self):
        # String concatenation (not formatting) kept deliberately: both
        # parts are expected to already be strings.
        return self.path + ": " + self.error
class EntityAccessError(EntityException):
    """Raised when an entity exists but cannot be accessed."""

    def __init__(self, path, err):
        EntityException.__init__(self, path, err)
class EntityMetadata:
    # Snapshot of os.stat() data for a path, plus the owning user name.
    def __init__(self, path):
        self.path = path
        self.__stat = os.stat(self.path)
        self.mode = self.__stat.st_mode
        # POSIX timestamps (seconds since the epoch).
        self.created = self.__stat.st_ctime
        self.accessed = self.__stat.st_atime
        self.modified = self.__stat.st_mtime
        # Owner resolution is platform-specific; None on platforms
        # handled by neither branch below (e.g. macOS).
        self.owner = None
        if sys.platform.startswith("win"):
            import win32security
            # Resolve the owning account from the file's security
            # descriptor (pywin32).
            sd = win32security.GetFileSecurity(
                self.path, win32security.OWNER_SECURITY_INFORMATION
            )
            owner_sid = sd.GetSecurityDescriptorOwner()
            self.owner, _domain, _type = win32security.LookupAccountSid(
                None, owner_sid
            )
        elif sys.platform.startswith("linux"):
            import pwd
            # Map the numeric uid to a user name.
            self.owner = pwd.getpwuid(self.__stat.st_uid).pw_name

    def copy_to(self, target_entity):
        # Copy permission bits and timestamps onto the target, then
        # (on Linux) ownership too, and refresh its cached metadata.
        shutil.copystat(self.path, target_entity.get_path())
        if sys.platform.startswith("linux"):
            os.chown(
                target_entity.get_path(), self.__stat.st_uid,
                self.__stat.st_gid
            )
        target_entity.update_meta()
def get_enum_type(mode):
    """Map an os.stat() st_mode value to a member of the `types` enum."""
    if stat.S_ISDIR(mode):
        return types.directory
    if stat.S_ISREG(mode):
        return types.file
    return types.other
"""
Paths must be absolute when creating FileSystem* instances, these classes
represent an existing entity in the file system and won't take into account
the current working directory.
"""
class FileSystemEntity:
    """Wraps an existing file-system path with cached stat metadata.

    Paths must be absolute: instances represent an entity as it exists
    on disk and never consult the current working directory.
    """

    def __init__(self, path):
        self.meta = EntityMetadata(path)
        self.type = get_enum_type(self.meta.mode)

    def get_meta(self):
        return self.meta

    def update_meta(self):
        """Re-stat the path and refresh the cached metadata and type."""
        self.meta = EntityMetadata(self.meta.path)
        self.type = get_enum_type(self.meta.mode)
        return self.meta

    def get_path(self):
        return self.meta.path

    def get_type(self):
        return self.type

    def get_owner(self):
        return self.get_meta().owner

    def same_process_user(self):
        """True when the entity is owned by the user running this process."""
        return self.get_owner() == getpass.getuser()

    def get_base_name(self):
        return os.path.basename(self.get_path())

    def get_dir_name(self):
        return os.path.dirname(self.get_path())

    def get_dir_obj(self):
        return FileSystemDirectory(self.get_dir_name())

    def is_file(self):
        return self.get_type() == types.file

    def is_directory(self):
        return self.get_type() == types.directory

    def get_type_instance(self):
        """Return a FileSystemFile/FileSystemDirectory view of this path,
        or None for entities that are neither."""
        if self.get_type() == types.file:
            return FileSystemFile(self)
        elif self.get_type() == types.directory:
            return FileSystemDirectory(self)
        else:
            # TODO: Raise type error
            return None

    def get_size(self):
        return os.path.getsize(self.meta.path)

    def is_under(self, target_dir):
        """True when this path lies inside `target_dir` (string or object)."""
        if not isinstance(target_dir, FileSystemDirectory):
            target_dir = FileSystemDirectory(target_dir)
        return self.get_path().startswith(target_dir.get_path())

    def parent_of(self, path):
        """True when `path` is at or below this entity (case-normalised)."""
        return os.path.normcase(path).startswith(
            os.path.normcase(self.get_path()))

    def equals(self, path):
        """True when `path` refers to the same location (case-normalised)."""
        return os.path.normcase(os.path.abspath(path)) == os.path.normcase(
            self.get_path())

    def move_to(self, target_dir, target_name=None):
        """Move/rename this entity into `target_dir`, then rebind this
        instance to the new location."""
        if not isinstance(target_dir, FileSystemDirectory):
            target_dir = FileSystemDirectory(target_dir)
        if target_name is None:
            target_name = self.get_base_name()
        if target_dir.exists(target_name):
            # TODO: raise entity exists error
            pass
        target_path = target_dir.join_name(target_name)
        os.rename(self.get_path(), target_path)
        self.__init__(target_path)

    def call_instance_func(self, func_str, **kwargs):
        """Dispatch `func_str` on the type-specific wrapper, if present.

        BUG FIX: the original tested ``hasattr(type, func_str)`` -- the
        *builtin* ``type`` -- so the lookup never matched the wrapper's
        methods and the call was never made.  Test the wrapper instance.
        """
        entity_type = self.get_type_instance()
        if entity_type is not None:
            if hasattr(entity_type, func_str):
                return getattr(entity_type, func_str)(**kwargs)
        return None

    def remove(self):
        # NOTE(review): os.remove fails on directories -- confirm callers
        # only invoke this on files.
        os.remove(self.get_path())

    def get_info(self):
        """Return a plain-dict summary: name, size, type, timestamps,
        path and owner."""
        info = dict()
        info["title"] = self.get_base_name()
        info["size"] = self.get_size()
        ftype = self.get_type()
        if ftype is types.directory:
            info["type"] = "directory"
        elif ftype is types.file:
            info["type"] = "file"
        else:
            info["type"] = "unknown"
        meta = self.get_meta()
        info["created"] = meta.created
        info["modified"] = meta.modified
        info["accessed"] = meta.accessed
        info["path"] = meta.path
        info["owner"] = meta.owner
        return info
| {
"repo_name": "Adam01/Cylinder-server",
"path": "Cylinder-server/fsentity/fsentity.py",
"copies": "1",
"size": "6033",
"license": "mit",
"hash": -8508717326225519000,
"line_mean": 27.7285714286,
"line_max": 75,
"alpha_frac": 0.58776728,
"autogenerated": false,
"ratio": 3.9099157485418017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49976830285418017,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adam'
"""
Various classes related to an order to TradeMonster.
Order: the actual order sent to market
OrderLeg: component of an order that is sent to market
OrderResponse: class for storing all results returned from a placed order
NewOrderLeg: class for storing result of a leg that is returned from a placed order
OrderPreview: the preview portion of an order response from a placed order
"""
from pytrademonster.constants import enum
class OrderType(object):
    """Enumeration of order price types; values are the wire codes that are
    sent in the <priceType> element of an order payload."""
    types = enum(LIMIT = 'LM', MARKET='MK', STOP_ORDER = 'SP', TRAILING_STOP = 'TS',
                 STOP_LIMIT = 'SL', TRAILING_STOP_LIMIT = 'TSL')
class OrderStatus(object):
    """Enumeration of order lifecycle states reported by TradeMonster."""
    status = enum(SAVED = 'SAVED', EXPIRED = 'EXPIRED', QUEUED = 'QUEUED', PENDING = 'PENDING',
                  OPEN = 'OPEN', FILLED = 'FILLED', REJECTED = 'REJECTED',
                  CANCEL_QUEUED = 'CANCEL QUEUED', OTHER = 'OTHER')
class OrderLeg(object):
    '''
    Class used to create an individual leg when placing orders
    '''
    # per-leg enumerations: trade direction and open/close of the position
    side = enum(BUY = 'BUY', SELL = 'SELL')
    openClose = enum(OPEN = 'OPEN', CLOSE = 'CLOSE')
    def __init__(self):
        self.orderSide = OrderLeg.side.BUY #default
        self.symbol = None           # instrument symbol for this leg
        self.openOrClose = None      # one of OrderLeg.openClose
        self.quantityRatio = 1       # this leg's quantity relative to the order quantity
        self.instrumentType = None   # e.g. 'Equity' or 'Option' (see order templates)
class NewOrderLeg(object):
    '''
    Stores one leg ('newOrderLeg') of the result returned when an order is
    placed.
    '''
    def __init__(self):
        # all response fields start unset; they are filled in from the parsed
        # order response (holdingType is long or short)
        for field in ("exchange", "expirationDate", "holdingType",
                      "positionType", "strikePrice", "price",
                      "symbol", "quantity"):
            setattr(self, field, None)
class OrderResponse():
    '''
    Stores the results retrieved back immediately when an order is placed.
    ('actualOrderId' is named to conform with the TradeMonster data model.)
    '''
    def __init__(self):
        # every response field starts unset
        for field in ("date", "status", "orderId", "actualOrderId",
                      "accountNumber", "orderPreview"):
            setattr(self, field, None)
class OrderPreview(object):
    '''
    Stores the order preview results that are retrieved upon placing an order.
    '''
    def __init__(self):
        # scalar preview fields, all unset until parsed from the response;
        # listed in the original attribute order
        for field in ("cost", "commnAndFees", "possibleFreeTradeCredit",
                      "totalCost", "buyingPowerEffect",
                      "resultingBuyingPower", "resultingCashBuyingPower",
                      "resultingMarginBuyingPower",
                      "resultingDayTradeBuyingPower", "netLiquidationValue",
                      "negativeOrderImpact", "isReviewRequired",
                      "isProhibitedOptionPairPresent", "isNetLiquidationLess",
                      "marginRequirement", "currentInitialOptionReq",
                      "currentMaintenanceOptionReq", "equityLegCost",
                      "optionLegCost", "maintenanceMarginRequirement",
                      "resultingCash", "openOrderReserve",
                      "openOrderRequirement", "smaBuyingPower",
                      "unadjustedBuyingPower", "discountingFactor",
                      "cashInLieu", "regTEquity", "isIndexOptionExercise",
                      "isBpConsumed", "indexSettlementAmount",
                      "isProceedsConsidered", "displayErrorToCustomer"):
            setattr(self, field, None)
        # per-leg results (NewOrderLeg instances) appended during parsing
        self.newOrderLegs = []
class Order(object):
    '''
    Class for creating an order to send to the market.
    '''
    # enumerations for order duration and market session
    timeInForceEnum = enum(CLO='CLO', DAY='DAY', EXT='EXT', FOK='FOK', GTC='GTC', GTD='GTD', IOC='IOC')
    marketSessionEnum = enum(REG='REG', EXT='EXT')
    def __init__(self):
        # (name, default) pairs in the original attribute order; boolean
        # flags default to False, everything else to None
        for name, default in (("type", None),
                              ("price", None),
                              ("quantity", None),
                              ("instrumentType", None),
                              ("timeInForce", None),
                              ("modifyOrder", False),
                              ("originalOrderId", None),
                              ("combineLikeLegs", False),
                              ("holdOrder", False),
                              ("discretionFlag", False),
                              ("solicitedFlag", False),
                              ("marketSession", None),
                              ("goodToCancelDate", None),
                              ("spreadName", None)):
            setattr(self, name, default)
        # OrderLeg instances to submit with the order
        self.orderLegs = []
class LimitOrder(Order):
    """Order priced at a fixed limit (priceType 'LM')."""
    def __init__(self):
        super(LimitOrder, self).__init__()
        self.type = OrderType.types.LIMIT
class MarketOrder(Order):
    """Order executed at the prevailing market price (priceType 'MK')."""
    def __init__(self):
        super(MarketOrder, self).__init__()
        self.type = OrderType.types.MARKET
class StopOrder(Order):
    """Stop order (priceType 'SP').

    BUG FIX: the enum member is named ``STOP_ORDER``; the original referenced
    the non-existent ``OrderType.types.STOP``, so constructing a StopOrder
    always raised AttributeError.
    """
    def __init__(self):
        super(StopOrder, self).__init__()
        self.type = OrderType.types.STOP_ORDER
class TrailingStopOrder(Order):
    """Trailing stop order (priceType 'TS')."""
    def __init__(self):
        super(TrailingStopOrder, self).__init__()
        self.type = OrderType.types.TRAILING_STOP
class StopLimit(Order):
    """Stop-limit order (priceType 'SL') with an explicit trigger price."""
    def __init__(self):
        super(StopLimit, self).__init__()
        self.type = OrderType.types.STOP_LIMIT
        # price at which the limit order is activated
        self.stopTriggerPrice = None
class TrailingStopLimitOrder(Order):
    """Trailing stop-limit order (priceType 'TSL') with a trailing amount."""
    def __init__(self):
        super(TrailingStopLimitOrder, self).__init__()
        self.type = OrderType.types.TRAILING_STOP_LIMIT
        # trailing distance used to derive the trigger
        self.trailingAmount = None
"repo_name": "adamsherman/pytrademonster",
"path": "pytrademonster/objects/orderObjects.py",
"copies": "3",
"size": "5440",
"license": "mit",
"hash": 4319318238560636400,
"line_mean": 32.3803680982,
"line_max": 104,
"alpha_frac": 0.6391544118,
"autogenerated": false,
"ratio": 3.919308357348703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008002313781033272,
"num_lines": 163
} |
__author__ = 'adam'
import sys
import os
import errno
import json
import datetime
import contextlib
class NoStepsException(Exception):
    """Raised when a test-bench manifest contains no execution steps."""
    def __init__(self, value):
        # payload/message describing the failure
        self.value = value
    def __str__(self):
        # BUG FIX: the original omitted ``return``, so str(exc) returned
        # None and raised "TypeError: __str__ returned non-string".
        return repr(self.value)
class TestBenchExecutor:
    """Runs the steps of a test-bench manifest (a JSON file) in sequence.

    The manifest is re-read and re-written around every mutation, so the
    file on disk always reflects the current execution state.
    """
    # NOTE(review): class-level defaults; they are rebound per instance in
    # __init__/_load_tb_manifest, so the mutable defaults are not shared in
    # practice -- kept for interface compatibility.
    _path_manifest = ''
    _dict_manifest = dict()
    _steps = list()

    def __init__(self, manifest_path):
        """
        @param manifest_path: The path to the test bench manifest
        @type manifest_path: str
        @return: Nothing
        @rtype: None
        @raise: NoStepsException if no execution steps found in manifest
        """
        self._path_manifest = manifest_path
        self._load_tb_manifest()
        # Reset all steps to a clean, unexecuted state before running.
        # TODO: command-line option to skip this
        with self._update_manifest() as manifest:
            for step in manifest["Steps"]:
                step["Status"] = "UNEXECUTED"
                step["LogFile"] = None
                step["ExecutionStartTimestamp"] = None
                step["ExecutionCompletionTimestamp"] = None
        if len(self._steps) == 0:
            raise NoStepsException("No execution steps found in manifest")

    def _load_tb_manifest(self):
        """Read the manifest file and cache its dict and step list."""
        with open(self._path_manifest, 'r') as f:
            self._dict_manifest = json.load(f)
        self._steps = self._dict_manifest["Steps"]

    def run_all(self):
        '''
        Executes all unexecuted steps in sequence.
        @return: 0 if executed successfully, <0 if error
        @rtype: int
        '''
        # loop_limit is a safety net against a step that never leaves the
        # UNEXECUTED state
        loop_limit = 100000
        num_loops = 0
        while num_loops <= loop_limit:
            num_loops += 1
            rtn_code = self.run_next_step()
            if rtn_code == 0:
                # Step successfully completed
                continue
            elif rtn_code == 1:
                # No steps remaining
                break
            else:
                # Error
                return rtn_code
        if num_loops >= loop_limit:
            raise Exception("run_all has exceeded the loop limit of {limit}. "
                            "Something is wrong with the step execution."
                            .format(limit=loop_limit))
        with self._update_manifest() as manifest:
            manifest["Status"] = "OK"
        return 0

    def run_next_step(self):
        """
        Find and execute the first unexecuted step.
        @return: 0 if executed successfully, 1 if no steps remaining, <0 if error
        @rtype: int
        """
        step = self._get_first_unexecuted_step()
        # if no steps remaining
        if step is None:
            return 1
        return self._execute_step(step)

    def _get_first_unexecuted_step(self):
        """Return the first step whose Status is UNEXECUTED, else None."""
        for s in self._steps:
            status = str(s["Status"])
            if status == "UNEXECUTED":
                return s
        # If none are unexecuted
        return None

    @contextlib.contextmanager
    def _update_manifest(self):
        """
        Load the manifest, run code, then save it.
        Use as: with self._update_manifest() as manifest: manifest['this'] = 'that'
        """
        with open(self._path_manifest, 'r') as f:
            d_manifest = json.load(f)
        yield d_manifest
        with open(self._path_manifest, 'w') as f:
            json.dump(d_manifest, f, indent=2)
        # Update member copy of manifest dictionary
        self._dict_manifest = d_manifest
        self._steps = d_manifest["Steps"]

    def _update_step(self, step, updates):
        """
        Given a step and a set of updates, modify that step object in the manifest file.
        @param step: The step to be modified
        @type step: dict
        @param updates: A dictionary of key-value pairs to be updated
        @type updates: dict
        @return: The modified step
        @rtype: dict
        """
        # Load the json file, find the matching step (by dict equality),
        # update it, save back.
        with self._update_manifest() as d_manifest:
            d_steps = d_manifest["Steps"]
            r_step = None
            for s in d_steps:
                if s == step:
                    r_step = s
                    break
            r_step.update(updates)
        return r_step

    @property
    def _time(self):
        """
        @return: The time, in ISO 8601 standard format
        @rtype: str
        """
        return datetime.datetime.now().isoformat()

    def _mark_manifest_status(self):
        """
        Based on the status of the manifest's steps, mark it as OK, FAILED, or UNEXECUTED
        """
        any_failed = False
        any_unexecuted = False
        for step in self._steps:
            # BUG FIX: the original compared with ``is``, which tests object
            # identity; strings loaded from JSON are not interned, so the
            # checks never matched.  Use ``==`` for value comparison.
            if step["Status"] == "FAILED":
                any_failed = True
            elif step["Status"] == "UNEXECUTED":
                any_unexecuted = True
        with self._update_manifest() as manifest:
            if any_failed:
                manifest["Status"] = "FAILED"
            elif any_unexecuted:
                manifest["Status"] = "UNEXECUTED"

    def _execute_step(self, step):
        """Run one step's Invocation as a subprocess, capturing its output in
        the step's log file and recording timestamps/status in the manifest.
        @return: 0 on success, -1 on failure
        @rtype: int
        """
        step = self._update_step(step, {"ExecutionStartTimestamp": self._time})
        log = None
        ### Execute step
        try:
            import subprocess
            if not step.get("LogFile"):
                # derive a log name from the start time (':' stripped for
                # filesystem compatibility)
                step = self._update_step(step, {
                    "LogFile": "log/log" + step["ExecutionStartTimestamp"].replace(':', '') + ".log"})
            logpath = os.path.join(os.path.dirname(self._path_manifest), step["LogFile"])
            try:
                os.makedirs(os.path.dirname(logpath))
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
            # NOTE(review): log is opened binary and later written str(e) --
            # assumes Python 2 string semantics; confirm before porting.
            log = open(logpath, "wb")
            with open(os.devnull, "r") as null_file:
                invocation = step["Invocation"]
                # redirect 'python.exe ...' invocations to the interpreter
                # running this executor
                if invocation.lower().startswith("python.exe "):
                    invocation = "\"" + sys.executable + "\" " + step["Invocation"][len("python.exe "):]
                subprocess.check_call(invocation, stdin=null_file, stdout=log, stderr=subprocess.STDOUT,
                                      shell=True, close_fds=False,
                                      cwd=os.path.join(os.getcwd(), os.path.dirname(self._path_manifest)))
            step = self._update_step(step, {"ExecutionCompletionTimestamp": self._time})
            step = self._update_step(step, {"Status": "OK"})
            if step == self._steps[-1]:
                self._mark_manifest_status()
            return 0
        except subprocess.CalledProcessError as e:
            step = self._update_step(step, {"ExecutionCompletionTimestamp": self._time})
            step = self._update_step(step, {"Status": "FAILED"})
            self._mark_manifest_status()
            # BUG FIX: guard the write -- ``log`` is None when the failure
            # occurred before the log file was opened.
            if log:
                log.write(str(e))
            with open(os.path.join(os.path.dirname(os.path.abspath(self._path_manifest)), "_FAILED.txt"), "w") as failed:
                failed.write('"%s" failed:\n' % invocation)
                failed.write(str(e))
                failed.write('\n\nSee log: %s' % step["LogFile"])
            return -1
        except OSError as e:
            step = self._update_step(step, {"ExecutionCompletionTimestamp": self._time})
            step = self._update_step(step, {"Status": "FAILED"})
            self._mark_manifest_status()
            # BUG FIX: same None guard as above (e.g. the open() itself failed)
            if log:
                log.write(str(e))
            return -1
        finally:
            if log:
                log.close()
| {
"repo_name": "pombredanne/metamorphosys-desktop",
"path": "metamorphosys/META/src/TestBenchExecutor/__init__.py",
"copies": "1",
"size": "7731",
"license": "mit",
"hash": -7881416007493275000,
"line_mean": 30.8978723404,
"line_max": 121,
"alpha_frac": 0.5115767689,
"autogenerated": false,
"ratio": 4.494767441860465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5506344210760465,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adamsherman'
import logging
import os
import getpass
from requests import Session,adapters
import xmltodict
from simplecrypt import encrypt,decrypt
from services import LoginService
from constants import TradeMonsterConstants
log = logging.getLogger(__name__)
# Passphrase for simplecrypt encryption of the cached credentials file.
# NOTE(review): a hard-coded passphrase only obfuscates the stored password;
# it does not protect it from anyone with access to this source.
ECP = '#ecp'
class PyTradeMonster(object):
    '''
    Main interface to TradeMonster.
    Maintains an open session, and handles making requests.
    '''
    # NOTE(review): this class attribute is never read; __init__ builds its
    # own ``self.headers`` including the session credentials -- confirm
    # before removing.
    HEADERS = {'content-type':'application/xml'}
    def __init__(self, pwFile = 'cred.dat', environment = TradeMonsterConstants.Environments.PROD):
        # Log in immediately; the resulting session credentials (JSESSIONID,
        # token, user id) are attached to every subsequent request.
        self.environment = environment
        login = LoginService(self.environment)
        user,password = self.getUserAndPass(pwFile)
        login.doLogin(user,password)
        self.headers = {'content-type':'application/xml', 'JSESSIONID' : login.sessionId, 'token' : login.token, 'sourceapp':login.userId}
        self.session = self.createSession(login.cookies)
    def getUserAndPass(self, pwFile):
        '''
        Try and retrieve the username and password from the encrypted user file - 'cred.dat'
        If no file exists, prompt the user for a login/password and create a file with encrypted contents
        :return: user and pass tuple
        '''
        log.info('Retrieving username and password....')
        if os.path.isfile(pwFile):
            # cached credentials: decrypt and split user/password on newline
            # NOTE(review): assumes Python 2 str semantics (raw_input below);
            # under Python 3 decrypt() returns bytes -- confirm before porting.
            with open(pwFile) as f:
                tokens = decrypt(ECP,f.read()).split('\n')
                user,passwd = tokens[0],tokens[1]
        else:
            # first run: prompt interactively and cache the encrypted pair
            user = raw_input('Enter your username: ')
            passwd = getpass.getpass('Enter your password: ')
            log.info('Encrypting user/pass to {0}....'.format(pwFile))
            userPassEncrypted = encrypt(ECP,user + '\n' + passwd)
            with open(pwFile, mode='w') as f:
                f.write(userPassEncrypted)
        log.info('Sucessfully retrieved username and password')
        return (user,passwd)
    def createSession(self, cookies):
        '''
        Creates a global session to be used by all requests
        :param cookies: cookie jar captured from the login response
        :return: configured requests.Session
        '''
        session = Session()
        # large connection pool so many concurrent service calls can reuse
        # sockets on the single mounted HTTPS adapter
        adapter = adapters.HTTPAdapter(pool_connections = 1000, pool_maxsize = 5000)
        session.mount('https://', adapter)
        session.headers = self.headers
        session.cookies = cookies
        return session
    def doCall(self, url, payload):
        '''
        Make a request to a given url with given payload data and return a python object from a parsed xml response
        Either QA or PROD environment, as defined in constants
        :param url: relative service endpoint (appended to the environment host)
        :param payload: XML request body
        :return: python object created from parsed xml
        '''
        log.debug("Making request to {0}".format(self.environment + url))
        response = self.session.post(self.environment + url,payload)
        # responses are XML; xmltodict converts them into nested dicts
        xmlObj = xmltodict.parse(response.text)
        log.debug("Got response: {0}".format(xmlObj))
        return xmlObj
| {
"repo_name": "adamsherman/pytrademonster",
"path": "pytrademonster/pyTradeMonster.py",
"copies": "3",
"size": "2969",
"license": "mit",
"hash": -5206871709071357000,
"line_mean": 34.3452380952,
"line_max": 139,
"alpha_frac": 0.6369147861,
"autogenerated": false,
"ratio": 4.1178918169209435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6254806603020944,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adamsherman'
import sys
import logging
import requests
from pytrademonster.constants import TradeMonsterConstants
log = logging.getLogger(__name__)
class LoginService(object):
    '''
    Handles logging in/out of trademonster and stores the per-session
    credentials (user id, session id, token, cookies).
    '''
    # form field names expected by the login endpoint
    PAYLOAD_USER = 'j_username'
    PAYLOAD_PASSWORD = 'j_password'

    def __init__(self, environment):
        self.userId = None
        self.sessionId = None
        self.token = None
        self.cookies = None
        self.loginUrl = environment + TradeMonsterConstants.URLS.LOGIN
        self.logoutUrl = environment + TradeMonsterConstants.URLS.LOGOUT

    def doLogin(self, user, password):
        '''
        Post credentials to trademonster; on success capture the session
        credentials on this instance, otherwise log a warning and exit.
        :param user:
        :param password:
        :return:
        '''
        credentials = {LoginService.PAYLOAD_USER: user,
                       LoginService.PAYLOAD_PASSWORD: password}
        response = requests.post(self.loginUrl, credentials)
        body = response.json()
        if 'token' not in body:
            # guard clause: a missing token means the login was rejected
            log.warn('Unable to login. Exiting.')
            sys.exit()
        self.token = body['token']
        self.sessionId = body['sessionId']
        self.userId = body['userId']
        self.cookies = response.cookies
        log.info('Successfully logged in!')

    def doLogout(self):
        # only meaningful once a session has been established
        if self.sessionId is not None:
            requests.post(self.logoutUrl, {'JSESSIONID': self.sessionId})
| {
"repo_name": "femtotrader/pytrademonster",
"path": "pytrademonster/services/loginService.py",
"copies": "3",
"size": "1635",
"license": "mit",
"hash": 1718809619505781000,
"line_mean": 30.4423076923,
"line_max": 99,
"alpha_frac": 0.6336391437,
"autogenerated": false,
"ratio": 4.128787878787879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6262427022487879,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adamsherman'
def enum(**enums):
    """Create an ad-hoc enumeration: a new class named 'Enum' whose class
    attributes are the given keyword arguments.

    Example: Colors = enum(RED='r'); Colors.RED == 'r'
    """
    members = dict(enums)
    return type('Enum', (), members)
class TradeMonsterConstants(object):
    '''
    Misc constants for interacting with TradeMonsters' service
    URLS are specific endpoints to make requests to
    Request classes (AccountRequests etc) contain the default xml payload requests as per TradeMonster documentation
    '''
    class Environments():
        # base hosts; the relative URLS below are appended to one of these
        QA = 'https://www.qa.monstersecurities.com'
        PROD = 'https://www.trademonster.com'
    # timestamp formats used when parsing/serialising service payloads
    ISO_TIME = '%Y%m%dT%H:%M:%S'
    TRANSACTION_TIME = '%Y-%m-%dT%H:%M:%S'
    PERFORMANCE_TIME = '%a %b %d %H:%M:%S GMT-0500 %Y'
    INSTRUMENTS = enum(EQUITY = 'Equity', OPTION = 'Option')
    CALL_PUT = enum(CALL = 'call', PUT = 'put')
    # NOTE(review): attribute name is misspelled ('SERIVCE') -- kept as-is
    # for backward compatibility with existing callers
    OPTION_LOOKUP_SERIVCE = Environments.PROD + '/detailedOptionLookUpLite.action'
    class URLS():
        # relative service endpoints (prefix with an Environments host)
        LOGIN = '/j_acegi_security_check'
        LOGOUT = '/j_acegi_logout'
        SERVICES = '/services'
        ACCOUNT_HISTORY_SERVICE = SERVICES + '/accountHistoryService'
        ACCOUNT_PERFORMANCE_SERVICE = SERVICES + '/accountPerformanceService'
        ACCOUNT_PERSON_SERVICE = SERVICES + '/personService'
        ACCOUNT_GROUP_SERVICE = SERVICES + '/customerAccountGroupingService'
        ACCOUNT_BALANCES_SERVICE = SERVICES + '/customerWidgetService'
        ACCOUNT_CASH_TRANSFER_SERVICE = SERVICES + '/cashTransferService'
        POSITION_SERVICE = SERVICES + '/clientPositionService'
        ORDER_PLACEMENT_SERVICE = SERVICES + '/orderStockService'
        ORDER_BOOK_SERVICE = SERVICES + '/orderBookService'
        QUOTES_OPTION_SERVICE = SERVICES + '/quotesOptionService'
        QUOTES_SYMBOL_SERVICE = SERVICES + '/quotesService'
    class AccountRequests():
        # operation names and default XML payload templates for the account
        # services; template field values are placeholders to be overwritten
        FETCH_ALL_GROUPS = 'fetchAllGroupsForAUser'
        CREATE_ACCOUNT_GROUP = 'createAccountGroup'
        TRANSACTION_TYPES = enum( ALL_NA = 'ALL N/A' ,
                        ALL = 'ALL',
                        ACH_OUT = 'ACH OUT',
                        ALLOCATION = 'ALLOCATION',
                        ASSIGNMENT = 'ASSIGNMENT',
                        CA_Corporate_Action = 'CA Corporate Action',
                        CHECK_OUT_Check_Withdrawal = 'CHECK OUT Check Withdrawal',
                        CLOSURE = 'CLOSURE',
                        DEPOSIT = 'DEPOSIT',
                        DEPOSIT_ACH_ACH_Deposit = 'DEPOSIT ACH ACH Deposit',
                        DEPOSIT_CHECK = 'DEPOSIT CHECK',
                        DEPOSIT_DELIVERY_Delivery = 'DEPOSIT DELIVERY Delivery',
                        DEPOSIT_INTEREST = 'DEPOSIT INTEREST',
                        DEPOSIT_RECEIPT = 'DEPOSIT RECEIPT',
                        DEPOSIT_WIRE_Wire_Deposit = 'DEPOSIT WIRE Wire Deposit',
                        DIV_Dividend = 'DIV Dividend',
                        EXERCISE = 'EXERCISE',
                        FEE_Fees = 'FEE Fees',
                        FUNDS_PLEDGED = 'FUNDS PLEDGED',
                        INTEREST = 'INTEREST',
                        JOURNAL = 'JOURNAL',
                        LEAPS = 'LEAPS',
                        MANUAL_ADJUSTMENT = ' MANUAL ADJUSTMENT',
                        MONEY_ROLL_UP = 'MONEY ROLL UP',
                        OPT_EXPIRATION_Option_Expiration = 'OPT_EXPIRATION Option Expiration',
                        REVERSED_Reversal = 'REVERSED Reversal',
                        SUB_JOURNAL = 'SUB JOURNAL ',
                        SWEEP = 'SWEEP',
                        TRADE = 'TRADE',
                        TRANSFER = 'TRANSFER',
                        WIRE_OUT = 'WIRE OUT',
                        WITHDRAWAL = 'WITHDRAWAL',
                        WITHHOLDING = 'WITHHOLDING'
                        )
        PERFORMANCE_CATEGORY = enum( SYMBOL = 'SYMBOL', ASSET_CLASS = 'ASSET_CLASS', ACCOUNT_HISTORY = "ACCOUNT_HISTORY",
                                     TAGS = 'TAGS', TRADE_LOTS = 'TRADE_LOTS')
        ACCOUNT_TYPES = enum(MARGIN = 'MARGIN', OPTION = 'OPTION' )
        DEFAULT_FETCH_ACCOUNTS = '<retrieveCustomerAccounts/>'
        DEFAULT_FETCH_GROUPS = '<fetchAllGroupsForAUser/>'
        DEFAULT_DELETE_GROUPS = """ <deleteGroups>
                                    <groupIds></groupIds>
                                    </deleteGroups> """
        DEFAULT_ACCOUNT_HISTORY = """<getAccountHistory>
                                    <userId></userId>
                                    <timeRange>RANGE</timeRange> <!-- Can also be TODAY -->
                                    <numOfTransactions>1000</numOfTransactions>
                                    <accountIds></accountIds>
                                    <startTime> <!-- Time tags are not sent if timeRange is TODAY -->
                                    <year>2012</year>
                                    <month>01</month>
                                    <date>01</date>
                                    <hours>00</hours>
                                    <minutes>00</minutes>
                                    <seconds>00</seconds>
                                    </startTime>
                                    <endTime>
                                    <year>2012</year>
                                    <month>01</month>
                                    <date>22</date>
                                    <hours>23</hours>
                                    <minutes>59</minutes>
                                    <seconds>59</seconds>
                                    </endTime>
                                    <transactionTypes>ALL</transactionTypes> <!-- See list below -->
                                    </getAccountHistory> """
        DEFAULT_CUST_WIDGET = """<getCustomerWidgetData>
                                <accountNumber></accountNumber>
                                <streaming>true</streaming>
                                <suppressHoldings>true</suppressHoldings>
                                <suppressPnL>false</suppressPnL>
                                </getCustomerWidgetData> """
        DEFAULT_ACCOUNT_PERFORMANCE = """<getAccountPerformance>
                                        <toDate>1337859125829</toDate>
                                        <accountNumber>2HC10320</accountNumber>
                                        <openOrClose />
                                        <fromDateVO>
                                        <dateObj>Thu May 24 00:00:00 GMT-0500 2012</dateObj> <date>24</date>
                                        <hours>0</hours>
                                        <year>2012</year>
                                        <seconds>0</seconds>
                                        <month>5</month>
                                        <minutes>0</minutes>
                                        </fromDateVO>
                                        <accountIds>1000000094122</accountIds>
                                        <toDateVO>
                                        <dateObj>Thu May 24 06:32:05 GMT-0500 2012</dateObj> <date>24</date>
                                        <hours>6</hours>
                                        <year>2012</year>
                                        <seconds>5</seconds>
                                        <month>5</month>
                                        <minutes>32</minutes>
                                        </toDateVO>
                                        <category>ACCOUNT_HISTORY</category>
                                        <timeFrame>RANGE</timeFrame>
                                        <fromDate>1337835600000</fromDate>
                                        </getAccountPerformance>"""
        DEFAULT_CASH_TRANSFER = """<getCashTransfers>
                                    <status>PENDING</status>
                                    <accountId></accountId>
                                    </getCashTransfers>"""
    class PositionRequests():
        # default XML payload templates for the position service
        DEFAULT_POSITIONS_DETAIL = """<getPositionsDetailNew>
                                    <accountIds></accountIds>
                                    <accountId></accountId>
                                    <loadSimulated>true</loadSimulated>
                                    <requireStrategy>false</requireStrategy>
                                    <suppressOpenPnL>true</suppressOpenPnL>
                                    <suppressDefaults>true</suppressDefaults>
                                    <filter />
                                    </getPositionsDetailNew>"""
        DEFAULT_POSITIONS_BASIC = """<getBasicPositionDetails>
                                    <symbol></symbol>
                                    <underlyer></underlyer>
                                    </getBasicPositionDetails>"""
        DEFAULT_POSITIONS_SELECTED = """<getSelectedPosition>
                                    <subscriptionIds>NaN</subscriptionIds>
                                    <accountIds></accountIds>
                                    <accountId></accountId>
                                    <symbol></symbol>
                                    <instrumentType></instrumentType>
                                    <loadSimulated>true</loadSimulated>
                                    <requireStrategy>true</requireStrategy>
                                    <filter />
                                    </getSelectedPosition>"""
        DEFAULT_POSITIONS_UNDERLIERS = """<getHeldUnderlyers>
                                    <accountId></accountId>
                                    <includeClosed>true</includeClosed>
                                    </getHeldUnderlyers> """
        DEFAULT_POSITIONS_TRANSACTIONS = """<getTxHistoryForInstrument>
                                    <accountId></accountId>
                                    <positionType></positionType>
                                    <symbol></symbol>
                                    <instrumentType></instrumentType>
                                    <userId></userId>
                                    </getTxHistoryForInstrument> """
    class OrderRequests():
        # spread names and default XML payload templates for the order
        # placement / order book services
        ORDER_SPREAD_TYPES = enum(BACKSPREAD = 'Backspread',
                        BUTTERFLY = 'Butterfly',
                        CALENDAR = 'Calendar',
                        CALL_BACKSPREAD = 'Call Backspread',
                        CALL_CALENDAR = 'Call Calendar',
                        CALL_DIAGONAL = 'Call Diagonal',
                        CALL = 'Call',
                        CALL_BUTTERFLY = 'Call Butterfly',
                        CALL_CONDOR = 'Call Condor',
                        CALL_VERTICAL = 'Call Vertical',
                        COMBINATION = 'Combination',
                        CONDOR = 'Condor',
                        COVERED = 'Covered',
                        COVERED_CALL = 'Covered Call',
                        CUSTOM_SPREAD = 'Custom Spread',
                        IRON_CONDOR = 'Iron Condor',
                        IRON_BUTTERFLY = 'Iron Butterfly',
                        PUT = 'Put',
                        PUT_BUTTERFLY = 'Put Butterfly',
                        PUT_CONDOR = 'Put Condor',
                        PROTECTIVE_PUT = 'Protective Put',
                        PUT_BACKSPREAD = 'Put Backspread',
                        PUT_CALENDAR = 'Put Calendar',
                        PUT_DIAGONAL = 'Put Diagonal',
                        PUT_VERTICAL = 'Put Vertical',
                        SINGLE_OPTION = 'SingleOption',
                        STOCK = 'Stock',
                        SHARES = 'Shares',
                        STRADDLE = 'Straddle',
                        STRANGLE = 'Strangle',
                        SYNTHETIC_STOCK = 'Synthetic Stock',
                        VERTICAL = 'Vertical',
                        VERTICAL_COLLAR = 'VerticalCollar',
                        VERTICAL_SPREAD = 'Vertical Spread')
        DEFAULT_SINGLE_LEG_ORDER = """<sendOrder>
                                    <accountType>OPTION</accountType>
                                    <modifyOrder>false</modifyOrder>
                                    <originalOrderId>NaN</originalOrderId>
                                    <combineLikeLegs>false</combineLikeLegs>
                                    <accountNumber>A0000019</accountNumber>
                                    <userId>1000000009867</userId>
                                    <limitPrice>2.74</limitPrice>
                                    <stopTriggerPrice>NaN</stopTriggerPrice> <!-- = Stop Price if Client selected Stop order -->
                                    <trailingAmount>NaN</trailingAmount> <!-- = Trailing Amount if Client selected Trail Stop -->
                                    <source>___</source>
                                    <orderId>NaN</orderId>
                                    <priceType>LM</priceType>
                                    <quantity>1000</quantity>
                                    <holdOrder>false</holdOrder>
                                    <duplicateOrder>false</duplicateOrder>
                                    <discretionFlag>false</discretionFlag>
                                    <solicitedFlag>false</solicitedFlag>
                                    <instrumentType>Equity</instrumentType>
                                    <orderLegEntries>
                                    <symbol>COOL</symbol>
                                    <orderSide>BUY</orderSide>
                                    <quantityRatio>1</quantityRatio>
                                    <instrumentType>Equity</instrumentType>
                                    </orderLegEntries>
                                    <timeInForce>DAY</timeInForce>
                                    <marketSession>REG</marketSession>
                                    <gtdDate></gtdDate>
                                    <noteVo>
                                    <userId></userId>
                                    <objectType>null</objectType>
                                    <noteText />
                                    <objectIds>NaN</objectIds>
                                    </noteVo>
                                    </sendOrder>"""
        DEFAULT_MULTI_LEG_ORDER = """<sendOrder>
                                    <accountType>OPTION</accountType>
                                    <modifyOrder>false</modifyOrder>
                                    <originalOrderId>NaN</originalOrderId>
                                    <combineLikeLegs>false</combineLikeLegs>
                                    <accountNumber>2HC08522</accountNumber>
                                    <displayQuantity>NaN</displayQuantity>
                                    <gtdDate>NaN</gtdDate>
                                    <userId>1000000000966</userId>
                                    <limitPrice>0.03</limitPrice>
                                    <stopTriggerPrice>NaN</stopTriggerPrice>
                                    <trailingAmount>NaN</trailingAmount>
                                    <discretionAmount>NaN</discretionAmount>
                                    <offSetAmount>NaN</offSetAmount>
                                    <source> </source> <!-- Required to be filled in with TM-assigned value --> <orderId>NaN</orderId>
                                    <priceType>LM</priceType>
                                    <quantity>10</quantity>
                                    <holdOrder>false</holdOrder>
                                    <duplicateOrder>false</duplicateOrder>
                                    <discretionFlag>false</discretionFlag>
                                    <solicitedFlag>false</solicitedFlag>
                                    <instrumentType>Option</instrumentType>
                                    <spreadName>Call Vertical</spreadName>
                                    <orderSide>BUY</orderSide>
                                    <orderLegEntries>
                                    <!-- Intentionally left blank -->
                                    </orderLegEntries>
                                    <timeInForce>DAY</timeInForce>
                                    <marketSession>REG</marketSession>
                                    <noteVo>
                                    <userId>1000000000966</userId>
                                    <objectType>null</objectType>
                                    <noteText />
                                    <objectIds>NaN</objectIds>
                                    </noteVo>
                                    </sendOrder>
                                    """
        DEFAULT_ORDER_CONFIRMATION = """<populateConfirmation>
                                    <accountType>OPTION</accountType>
                                    <modifyOrder>false</modifyOrder>
                                    <originalOrderId>NaN</originalOrderId>
                                    <combineLikeLegs>false</combineLikeLegs>
                                    <accountNumber>67110437</accountNumber>
                                    <displayQuantity>NaN</displayQuantity>
                                    <gtdDate>NaN</gtdDate>
                                    <userId>1000000000876</userId>
                                    <limitPrice>100.05</limitPrice>
                                    <stopTriggerPrice>NaN</stopTriggerPrice>
                                    <trailingAmount>NaN</trailingAmount>
                                    <discretionAmount>NaN</discretionAmount>
                                    <offSetAmount>NaN</offSetAmount>
                                    <source></source>
                                    <orderId>NaN</orderId>
                                    <priceType>LM</priceType>
                                    <quantity>100</quantity>
                                    <holdOrder>false</holdOrder>
                                    <duplicateOrder>false</duplicateOrder>
                                    <discretionFlag>false</discretionFlag>
                                    <solicitedFlag>false</solicitedFlag>
                                    <instrumentType>Equity</instrumentType>
                                    <orderLegEntries>
                                    <!-- intentionally left blank -->
                                    </orderLegEntries>
                                    <timeInForce>DAY</timeInForce>
                                    <marketSession>REG</marketSession>
                                    <noteVo>
                                    <userId>1000000000876</userId>
                                    <objectType>null</objectType>
                                    <noteText/>
                                    <objectIds>NaN</objectIds>
                                    </noteVo>
                                    </populateConfirmation> """
        DEFAULT_MUTUAL_FUND_ORDER = """<sendMFOrder>
                                    <orderSide>BUY</orderSide>
                                    <accountNumber>2HC08522</accountNumber>
                                    <instrumentType>MutualFund</instrumentType>
                                    <symbol>PRNEX</symbol>
                                    <dividendType>Cash</dividendType>
                                    <accountType>Cash</accountType>
                                    <userId>1000000000966</userId>
                                    <source> </source> <!-- Required to be filled in with TM-assigned value -->
                                    <amount>5000</amount>
                                    <noteVo>
                                    <userId>1000000000966</userId>
                                    <objectType>null</objectType>
                                    <noteText />
                                    <objectIds>NaN</objectIds>
                                    </noteVo>
                                    </sendMFOrder>"""
        DEFAULT_ORDER_CANCEL = """<cancelOrder></cancelOrder>"""
        DEFAULT_ORDER_CANCEL_ALL = """<cancelAllOrders></cancelAllOrders>"""
        DEFAULT_ORDER_CANCEL_DAY = """<cancelDayOrders></cancelDayOrders>"""
        DEFAULT_ALL_OPEN_ORDERS_COUNT = """<getCancelAllOrdersCount></getCancelAllOrdersCount>"""
        DEFAULT_DAY_ORDERS_COUNT = """<getCancelDayOrdersCount></getCancelDayOrdersCount>"""
        DEFAULT_LOAD_ORDER_BOOK = """<loadOrderBook>
                                    <dateRange>1</dateRange>
                                    <accountNumber></accountNumber>
                                    <statusList></statusList> <!-- create a new tag for each status -->
                                    </loadOrderBook>"""
        DEFAULT_LOAD_SPECIFIC_ORDERS = """<loadSpecifiedOrders>
                                    <orderIds></orderIds> <!-- create a new tag for each orderIds -->
                                    <filtersOn>false</filtersOn>
                                    </loadSpecifiedOrders>"""
        DEFAULT_GET_ORDER_HISTORY = """<getOrderHistory></getOrderHistory>"""
        DEFAULT_GET_ORDER_DETAIL = """<getOrderDetail></getOrderDetail>"""
    class QuotesRequests():
        # default XML payload templates for the quote services
        DEFAULT_OPTION_CHAIN_REQUEST = """<getOptionChain>
                                    <symbol></symbol>
                                    </getOptionChain> """
        DEFAULT_QUOTE_REQUEST = """<getQuotes>
                                    <blank></blank>
                                    </getQuotes>"""
    class ResponseRoots():
        # top-level element names used to unwrap parsed XML responses
        RETRIEVE_ACCT_ROOT = 'ns2:retrieveCustomerAccountsResponse'
        RETRIEVE_ACCT_HISTORY_ROOT = 'ns2:getAccountHistoryResponse'
        RETRIEVE_ACCT_PERFORMANCE_ROOT = 'ns2:getAccountPerformanceResponse'
        RETRIEVE_ORDER_PLACED_ROOT = 'ns2:sendOrderResponse'
        RETRIEVE_ORDER_CONFIRMATION_ROOT = 'ns2:populateConfirmationResponse'
        RETRIEVE_ORDER_CANCELLED_ROOT = 'ns1:cancelOrder'
        RETRIEVE_ORDER_DETAILS_ROOT = 'ns2:getOrderDetailResponse'
        RETRIEVE_ALL_CANCELLED_ROOT = 'ns2:cancelAllOrdersResponse'
        RETRIEVE_DAY_CANCELLED_ROOT = 'ns2:cancelDayOrdersResponse'
        RETRIEVE_ALL_CANCELLED_COUNT_ROOT = 'ns2:getCancelAllOrdersCountResponse'
        RETRIEVE_DAY_CANCELLED_COUNT_ROOT = 'ns2:getCancelDayOrdersCountResponse'
        RETRIEVE_QUOTE_CHAIN_ROOT = 'ns2:getOptionChainResponse'
        RETRIEVE_QUOTE_SYMBOL_ROOT = 'ns2:getQuotesResponse'
        RETRIEVE_POSITIONS_DETAILED_ROOT = 'ns2:getPositionsDetailNewResponse'
| {
"repo_name": "NunoEdgarGub1/pytrademonster",
"path": "pytrademonster/constants.py",
"copies": "3",
"size": "24930",
"license": "mit",
"hash": 4996901412927599000,
"line_mean": 57.2476635514,
"line_max": 138,
"alpha_frac": 0.389410349,
"autogenerated": false,
"ratio": 5.796326435712625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010762167263701642,
"num_lines": 428
} |
# USAGE
# python text_demo.py -i ../demo_images/bridge.jpg
# python text_demo.py -i ../demo_images/bridge.jpg -c 0
# import the necessary packages
import argparse
import cv2
import imutils.text
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
# BUG FIX: the original help text ("Contrast, positive value for more
# contrast") described a different option; -c/--center actually selects
# centered (1) vs coordinate-based (0) text placement.
ap.add_argument("-c", "--center", default=1, type=int,
                help="1 to center the text, 0 to place it at (-x, -y)")
# BUG FIX: without type=int, values supplied on the command line arrive as
# strings, which are not valid OpenCV text coordinates.
ap.add_argument("-x", default=5, type=int,
                help="X coordinate for text. Used if center == 0")
ap.add_argument("-y", default=25, type=int,
                help="Y coordinate for text. Used if center == 0")
args = vars(ap.parse_args())

# read in image to draw text on
image = cv2.imread(args['image'])

if args['center']:
    # draw centered text with a default font
    imutils.text.put_centered_text(image,
                                   'imutils.text\ndemo\noutput',
                                   font_face=cv2.FONT_HERSHEY_SIMPLEX,
                                   font_scale=1,
                                   color=(0, 255, 0),
                                   thickness=2)
else:
    # draw location specific text with a default font
    imutils.text.put_text(image,
                          'imutils.text\ndemo\noutput',
                          (args['x'], args['y']),
                          font_face=cv2.FONT_HERSHEY_SIMPLEX,
                          font_scale=1,
                          color=(0, 255, 0),
                          thickness=2)

# display resulting image with text
cv2.imshow('Image with Text', image)
cv2.waitKey(0)
| {
"repo_name": "jrosebr1/imutils",
"path": "demos/text_demo.py",
"copies": "1",
"size": "1676",
"license": "mit",
"hash": -3521648861329901600,
"line_mean": 35.4347826087,
"line_max": 75,
"alpha_frac": 0.5513126492,
"autogenerated": false,
"ratio": 3.8352402745995424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4886552923799542,
"avg_score": null,
"num_lines": null
} |
import tweepy
import time
from credentials import settings
from twitter_stream import twstream
def reply(api, tweet):
    """
    Reply to every captured tweet.

    :param api: authenticated tweepy API object
    :param tweet: list of [username, msg, id] triples
    :return: None
    """
    for captured in tweet:
        username = captured[0]
        # an @-mention at the start of the status turns it into a reply
        status_text = (".@" + username +
                       " have you heard of tourzan.com its a good travel resource. "
                       "the travel tips and guides are top notch.")
        api.update_status(status_text)
def time_line(api):
    """
    Open a keyword-filtered stream and decode what it captures.

    :param api: authenticated tweepy API object
    :return: decoded tweet data from twstream.TwitterStream.decoder
    """
    tracked_keywords = [
        '@twitter', 'tourist', 'traveling', 'tours', 'tour guides',
        'tours for the disabled', 'ADA tours', 'tours for kids',
        'jobs for college students', 'jobs for the elderly',
        'travel guide', 'international', 'overseas',
    ]
    twitter_stream = twstream.TwitterStream()
    raw_stream = twitter_stream.tweet_stream(api.auth, tracked_keywords)
    return twitter_stream.decoder(raw_stream)
def main():
    """
    Authenticate with Twitter, then poll the stream and reply forever.
    """
    # TODO: store previous id's to prevent spamming
    # TODO: intelligent text
    auth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)
    auth.set_access_token(settings.ACCESS_KEY, settings.ACCESS_SECRET)
    api = tweepy.API(auth)
    while True:
        for captured in time_line(api):
            reply(api, captured)
        # parenthesized single-argument print works identically on Python 2
        print('sleeping')
        time.sleep(5)
# Run the bot when executed as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "castaway2000/marketing_twitterbot",
"path": "tweetbot.py",
"copies": "1",
"size": "1610",
"license": "mit",
"hash": 2847415317086935000,
"line_mean": 28.8148148148,
"line_max": 119,
"alpha_frac": 0.6366459627,
"autogenerated": false,
"ratio": 3.5698447893569845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47064907520569843,
"avg_score": null,
"num_lines": null
} |
import tweepy
import json
class TwitterStreamListener(tweepy.StreamListener):
    """Tweepy stream listener class"""

    def on_status(self, status):
        """
        Print each incoming tweet.

        :param status: tweepy Status object for the captured tweet
        """
        # BUG FIX: the original compared this Status object to the integer
        # 420, which is never True; the 420 check belongs in on_error.
        print(status.text)

    def on_error(self, status_code):
        """
        Handle stream errors.

        :param status_code: HTTP error code reported by Twitter
        :return: False on 420 (rate limited) to disconnect the stream
            and prevent blacklisting
        """
        if status_code == 420:
            # returning False in on_data/on_error disconnects the stream
            return False
class TwitterStream(object):
    """Opens a keyword-filtered Twitter stream and decodes its JSON payloads."""

    def decoder(self, stream):
        """
        Decode a single raw JSON tweet.

        :param stream: json data (string) for one tweet
        :return: [[username, msg, id]] decoded json into a list of lists
        """
        decoded = json.loads(stream)
        return [[decoded['user']['screen_name'],
                 decoded['text'].encode('ascii', 'ignore'),
                 decoded['id']]]

    def tweet_stream(self, auth, keywords):
        """
        Start a stream tracking the given keywords.

        :param auth: api.auth object
        :param keywords: list of keyword strings
        :return: result of Stream.filter (stream runs asynchronously)
        """
        stream_listener = TwitterStreamListener()
        stream = tweepy.Stream(auth, stream_listener)
        # BUG FIX: the original called stream.listener(...), which is the
        # listener *attribute*, not a method -- Stream.filter() is what
        # starts the stream.  'async' is a reserved word on Python >= 3.7,
        # so the keyword is passed via **kwargs to stay parseable while
        # keeping the old tweepy argument name.
        filter_kwargs = {'track': keywords, 'async': True}
        return stream.filter(**filter_kwargs)
"repo_name": "castaway2000/marketing_twitterbot",
"path": "twitter_stream/twstream.py",
"copies": "1",
"size": "1275",
"license": "mit",
"hash": 6397049088228365000,
"line_mean": 26.7391304348,
"line_max": 111,
"alpha_frac": 0.6070588235,
"autogenerated": false,
"ratio": 4.292929292929293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001300103753743737,
"num_lines": 46
} |
__author__ = 'addtheice'
import os
import sys
import FileSystem
import PerlQuest
def write_report(quest_source_directory, report_file_path):
    """
    Walk the quest source tree and append a per-file conversion note to the
    report file.

    :param quest_source_directory: root directory containing quest files
    :param report_file_path: desired path of the report file
    """
    quests = FileSystem.get_quests(quest_source_directory)
    # BUG FIX: 'quests is []' compared identity against a fresh list literal,
    # which is never True, so the invalid-directory guard never fired.
    if not quests:
        print("Quest source directory invalid.")
        sys.exit(0)

    requested_report_path = report_file_path
    report_file_path = FileSystem.create_report_file(report_file_path)
    if report_file_path is None:
        # BUG FIX: the original concatenated the now-None result into the
        # message (TypeError); report the path the caller asked for instead.
        print("Unable to create report file '" + requested_report_path + "'")
        sys.exit(0)

    with open(report_file_path, "a") as report_file:
        for quest_triple in quests:
            sub_directory, file_name, file_type = quest_triple
            # BUG FIX: the original compared strings with 'is' ("" / "unknown"
            # / "lua"), relying on CPython string interning; use equality.
            if sub_directory == "":
                quest_file_path = os.path.join(quest_source_directory, file_name)
            else:
                quest_file_path = os.path.join(quest_source_directory, sub_directory, file_name)
            if file_type == "unknown":
                report_file.write(quest_file_path + " is not a perl file." + os.linesep)
                continue
            if file_type == "lua":
                report_file.write(quest_file_path + " is an all ready existing lua file." + os.linesep)
                continue
            perl_quest = PerlQuest.PerlQuest()
            perl_quest.read_perl_file(quest_file_path)
            if not perl_quest.file_read:
                report_file.write(perl_quest.file_path + " unable to read perl file." + os.linesep)
            else:
                perl_quest.text_to_tokens()
"repo_name": "addtheice/EQQuestPerlToLua",
"path": "Report.py",
"copies": "1",
"size": "1473",
"license": "mit",
"hash": 8448638817938587000,
"line_mean": 33.2790697674,
"line_max": 104,
"alpha_frac": 0.5960624576,
"autogenerated": false,
"ratio": 3.507142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4603205314742857,
"avg_score": null,
"num_lines": null
} |
__author__ = 'addtheice'
import os
def get_quests(quest_source_directory):
    """
    Find every quest file beneath a quest source directory.

    :param quest_source_directory: root directory of the quest files
    :return: list of [sub directory, quest file name, file type] triples,
        where file type is "perl", "lua", or "unknown".  An empty list is
        returned when the source directory is invalid.
    """
    root_directory = get_cleaned_quest_source_directory_name(quest_source_directory)
    if root_directory is None:
        return []
    found = []
    for current_dir, _, file_names in os.walk(root_directory):
        # Express the directory relative to the root: drop "<root><sep>"
        # for nested directories, then "<root>" for the root itself.
        relative_dir = current_dir.replace(root_directory + os.path.sep, "", 1)
        relative_dir = relative_dir.replace(root_directory, "", 1)
        for file_name in file_names:
            file_type = get_file_type(os.path.join(current_dir, file_name))
            found.append([relative_dir, file_name, file_type])
    return found
def get_cleaned_quest_source_directory_name(quest_source_directory):
    """
    Resolve a quest source directory name to an existing directory.

    Accepts either a path that is already valid or one relative to the
    current working directory.

    :param quest_source_directory: directory name of quest root directory
    :return: a usable quest source directory path, or None when neither
        form names an existing directory.
    """
    if os.path.isdir(quest_source_directory):
        # Already a valid path.
        return quest_source_directory
    candidate = os.path.join(os.getcwd(), quest_source_directory)
    # Fall back to treating it as relative to the working directory.
    return candidate if os.path.isdir(candidate) else None
def get_file_type(file_path):
    """
    Classify a quest file by its extension.

    Eventually a content-based heuristic could replace this, but extension
    matching is sufficient for now.

    :rtype : string
    :param file_path: file path to classify
    :return: "perl" for .pl files, "lua" for .lua files, "unknown" otherwise
    """
    for suffix, kind in ((".pl", "perl"), (".lua", "lua")):
        if file_path.endswith(suffix):
            return kind
    return "unknown"
def create_report_file(report_file):
    """
    Create (truncate) the report file, recursively creating its parent
    directories when they do not exist.

    :param report_file: name of the report file or the path to it
    :return: full path to the report file, or None if the file (or its
        directories) could not be created.
    """
    report_directory = os.path.dirname(report_file)
    # BUG FIX: the original tested "report_directory is ''" -- an identity
    # comparison against a string literal that only works by accident of
    # CPython interning; compare by value instead.
    if report_directory == "":
        # Bare file name: create it in the current working directory.
        try:
            with open(report_file, "w"):
                pass
        except IOError:
            return None
        return os.path.join(os.getcwd(), report_file)
    if not os.path.isdir(report_directory):
        try:
            os.makedirs(report_directory)
        # BUG FIX: os.makedirs raises OSError; on Python 2 that is NOT an
        # IOError, so the original let a failed mkdir escape uncaught.
        except (IOError, OSError):
            return None
    try:
        with open(report_file, "w"):
            pass
    except IOError:
        return None
    return report_file
| {
"repo_name": "addtheice/EQQuestPerlToLua",
"path": "FileSystem.py",
"copies": "1",
"size": "3601",
"license": "mit",
"hash": 4505249220283366400,
"line_mean": 24.3591549296,
"line_max": 94,
"alpha_frac": 0.6417661761,
"autogenerated": false,
"ratio": 4.110730593607306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252496769707306,
"avg_score": null,
"num_lines": null
} |
__author__ = 'addtheice'
class PerlQuest:
    """Loads a perl quest file and tokenizes it line by line."""

    def __init__(self):
        self.file_text_lines = []   # right-stripped lines of the source file
        self.file_read = False      # True once a file was read successfully
        self.parse_success = False  # reserved for a later parsing stage
        self.file_path = ""         # path of the last file given to read_perl_file
        self.tokens = []            # accumulated (kind, text) pairs

    def read_perl_file(self, perl_quest_path):
        """
        Read a perl quest file into file_text_lines.

        Sets file_read to indicate success; on IOError the line list and
        flags are reset.

        :param perl_quest_path: path of the perl file to read
        """
        self.file_path = perl_quest_path
        try:
            # BUG FIX: the original opened the file without ever closing it;
            # the with-statement guarantees the handle is released.
            with open(perl_quest_path) as perl_file:
                self.file_text_lines = [line.rstrip() for line in perl_file]
        except IOError:
            self.file_text_lines = []
            self.file_read = False
            self.parse_success = False
            return
        self.file_read = True

    @staticmethod
    def __is_comment_line__(line):
        """Return True when the stripped line starts with '#'."""
        return line.strip().startswith("#")

    def text_to_tokens(self):
        """Append ("comment", text) / ("unknown", text) pairs for every line."""
        for line in self.file_text_lines:
            if self.__is_comment_line__(line):
                # Drop only the leading '#'; keep the remaining comment text.
                self.tokens.append(("comment", line.strip().replace("#", "", 1)))
            else:
                self.tokens.append(("unknown", line))
"repo_name": "addtheice/EQQuestPerlToLua",
"path": "PerlQuest.py",
"copies": "1",
"size": "1092",
"license": "mit",
"hash": -6738381740455142000,
"line_mean": 25.0238095238,
"line_max": 84,
"alpha_frac": 0.5265567766,
"autogenerated": false,
"ratio": 3.9565217391304346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49830785157304347,
"avg_score": null,
"num_lines": null
} |
__author__ = 'addtheice'
# Canonical "show help" instruction returned by parse_command_line.
help_instruction = ["help"]
def command_line_help_message():
    """
    Print the command line usage/help text.
    """
    usage_lines = (
        "EQQuestPerlToLua - Bulk Everquest Perl quest to Lua quest conversion.",
        "",
        "Usage:",
        "\tEQQuestPerlToLua convert <quest source directory> <quest destination directory> [quest report file]",
        "\tEQQuestPerlToLua report <quest source directory> <quest report file>",
        "\tEQQuestPerlToLua -h | --help",
        "",
        "Options:",
        "\t-h --help\tShow this screen.",
        "\tconvert\tConvert quests from perl to lua, optionally create report file",
        "\treport\tCreate report file describing file conversions per quests",
    )
    for usage_line in usage_lines:
        print(usage_line)
def parse_command_line(command_line_arguments):
    """
    Parse the command line arguments into a list of instructions.

    Argument element paths are not verified, only parsed.

    :param command_line_arguments:
        command line arguments passed to the main python script in standard
        sys.argv format.
    :return:
        [["report", source, report path], ["convert", source, destination]]
        or [["convert", source, destination]]
        or [["report", source, report path]]
        or [["help"]]
    """
    # A fresh list is built for the help case so callers cannot mutate a
    # shared module-level constant out from under each other.
    help_result = [["help"]]
    arg_length = len(command_line_arguments)
    # ROBUSTNESS FIX: an empty argv used to raise IndexError below; treat
    # anything shorter than "<prog> <verb>" as a help request.
    if arg_length < 2:
        return help_result
    verb = command_line_arguments[1]
    if verb in ("-h", "--help"):
        return help_result
    if verb not in ("convert", "report"):
        # Unknown command line instruction.
        return help_result
    if verb == "report" and arg_length == 4:
        # Report instruction with enough arguments; strip optional quotes.
        return [["report", command_line_arguments[2].strip('"'),
                 command_line_arguments[3].strip('"')]]
    if verb == "convert" and arg_length == 4:
        # Convert instruction with enough arguments; strip optional quotes.
        return [["convert", command_line_arguments[2].strip('"'),
                 command_line_arguments[3].strip('"')]]
    if verb == "convert" and arg_length == 5:
        # Convert plus report file: emit both a report and a convert
        # instruction, report first.
        return [["report", command_line_arguments[2].strip('"'),
                 command_line_arguments[4].strip('"')],
                ["convert", command_line_arguments[2].strip('"'),
                 command_line_arguments[3].strip('"')]]
    return help_result
| {
"repo_name": "addtheice/EQQuestPerlToLua",
"path": "CommandLine.py",
"copies": "1",
"size": "2786",
"license": "mit",
"hash": -3190263501440945700,
"line_mean": 34.2658227848,
"line_max": 114,
"alpha_frac": 0.6564967696,
"autogenerated": false,
"ratio": 4.02020202020202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.517669878980202,
"avg_score": null,
"num_lines": null
} |
__author__ = 'adeb'
from collections import OrderedDict
import numpy as np
import theano
from spynet.utils.utilities import share
class LearningUpdate():
    """
    Abstract class defining the update in a Trainer object.
    """
    def __init__(self):
        pass

    def compute_updates(self, params, grads):
        """Return the theano update pairs for the given params/grads."""
        raise NotImplementedError

    @staticmethod
    def factory(**kwargs):
        """
        Factory function to create a learning update object from a dictionary.
        """
        update_type = kwargs["type"]
        learning_rate = kwargs["learning_rate"]
        if update_type == "GD":
            return LearningUpdateGD(learning_rate)
        if update_type == "GDmomentum":
            return LearningUpdateGDMomentum(learning_rate, kwargs["momentum"])
        raise Exception("No Learning update with this name. Check the config file.")
class LearningUpdateGD(LearningUpdate):
    """
    Gradient descent (GD) update.
    """
    def __init__(self, learning_rate):
        LearningUpdate.__init__(self)
        self.learning_rate = share(learning_rate, "learning_rate")

    def compute_updates(self, params, grads):
        """Return (param, param - learning_rate * grad) pairs, one per parameter."""
        return [(param, param - self.learning_rate * grad)
                for param, grad in zip(params, grads)]
class LearningUpdateGDMomentum(LearningUpdate):
    """
    GD + momentum.
    """
    def __init__(self, learning_rate, momentum):
        LearningUpdate.__init__(self)
        # ROBUSTNESS FIX: validate before allocating shared variables so an
        # invalid momentum cannot leave a half-initialized object behind.
        if momentum < 0 or momentum > 1:
            raise Exception("Momentum value should be between 0 and 1.")
        self.learning_rate = share(learning_rate, "learning_rate")
        self.momentum = share(momentum, "momentum")

    def compute_updates(self, params, grads):
        """
        Return an OrderedDict of momentum-SGD updates.

        For each parameter a zero-initialized shared variable 'diff'
        accumulates momentum * diff - learning_rate * grad, which is then
        applied to the parameter.
        """
        updates = OrderedDict()
        for param_i, grad_i in zip(params, grads):
            diff = share(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX), "diff")
            update_diff = self.momentum * diff - self.learning_rate * grad_i
            updates[param_i] = param_i + update_diff
            updates[diff] = update_diff
        return updates
"repo_name": "adbrebs/spynet",
"path": "training/learning_update.py",
"copies": "1",
"size": "2228",
"license": "bsd-2-clause",
"hash": -2345838618416378000,
"line_mean": 30.3943661972,
"line_max": 97,
"alpha_frac": 0.6220825853,
"autogenerated": false,
"ratio": 4.2117202268431,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001098671735978724,
"num_lines": 71
} |
__author__ = 'adeb'
from itertools import cycle
import numpy as np
from bisect import bisect
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib2tikz import save as tikz_save
import theano
import theano.tensor as T
from spynet.utils.utilities import error_rate, MSE, compute_dice, count_common_classes, compute_dice_from_counts
class Monitor():
    """
    Abstract class to monitor a specific statistic related to the training of a network.

    Subclasses define the class attribute ``type`` and implement compute_value().

    Attributes:
        type (string): type name of the monitor (defined by the class)
        name (string): name of the monitor (defined by the user)
        ds (Dataset object): dataset that the Monitor monitors
        n_batches_per_interval (int): number of batches between each measurement of the monitor
        ls_stopping_criteria (list of StoppingCriterion objects): a Monitor object may potentially be connected to
            StoppingCriterion objects. When a new measurement is recorded, the monitor sends a message to the
            corresponding stopping criteria.
        param_selector (ParamSelector object): a Monitor might be connected to a ParamSelector object. When a new
            measurement is recorded, the monitor sends a message to the corresponding ParamSelector.
        history_minibatch (list of integers): list of the id_minibatch at which the monitor recorded a value
            (the original docstring called this ``history_index``)
        history_value (list of reals): list of the values monitored at the corresponding id_minibatch of
            history_minibatch
        output_batch (theano function): returns the network outputs of a batch i
        output_last_batch (theano function): returns the networks outputs of the last batch (its size may differ)
    """
    type = None

    def __init__(self, n_times_per_epoch, name, ds):
        self.name = name
        self.ds = ds
        self.n_times_per_epoch = n_times_per_epoch
        self.n_batches_per_interval = None
        self.ls_stopping_criteria = []
        self.history_minibatch = []
        self.history_value = []
        self.param_selector = None
        self.batch_size = 1000  # Forced to have a small batch size to prevent memory problems
        self.n_batches = None
        self.last_batch_size = None
        # Theano functions compiled lazily in init() once the trainer is known.
        self.output_batch = None
        self.output_last_batch = None

    def init(self, trainer):
        """
        Compile the theano functions that evaluate the trainer's network on
        self.ds.  Must be called before record().
        """
        self.n_batches, self.last_batch_size = divmod(self.ds.n_data, self.batch_size)
        self.n_batches_per_interval = int(trainer.n_train_batches / self.n_times_per_epoch)

        in_batch = T.matrix('in_batch')  # Minibatch input matrix
        # Returned output of the network for an input batch
        out_batch = trainer.net.forward(in_batch, self.batch_size, True)

        idx_batch = T.lscalar()
        id1 = idx_batch * self.batch_size
        id2 = (idx_batch + 1) * self.batch_size
        self.output_batch = theano.function(
            inputs=[idx_batch],
            outputs=out_batch,
            givens={in_batch: self.ds.inputs_shared[id1:id2]})

        # The dataset size is generally not a multiple of batch_size, so a
        # dedicated function is compiled for the smaller trailing batch.
        if self.last_batch_size > 0:
            out_batch = trainer.net.forward(in_batch, self.last_batch_size, True)
            self.output_last_batch = theano.function(
                inputs=[],
                outputs=out_batch,
                givens={in_batch: self.ds.inputs_shared[self.ds.n_data-self.last_batch_size:]})

    def add_stopping_criteria(self, ls_stopping_criteria):
        """Connect additional StoppingCriterion objects to this monitor."""
        self.ls_stopping_criteria.extend(ls_stopping_criteria)

    def set_param_selector(self, param_selector):
        """Connect a ParamSelector that is notified on each new measurement."""
        self.param_selector = param_selector

    def record(self, epoch, epoch_minibatch, id_minibatch, force_record=False, update_stopping=True, verbose=True):
        """
        Record a value of the monitored statistic of the training.

        Args:
            epoch, epoch_minibatch, id_minibatch (int): state of the training
            force_record (bool): measure even when the measurement interval
                has not elapsed
            update_stopping (bool): notify the connected stopping criteria
            verbose (bool): forwarded to the stopping criteria
        Returns:
            True if a measurement was taken, False otherwise.
        """
        if (not force_record) and epoch_minibatch % self.n_batches_per_interval != 0:
            return False

        # Compute the monitored statistic
        measurement = self.compute_value()

        # Save value in history
        self.history_minibatch.append(id_minibatch)
        self.history_value.append(measurement)

        # Possibly select current parameters of the network
        if self.param_selector is not None:
            self.param_selector.update(id_minibatch, measurement)

        # Update possible stopping criteria connected to this monitor
        if update_stopping:
            for s_c in self.ls_stopping_criteria:
                s_c.update(epoch, epoch_minibatch, id_minibatch, verbose)

        return True

    def compute_value(self):
        """
        Compute the monitored statistic.
        """
        raise NotImplementedError

    def str_value_from_position(self, history_position):
        """
        Return a display string for a record given its index in the history.
        """
        return "[{} {}: {}]".format(self.type, self.name, self.history_value[history_position])

    def str_value_from_minibatch(self, minibatch_idx):
        """
        Return a display string for a record given the minibatch id at which
        it was taken.  Raises ValueError if no record exists for that id.
        """
        idx = self.history_minibatch.index(minibatch_idx)
        return self.str_value_from_position(idx)

    def get_minimum(self):
        """Return the smallest recorded value (inf when nothing is recorded)."""
        minimum = np.inf
        for it, val in zip(self.history_minibatch, self.history_value):
            if val < minimum:
                minimum = val
        return minimum

    def get_maximum(self):
        """Return the largest recorded value (-inf when nothing is recorded)."""
        maximum = -np.inf
        for it, val in zip(self.history_minibatch, self.history_value):
            if val > maximum:
                maximum = val
        return maximum

    @staticmethod
    def is_a_better_than_b(a, b, rate=1):
        # The base class has no ordering; subclasses override with their own
        # notion of "better" (lower for errors/MSE, higher for dice).
        return None
class MonitorMSE(Monitor):
    """
    Monitor that tracks the MSE (mean square error) of the network on a particular dataset
    """
    type = "MSE"

    def __init__(self, n_times_per_epoch, name, ds):
        Monitor.__init__(self, n_times_per_epoch, name, ds)

    def compute_value(self):
        """Average MSE over the dataset, accumulated batch by batch."""
        total = 0
        for batch_idx in xrange(self.n_batches):
            predictions = self.output_batch(batch_idx)
            start = batch_idx * self.batch_size
            stop = start + self.batch_size
            total += MSE(predictions, self.ds.outputs[start:stop]) * self.batch_size
        if self.last_batch_size > 0:
            # Handle the smaller trailing batch separately.
            predictions = self.output_last_batch()
            targets = self.ds.outputs[self.ds.n_data - self.last_batch_size:]
            total += MSE(predictions, targets) * self.last_batch_size
        return total / self.ds.n_data

    @staticmethod
    def is_a_better_than_b(a, b, rate=1):
        """Lower MSE is better: a beats b when a is below rate * b."""
        return a < (b * rate)
class MonitorClassification(Monitor):
    """
    Monitor class used for classification problems in which the network returns a prediction class.

    Adds theano functions that return the argmax predicted class per datapoint
    instead of the raw network outputs.
    """
    def __init__(self, n_times_per_epoch, name, ds):
        Monitor.__init__(self, n_times_per_epoch, name, ds)
        # Compiled in init(): batch index -> vector of predicted class indices.
        self.compute_batch_classes = None
        self.compute_last_batch_classes = None

    def init(self, trainer):
        """
        Compile Monitor's functions plus argmax class-prediction functions.
        """
        Monitor.init(self, trainer)

        in_batch = T.matrix('in_batch')  # Minibatch input matrix
        # Returned output of the network for an input batch
        out_batch = trainer.net.forward(in_batch, self.batch_size, True)

        idx_batch = T.lscalar()
        id1 = idx_batch * self.batch_size
        id2 = (idx_batch + 1) * self.batch_size
        self.compute_batch_classes = theano.function(
            inputs=[idx_batch],
            outputs=T.argmax(out_batch, axis=1),
            givens={in_batch: self.ds.inputs_shared[id1:id2]})

        # Dedicated function for the smaller trailing batch, if any.
        if self.last_batch_size > 0:
            out_batch = trainer.net.forward(in_batch, self.last_batch_size, True)
            self.compute_last_batch_classes = theano.function(
                inputs=[],
                outputs=T.argmax(out_batch, axis=1),
                givens={in_batch: self.ds.inputs_shared[self.ds.n_data-self.last_batch_size:]})
class MonitorErrorRate(MonitorClassification):
    """
    Monitor that tracks the error rate of the network on a particular dataset
    """
    type = "Error rate"

    def __init__(self, n_times_per_epoch, name, ds):
        MonitorClassification.__init__(self, n_times_per_epoch, name, ds)

    def compute_value(self):
        """Average classification error over the dataset, batch by batch."""
        total = 0
        for batch_idx in xrange(self.n_batches):
            predicted = self.compute_batch_classes(batch_idx)
            start = batch_idx * self.batch_size
            stop = start + self.batch_size
            expected = np.argmax(self.ds.outputs[start:stop], axis=1)
            total += error_rate(predicted, expected) * self.batch_size
        if self.last_batch_size > 0:
            # Handle the smaller trailing batch separately.
            predicted = self.compute_last_batch_classes()
            expected = np.argmax(self.ds.outputs[self.ds.n_data - self.last_batch_size:], axis=1)
            total += error_rate(predicted, expected) * self.last_batch_size
        return total / self.ds.n_data

    @staticmethod
    def is_a_better_than_b(a, b, rate=1):
        """Lower error rate is better: a beats b when a is below rate * b."""
        return a < (b * rate)
class MonitorDiceCoefficient(MonitorClassification):
    """
    Monitor that tracks the dice coefficient of the network on a particular dataset
    """
    type = "Dice coefficient"

    def __init__(self, n_times_per_epoch, name, ds, n_classes):
        self.n_classes = n_classes
        MonitorClassification.__init__(self, n_times_per_epoch, name, ds)

    def compute_value(self):
        """Mean dice over the non-background classes, accumulated per batch."""
        class_counts = np.zeros((self.n_classes - 1, 3))
        for batch_idx in xrange(self.n_batches):
            predicted = self.compute_batch_classes(batch_idx)
            start = batch_idx * self.batch_size
            stop = start + self.batch_size
            expected = np.argmax(self.ds.outputs[start:stop], axis=1)
            class_counts += count_common_classes(predicted, expected, self.n_classes)
        if self.last_batch_size > 0:
            # Handle the smaller trailing batch separately.
            predicted = self.compute_last_batch_classes()
            expected = np.argmax(self.ds.outputs[self.ds.n_data - self.last_batch_size:], axis=1)
            class_counts += count_common_classes(predicted, expected, self.n_classes)
        return np.mean(compute_dice_from_counts(class_counts))

    @staticmethod
    def is_a_better_than_b(a, b, rate=1):
        """Higher dice is better: a beats b when rate * a exceeds b."""
        return (a * rate) > b
def save_records_plot(file_path, ls_monitors, name, n_train_batches, legend_loc="upper right"):
    """
    Save a plot of a list of monitors' history.

    Args:
        file_path (string): the folder path where to save the plot
        ls_monitors: the list of statistics to plot
        name: name of file to be saved
        n_train_batches: the total number of training batches
        legend_loc: matplotlib legend location string
    """
    line_styles = cycle(["--", "-", "-.", ":"])

    plt.figure()
    for monitor in ls_monitors:
        # Convert minibatch ids into (fractional) epochs.
        epochs = [i / float(n_train_batches) for i in monitor.history_minibatch]
        values = monitor.history_value
        # Sort the points by epoch so the curve is drawn left to right.
        xs, ys = zip(*sorted(zip(epochs, values)))
        plt.plot(xs, ys, next(line_styles), label=monitor.name)

    plt.xlabel('Training epoch')
    plt.ylabel(ls_monitors[0].type)
    plt.legend(loc=legend_loc)
    plt.locator_params(axis='y', nbins=7)
    plt.locator_params(axis='x', nbins=10)
    plt.savefig(file_path + name + ".png")
    tikz_save(file_path + name + ".tikz", figureheight='\\figureheighttik', figurewidth='\\figurewidthtik')
"repo_name": "adbrebs/spynet",
"path": "training/monitor.py",
"copies": "1",
"size": "11092",
"license": "bsd-2-clause",
"hash": -6005195572273261000,
"line_mean": 35.7317880795,
"line_max": 118,
"alpha_frac": 0.6242336819,
"autogenerated": false,
"ratio": 3.7096989966555185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9824822055721745,
"avg_score": 0.0018221245667548432,
"num_lines": 302
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.